def get_lists_publidns(file) -> Tuple[List, List, List]:
    """Parse a public DNS servers CSV and split entries by address family.

    Only rows whose 8th column (index 7) is empty are kept.  Column 0 is
    parsed as an IP address, column 1 as an optional hostname.  The
    well-known golden IPv4 resolvers are always appended.

    Returns a tuple (ipv4 addresses, ipv6 addresses, hostnames).
    """
    ipv4_entries = []
    ipv6_entries = []
    hostname_entries = []
    with open(get_abspath_source_file(file)) as csv_file:
        for row in csv.reader(csv_file, delimiter=',', quotechar='"'):
            # A non-empty column 7 marks rows that must be skipped.
            if row[7] not in (None, ""):
                continue
            try:
                parsed = ipaddress.ip_address(row[0])
            except ValueError as exc:
                # Malformed address: log and move on to the next row.
                logging.warning(str(exc))
                continue
            if parsed.version == 4:
                ipv4_entries.append(parsed.compressed)
            elif parsed.version == 6:
                ipv6_entries.append(parsed.compressed)
            if row[1] not in (None, "", '.'):
                hostname_entries.append(row[1])
    # Make sure the golden resolvers are present exactly once.
    for golden in golden_servers_ipv4:
        if golden not in ipv4_entries:
            ipv4_entries.append(golden)
    return ipv4_entries, ipv6_entries, hostname_entries
def process(file, dst, type):
    """Build a warninglist of certificate fingerprints from a Mozilla CSV.

    For every row the PEM blob is parsed and its md5/sha1 fingerprints are
    computed; the sha256 fingerprint is taken from the CSV directly.

    :param file: source CSV file name
    :param dst: destination warninglist name
    :param type: human-readable list type used in name/description
    """
    hashes = set()
    with open(get_abspath_source_file(file), 'r') as f_in:
        for obj in csv.DictReader(f_in):
            # Normalise the PEM blob: strip wrapping quotes, CRs and blank lines.
            pem = obj['PEM Info'].strip("'").replace('\r', '').replace('\n\n', '\n')
            # Sanity probe: the row must carry one of the two known name
            # columns (KeyError propagates if both are missing).  The except
            # is narrowed to KeyError — the original bare `except:` would
            # also have swallowed KeyboardInterrupt and programming errors.
            try:
                obj['Certificate Name']
            except KeyError:
                obj['Common Name or Certificate Name']
            cert = load_certificate(FILETYPE_PEM, pem)
            hashes.add(gethash(cert, 'md5'))
            hashes.add(gethash(cert, 'sha1'))
            hashes.add(obj['SHA-256 Fingerprint'].lower())
    warninglist = {
        'name': 'Fingerprint of {type}'.format(type=type),
        'version': get_version(),
        'description': "Fingerprint of {type} taken from Mozilla's lists at https://wiki.mozilla.org/CA".format(type=type),
        'list': hashes,
        'type': 'string',
        'matching_attributes': [
            "md5", "sha1", "sha256", "filename|md5", "filename|sha1", "filename|sha256",
            "x509-fingerprint-md5", "x509-fingerprint-sha1", "x509-fingerprint-sha256"
        ]
    }
    write_to_file(warninglist, dst)
def get_lists(file):
    """Extract the Tranco list and its top-10k slice from the zipped CSV.

    :param file: name of the zip archive containing ``top-1m.csv``
    :return: tuple (top10k, all_sites) of raw CSV lines (bytes).
             Both are empty lists when the archive has no ``top-1m.csv``
             (the original raised NameError in that case).
    """
    all_sites = []
    top10k = []
    with zipfile.ZipFile(get_abspath_source_file(file), 'r') as tranco_lists:
        for name in tranco_lists.namelist():
            if name != 'top-1m.csv':
                continue
            with tranco_lists.open(name) as tranco:
                all_sites = tranco.readlines()
            top10k = all_sites[:10000]
            break  # only one relevant member in the archive
    return top10k, all_sites
def get_lists(file):
    """Extract top-1k/5k/10k/20k slices of the Cisco Umbrella list.

    :param file: name of the zip archive containing ``top-1m.csv``
    :return: tuple (top1k, top5k, top10k, top20k) of raw CSV lines (bytes).
             All empty when the archive has no ``top-1m.csv`` (the original
             raised NameError in that case).
    """
    # `all_sites` replaces the original local `all`, which shadowed the builtin.
    all_sites = []
    with zipfile.ZipFile(get_abspath_source_file(file), 'r') as cisco_lists:
        for name in cisco_lists.namelist():
            if name == "top-1m.csv":
                with cisco_lists.open(name) as cisco_list:
                    all_sites = cisco_list.readlines()
                break  # only one relevant member in the archive
    return all_sites[:1000], all_sites[:5000], all_sites[:10000], all_sites[:20000]
def process(files, dst):
    """Aggregate the threatstop CRL/OCSP whitelist files into one warninglist.

    :param files: iterable of plain-text source file names (one entry per line)
    :param dst: destination warninglist name
    """
    entries = []
    for file in files:
        with open(get_abspath_source_file(file), 'r') as handle:
            entries.extend(line.strip() for line in handle)
    warninglist = {
        'type': "string",
        'matching_attributes': ["hostname", "domain", "ip-dst", "ip-src", "url", "domain|ip"],
        'name': "CRL Warninglist",
        'version': get_version(),
        'description': "CRL Warninglist from threatstop (https://github.com/threatstop/crl-ocsp-whitelist/)",
        'list': entries
    }
    write_to_file(warninglist, dst)
def process(file, dst):
    """Build the Azure datacenter IP-range warninglist from the MS JSON feed.

    :param file: source JSON file name (Microsoft service-tags format)
    :param dst: destination warninglist name
    """
    with open(get_abspath_source_file(file), 'r') as json_file:
        feed = json.load(json_file)
    # Flatten every service tag's addressPrefixes into a single list.
    prefixes = []
    for entry in feed['values']:
        prefixes.extend(entry['properties']['addressPrefixes'])
    warninglist = {
        'name': 'List of known Microsoft Azure Datacenter IP Ranges',
        'version': get_version(),
        'description': 'Microsoft Azure Datacenter IP Ranges',
        'list': prefixes,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }
    write_to_file(warninglist, dst)
def process(file, dst):
    """Warninglist of the top-10k Majestic Million domains (3rd CSV column).

    :param file: source CSV file name
    :param dst: destination warninglist name
    """
    with open(get_abspath_source_file(file), newline='\n', encoding='utf-8', errors='replace') as csv_file:
        top_sites = csv_file.readlines()[:10000]
    warninglist = {
        'name': 'Top 10K websites from Majestic Million',
        'version': get_version(),
        'description': 'Event contains one or more entries from the top 10K of the most used websites (Majestic Million).',
        'matching_attributes': ['hostname', 'domain'],
        'type': 'hostname',
        # The domain lives in the third comma-separated field of each line.
        'list': [line.split(',')[2].rstrip() for line in top_sites]
    }
    write_to_file(warninglist, dst)
def process(files, dst):
    """Warninglist of the Moz top-500 domains/pages (2nd CSV column).

    :param files: iterable of source CSV file names
    :param dst: destination warninglist name
    """
    entries = []
    for file in files:
        with open(get_abspath_source_file(file)) as csv_file:
            for record in csv.reader(csv_file, delimiter=','):
                # Drop trailing whitespace and any trailing slash.
                entries.append(record[1].rstrip().rstrip('/'))
    warninglist = {
        'description': "Event contains one or more entries from the top 500 of the most used domains (Mozilla).",
        'version': get_version(),
        'name': "Top 500 domains and pages from https://moz.com/top500",
        'type': 'string',
        'list': entries,
        'matching_attributes': ['hostname', 'domain', 'uri', 'url']
    }
    write_to_file(warninglist, dst)
def get_lists_dnscrypt(file) -> Tuple[List, List]:
    """Parse the dnscrypt resolvers CSV and split addresses by IP version.

    Column 10 holds either ``[ipv6]`` / ``[ipv6]:port`` or ``ipv4`` /
    ``ipv4:port``; the port suffix is stripped before parsing.

    :param file: source CSV file name (first line is a header)
    :return: tuple (ipv4 list, ipv6 list) of compressed addresses
    """
    lipv4 = []
    lipv6 = []
    with open(get_abspath_source_file(file)) as csv_file:
        servers_list = csv.reader(csv_file, delimiter=',', quotechar='"')
        next(servers_list)  # skip header
        for row in servers_list:
            address = row[10]
            if not address:
                continue  # empty field: the original raised IndexError here
            if address[0] == "[":
                # Bracketed IPv6, optionally followed by :port.
                address = address[1:address.index("]")]
            else:
                # IPv4 (or bare name): drop any :port suffix.
                address = address.split(":")[0]
            try:
                ip = ipaddress.ip_address(address)
            except ValueError as exc:
                # Log and skip malformed rows, consistent with
                # get_lists_publidns (the original crashed instead).
                logging.warning(str(exc))
                continue
            if ip.version == 4:
                lipv4.append(ip.compressed)
            elif ip.version == 6:
                lipv6.append(ip.compressed)
    return lipv4, lipv6
def process(file, dst, name: str, description: str):
    """Build a consolidated CIDR warninglist from the MS Azure JSON feed.

    :param file: source JSON file name (Microsoft service-tags format)
    :param dst: destination warninglist name
    :param name: warninglist display name
    :param description: warninglist description
    """
    warninglist = {
        'name': name,
        'version': get_version(),
        'description': description,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }
    with open(get_abspath_source_file(file), 'r') as json_file:
        feed = json.load(json_file)
    # Flatten all service-tag prefixes, then merge overlapping networks.
    prefixes = []
    for entry in feed['values']:
        prefixes.extend(entry['properties']['addressPrefixes'])
    warninglist['list'] = consolidate_networks(prefixes)
    write_to_file(warninglist, dst)
def get_networks_for_asn(asn: int) -> List[str]:
    """Return all IPv4 and IPv6 prefixes announced by the given ASN.

    Responses from api.bgpview.io are cached on disk so repeated runs do
    not hit the rate-limited API again.

    :param asn: autonomous system number
    :return: list of prefix strings (IPv4 first, then IPv6)
    :raises requests.HTTPError: when the API call fails
    """
    temp_file = get_abspath_source_file("bgp-asn-{}".format(asn))
    try:
        # Cache hit: reuse the previously downloaded response.
        # `with` closes the handle (the original leaked it via bare open()).
        with open(temp_file, "r") as cached:
            prefixes = json.load(cached)
    except (OSError, ValueError):
        # Cache miss or corrupt cache.  Narrowed from a bare `except:`,
        # which would also have hidden KeyboardInterrupt and real bugs.
        # (json.JSONDecodeError is a subclass of ValueError.)
        sleep(0.5)  # API has limitation, we have to wait between requests
        response = download(
            "https://api.bgpview.io/asn/{}/prefixes".format(asn))
        response.raise_for_status()
        prefixes = response.json()
        with open(temp_file, "w") as cache:
            json.dump(prefixes, cache)
    output = []
    for ipv4_prefix in prefixes["data"]["ipv4_prefixes"]:
        output.append(ipv4_prefix["prefix"])
    for ipv6_prefix in prefixes["data"]["ipv6_prefixes"]:
        output.append(ipv6_prefix["prefix"])
    return output
def process(file, dst):
    """Build the AWS IP-range warninglist from Amazon's published JSON.

    :param file: source JSON file name (ip-ranges.json format)
    :param dst: destination warninglist name
    """
    with open(get_abspath_source_file(file), 'r') as json_file:
        aws_feed = json.load(json_file)
    # `cidrs` replaces the original one-letter local `l` (easily misread).
    cidrs = [prefix['ip_prefix'] for prefix in aws_feed['prefixes']]
    cidrs.extend(prefix['ipv6_prefix'] for prefix in aws_feed['ipv6_prefixes'])
    warninglist = {
        'name': 'List of known Amazon AWS IP address ranges',
        'version': get_version(),
        'description': 'Amazon AWS IP address ranges (https://ip-ranges.amazonaws.com/ip-ranges.json)',
        'type': 'cidr',
        'list': cidrs,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }
    write_to_file(warninglist, dst)
def process(files, dst):
    """Build the Stackpath CDN warninglist from plain-text network files.

    :param files: iterable of source file names, one network or host per line
    :param dst: destination warninglist name
    """
    warninglist = {
        'name': "List of known Stackpath CDN IP ranges",
        'version': get_version(),
        'description': "List of known Stackpath (Highwinds) CDN IP ranges (https://support.stackpath.com/hc/en-us/articles/360001091666-Whitelist-CDN-WAF-IP-Blocks)",
        'type': "cidr",
        'list': [],
        'matching_attributes': ["ip-dst", "ip-src", "domain|ip"]
    }
    for file in files:
        with open(get_abspath_source_file(file), 'r') as f:
            for line in f:
                entry = line.strip()
                try:
                    ipaddress.ip_network(entry)
                except ValueError:
                    # Host address with host bits set: keep only the host IP.
                    # ip_interface replaces the original IPv6Interface, so
                    # IPv4 entries such as "1.2.3.4/24" no longer raise.
                    entry = str(ipaddress.ip_interface(entry).ip)
                warninglist['list'].append(entry)
    write_to_file(warninglist, dst)
def process(file, dst):
    """Warninglist of the top-1000 Alexa sites from the zipped CSV.

    :param file: name of the zip archive containing ``top-1m.csv``
    :param dst: destination warninglist name
    """
    # Stays empty when the archive lacks top-1m.csv (the original raised
    # NameError on the later loop in that case).
    top1000 = []
    with zipfile.ZipFile(get_abspath_source_file(file), 'r') as alexa_lists:
        for name in alexa_lists.namelist():
            if name == "top-1m.csv":
                with alexa_lists.open(name) as top:
                    top1000 = top.readlines()[:1000]
                break  # only one relevant member in the archive
    warninglist = {
        'description': "Event contains one or more entries from the top 1000 of the most used website (Alexa).",
        'version': get_version(),
        'name': "Top 1000 website from Alexa",
        'type': 'hostname',
        'list': [],
        'matching_attributes': ['hostname', 'domain', 'url', 'domain|ip']
    }
    for site in top1000:
        # Lines are bytes ("rank,domain"); keep the domain field.
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.rstrip())
    write_to_file(warninglist, dst)