Example #1
0
def main():
    """Download both resolver lists and generate the DNS warninglists."""
    sources = (
        ('https://public-dns.info/nameservers.csv', 'public-dns-nameservers.csv'),
        ("https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v1/dnscrypt-resolvers.csv",
         "dnscrypt-resolvers.csv"),
    )
    for url, local_file in sources:
        download_to_file(url, local_file)

    # Merge addresses from both sources; hostnames only come from public-dns.
    ipv4, ipv6, hostname = get_lists_publidns(sources[0][1])
    ipv4_c, ipv6_c = get_lists_dnscrypt(sources[1][1])

    process(ipv4 + ipv4_c, ipv6 + ipv6_c, hostname)
Example #2
0

def process(file, dst):
    """Build the Majestic Million top-10K hostname warninglist.

    :param file: name of the downloaded majestic_million CSV
    :param dst: destination folder for the generated warninglist
    """
    with open(get_abspath_source_file(file), newline='\n', encoding='utf-8', errors='replace') as csv_file:
        # Only the first 10 000 entries are kept.
        sites = csv_file.readlines()[:10000]

    warninglist = {
        'name': 'Top 10K websites from Majestic Million',
        'version': get_version(),
        'description': 'Event contains one or more entries from the top 10K of the most used websites (Majestic Million).',
        'matching_attributes': ['hostname', 'domain'],
        'type': 'hostname',
        # The third CSV column holds the domain name.
        'list': [line.split(',')[2].rstrip() for line in sites],
    }

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    # Fetch the Majestic Million CSV and build the warninglist from it.
    source_url = 'http://downloads.majestic.com/majestic_million.csv'
    local_copy = 'majestic_million.csv'
    destination = 'majestic_million'

    download_to_file(source_url, local_copy)
    process(local_copy, destination)
Example #3
0
        'description':
        "List of known Cloudflare IP ranges (https://www.cloudflare.com/ips/)",
        'type': "cidr",
        'list': [],
        'matching_attributes': ["ip-dst", "ip-src", "domain|ip"]
    }

    for file in files:
        with open(file, 'r') as f:
            ips = f.readlines()
        for ip in ips:
            warninglist['list'].append(ip.strip())

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    # Cloudflare publishes separate IPv4 and IPv6 range lists.
    base_url = "https://www.cloudflare.com/"
    destination = 'cloudflare'

    downloaded = []
    for suffix in ('ips-v4', 'ips-v6'):
        local_name = 'cloudflare_{}.txt'.format(suffix)
        download_to_file(base_url + suffix, local_name)
        downloaded.append(local_name)

    process(downloaded, destination)
        servers_list = csv.reader(csv_file, delimiter=',', quotechar='"')

        lipv4 = []
        lipv6 = []
        lhostname = []
        for row in servers_list:
            if row[7] in (None, ""):
                try:
                    ip = ipaddress.ip_address(row[0])

                    if ip.version == 4:
                        lipv4.append(ip.compressed)
                    elif ip.version == 6:
                        lipv6.append(ip.compressed)

                    if row[1] not in (None, "", '.'):
                        lhostname.append(row[1])
                except ValueError as exc:
                    logging.warning(str(exc))

    return lipv4, lipv6, lhostname


if __name__ == '__main__':
    # Download the public-dns.info nameserver dump and process it.
    source_url = 'https://public-dns.info/nameservers.csv'
    local_copy = 'public-dns-nameservers.csv'

    download_to_file(source_url, local_copy)

    process(local_copy)
Example #5
0
    for site in sites:
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.strip().replace('\\r\\n', ''))

    write_to_file(warninglist, dst)


def get_lists(file):
    """Extract the top 1K/5K/10K/20K entries from the Cisco Umbrella archive.

    :param file: name of the downloaded ``top-1m.csv.zip`` archive
    :return: tuple of raw CSV lines — first 1000, 5000, 10000 and 20000
    :raises ValueError: if the archive does not contain ``top-1m.csv``
        (previously this fell through to an opaque NameError)
    """
    with zipfile.ZipFile(get_abspath_source_file(file), 'r') as cisco_lists:
        for name in cisco_lists.namelist():
            if name != "top-1m.csv":
                continue
            with cisco_lists.open(name) as cisco_list:
                # Renamed from `all`, which shadowed the builtin.
                entries = cisco_list.readlines()
            return entries[:1000], entries[:5000], entries[:10000], entries[:20000]

    raise ValueError("top-1m.csv not found in archive {!r}".format(file))


if __name__ == '__main__':
    # Cisco Umbrella top-1M popularity list, shipped as a zipped CSV.
    source_url = "http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip"
    local_copy = "cisco_top-1m.csv.zip"

    download_to_file(source_url, local_copy)

    process(local_copy)
Example #6
0
    ]
    warninglist['list'] = []

    for site in sites:
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.rstrip())

    write_to_file(warninglist, dst)


def get_lists(file):
    """Extract the Tranco list and its top-10K prefix from the archive.

    :param file: name of the downloaded ``top-1m.csv.zip`` archive
    :return: tuple ``(top10k, all_sites)`` of raw CSV lines
    :raises ValueError: if the archive does not contain ``top-1m.csv``
        (previously this fell through to an opaque NameError on return)
    """
    with zipfile.ZipFile(get_abspath_source_file(file), 'r') as tranco_lists:
        for name in tranco_lists.namelist():
            if name != 'top-1m.csv':
                continue
            with tranco_lists.open(name) as tranco:
                all_sites = tranco.readlines()
            return all_sites[:10000], all_sites

    raise ValueError("top-1m.csv not found in archive {!r}".format(file))


if __name__ == '__main__':
    # Tranco research-oriented top-1M list (zipped CSV).
    source_url = 'https://tranco-list.eu/top-1m.csv.zip'
    local_copy = 'tranco_top-1m.csv.zip'

    download_to_file(source_url, local_copy)

    process(local_copy)
    with open(get_abspath_source_file(file), 'r') as json_file:
        amazon_aws_ip_list = json.load(json_file)
    l = []

    for prefix in amazon_aws_ip_list['prefixes']:
        l.append(prefix['ip_prefix'])

    for prefix in amazon_aws_ip_list['ipv6_prefixes']:
        l.append(prefix['ipv6_prefix'])

    warninglist = {
        'name': 'List of known Amazon AWS IP address ranges',
        'version': get_version(),
        'description':
        'Amazon AWS IP address ranges (https://ip-ranges.amazonaws.com/ip-ranges.json)',
        'type': 'cidr',
        'list': l,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    # AWS publishes its IP ranges as a single JSON document.
    source_url = "https://ip-ranges.amazonaws.com/ip-ranges.json"
    local_copy = "amazon_ip-ranges.json"
    destination = "amazon-aws"

    download_to_file(source_url, local_copy)
    process(local_copy, destination)
        'name': "Top 500 domains and pages from https://moz.com/top500",
        'type': 'string',
        'list': [],
        'matching_attributes': ['hostname', 'domain', 'uri', 'url']
    }

    for file in files:
        with open(get_abspath_source_file(file)) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                v = row[1]
                warninglist['list'].append(v.rstrip().rstrip('/'))

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    domains_url = "https://moz.com/top-500/download/?table=top500Domains"
    domains_file = "moz-top500.domains.csv"
    destination = 'moz-top500'

    # NOTE: the top-500 *pages* list ("https://moz.com/top500/pages/csv" ->
    # "moz-top500.pages.csv") is deliberately not fetched or processed.
    download_to_file(domains_url, domains_file)

    process([domains_file], destination)
    }

    for file in files:
        with open(get_abspath_source_file(file), 'r') as f:
            ips = f.readlines()
        for ip in ips:
            iptoadd = ip.strip()
            try:
                ipaddress.ip_network(ip.strip())
            except ValueError as err:  # if it's host given strip to the subnet
                iptoadd = str(ipaddress.IPv6Interface(ip.strip()).ip)
            warninglist['list'].append(iptoadd)

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    # Direct link to the text attachment referenced from
    # https://support.stackpath.com/hc/en-us/articles/360001091666-Whitelist-CDN-WAF-IP-Blocks
    attachment_url = "https://support.stackpath.com/hc/en-us/article_attachments/360096407372/ipblocks.txt"
    attachment_name = 'ipblocks.txt'
    destination = 'stackpath'

    local_name = 'stackpath_{}'.format(attachment_name)
    download_to_file(attachment_url, local_name)

    # process() expects a list of files, even for a single download.
    process([local_name], destination)
        for name in alexa_lists.namelist():
            if name == "top-1m.csv":
                with alexa_lists.open(name) as top:
                    top1000 = top.readlines()[:1000]
            else:
                continue

    warninglist = {
        'description': "Event contains one or more entries from the top 1000 of the most used website (Alexa).",
        'version': get_version(),
        'name': "Top 1000 website from Alexa",
        'type': 'hostname',
        'list': [],
        'matching_attributes': ['hostname', 'domain', 'url', 'domain|ip']
    }

    for site in top1000:
        v = site.decode('UTF-8').split(',')[1]
        warninglist['list'].append(v.rstrip())

    write_to_file(warninglist, dst)


if __name__ == "__main__":
    # Alexa top-1M list (zipped CSV); only the top 1000 are used downstream.
    source_url = "http://s3.amazonaws.com/alexa-static/top-1m.csv.zip"
    local_copy = "alexa_top-1m.csv.zip"
    destination = "alexa"

    download_to_file(source_url, local_copy)
    process(local_copy, destination)
        'description':
        "Fingerprint of {type} taken from Mozilla's lists at https://wiki.mozilla.org/CA"
        .format(type=type),
        'list':
        hashes,
        'type':
        'string',
        'matching_attributes': [
            "md5", "sha1", "sha256", "filename|md5", "filename|sha1",
            "filename|sha256", "x509-fingerprint-md5", "x509-fingerprint-sha1",
            "x509-fingerprint-sha256"
        ]
    }

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    # Mozilla CCADB certificate reports: included (trusted) CAs and all
    # known intermediate certificates chaining to them.
    Included_CA_url = 'https://ccadb-public.secure.force.com/mozilla/IncludedCACertificateReportPEMCSV'
    Included_CA_file = 'IncludedCACertificateReportPEMCSV.csv'
    Included_CA_dst = 'mozilla-CA'
    CA_known_intermediate_url = 'https://ccadb-public.secure.force.com/mozilla/PublicAllIntermediateCertsWithPEMCSV'
    CA_known_intermediate_file = 'PublicAllIntermediateCertsWithPEMCSV.csv'
    CA_known_intermediate_dst = 'mozilla-IntermediateCA'

    download_to_file(Included_CA_url, Included_CA_file)
    process(Included_CA_file, Included_CA_dst, 'trusted CA certificates')
    download_to_file(CA_known_intermediate_url, CA_known_intermediate_file)
    # Fixed typo: "intermedicate" -> "intermediate" (this string is
    # interpolated into the generated warninglist description).
    process(CA_known_intermediate_file, CA_known_intermediate_dst,
            'known intermediate of trusted certificates')

def process(file, dst):
    """Build the Microsoft Azure datacenter IP-range warninglist.

    :param file: path of the downloaded Azure IP-ranges JSON document
    :param dst: destination folder for the generated warninglist
    """
    with open(file, 'r') as json_file:
        ms_azure_ip_list = json.load(json_file)

    # Flatten each service entry's address prefixes into one CIDR list.
    prefixes = []
    for entry in ms_azure_ip_list['values']:
        prefixes.extend(entry['properties']['addressPrefixes'])

    warninglist = {
        'name': 'List of known Microsoft Azure Datacenter IP Ranges',
        'version': get_version(),
        'description': 'Microsoft Azure Datacenter IP Ranges',
        'list': prefixes,
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    download_page_url = 'https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519'
    local_json = 'ms-azure.json'
    destination = 'microsoft-azure'

    # The confirmation page only links to the JSON file; resolve the
    # actual download URL from the page contents first.
    json_url = get_json_url(download(download_page_url))
    download_to_file(json_url, local_json)
    process(local_json, destination)
    }, {
        "name":
        "List of known Microsoft Azure US Government Cloud Datacenter IP Ranges",
        "description":
        "Microsoft Azure US Government Cloud Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57063",
        "file": "ms-azure-us-gov.json",
        "destination_folder": "microsoft-azure-us-gov",
    }, {
        "name": "List of known Microsoft Azure Germany Datacenter IP Ranges",
        "description": "Microsoft Azure Germany Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57064",
        "file": "ms-azure-germany.json",
        "destination_folder": "microsoft-azure-germany",
    }, {
        "name": "List of known Microsoft Azure China Datacenter IP Ranges",
        "description": "Microsoft Azure China Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57062",
        "file": "ms-azure-china.json",
        "destination_folder": "microsoft-azure-china",
    }]

    for type in TYPES:
        ms_azure_json_url = get_json_url(download(type["url"]))
        download_to_file(ms_azure_json_url, type["file"])
        process(type["file"], type["destination_folder"], type["name"],
                type["description"])