def process(url, warninglist, dst):
    # Fetch the raw list and deduplicate its whitespace-separated entries
    whitelist = download(url).text
    whitelist = list(set(whitelist.split()))

    warninglist['type'] = 'hostname'
    warninglist['matching_attributes'] = ['domain', 'hostname', 'url']
    warninglist['version'] = get_version()
    warninglist['list'] = whitelist

    write_to_file(warninglist, dst)
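A sketch of how such a process() might be driven, following the __main__ pattern used elsewhere on this page; the URL, list name, and destination folder below are illustrative placeholders, not values from the original source.

if __name__ == '__main__':
    # All values below are illustrative placeholders
    partial_list = {
        'name': 'Example hostname allowlist',
        'description': 'Hostnames fetched from an example source',
    }
    process('https://example.com/allowlist.txt', partial_list, 'example-allowlist')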
Example #2
def get_lists(url):
    service_list = download(url).json()

    lurls = []
    lips = []

    for service in service_list:
        # Skip URL entries with an embedded wildcard label (".*.");
        # for the rest, strip bare '*' wildcards before keeping them
        for service_url in service.get('urls', []):
            if ".*." not in service_url:
                lurls.append(service_url.replace('*', ''))
        for ip in service.get('ips', []):
            lips.append(ip)

    return lurls, lips
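To make the wildcard handling concrete, here is what the URL loop keeps and drops, using made-up sample data:

# Made-up sample data to illustrate the filtering above
services = [{
    "urls": ["teams.example.com", "login.*.example.net", "*.cdn.example.org"],
    "ips": ["203.0.113.0/24"],
}]
# "login.*.example.net" contains ".*." and is dropped entirely;
# "*.cdn.example.org" is kept with its '*' stripped -> ".cdn.example.org";
# "teams.example.com" passes through unchanged.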
Example #3
def process(url, dst):
    warninglist = {
        'name': 'TLDs as known by IANA',
        'version': get_version(),
        'description': 'Event contains one or more TLDs as attribute with an IDS flag set',
        'list': [],
        'matching_attributes': ["hostname", "domain", "domain|ip"],
        'type': 'string'
    }

    r = download(url)
    for tld in r.text.splitlines():
        # Skip comment lines in the feed
        if tld.startswith('#'):
            continue
        warninglist['list'].append(tld)

    write_to_file(warninglist, dst)
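A hypothetical driver for this generator; the URL is IANA's published TLD list, and the destination folder name is illustrative.

if __name__ == '__main__':
    # The URL is IANA's TLD list; 'tlds' is an illustrative folder name
    process('https://data.iana.org/TLD/tlds-alpha-by-domain.txt', 'tlds')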
Example #4
def get_networks_for_asn(asn: int) -> List[str]:
    temp_file = get_abspath_source_file("bgp-asn-{}".format(asn))

    try:
        # Reuse the cached copy of the prefixes if one exists
        with open(temp_file, "r") as f:
            prefixes = json.load(f)
    except (OSError, ValueError):
        sleep(0.5)  # the API is rate-limited, so wait between requests
        response = download(
            "https://api.bgpview.io/asn/{}/prefixes".format(asn))
        response.raise_for_status()
        prefixes = response.json()
        with open(temp_file, "w") as f:
            json.dump(prefixes, f)

    output = []
    for ipv4_prefix in prefixes["data"]["ipv4_prefixes"]:
        output.append(ipv4_prefix["prefix"])

    for ipv6_prefix in prefixes["data"]["ipv6_prefixes"]:
        output.append(ipv6_prefix["prefix"])
    return output
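A usage sketch tying this helper to the warninglist pattern above; the ASN, list name, and folder are illustrative, and consolidate_networks is the helper imported from generator in a later example on this page.

# Illustrative only: AS64496 is a documentation ASN, the names are made up
networks = get_networks_for_asn(64496)
warninglist = {
    'name': 'Example ASN address ranges',
    'version': get_version(),
    'description': 'IP ranges announced by an example ASN',
    'type': 'cidr',
    'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
    'list': consolidate_networks(networks),
}
write_to_file(warninglist, 'example-asn')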
Example #5
def process(url, dst):

    university_list = download(url).json()

    warninglist = {
        'type': "string",
        'name': "University domains",
        'matching_attributes': ['hostname', 'domain', 'url', 'domain|ip'],
        'version': get_version(),
        'description':
        "List of University domains from https://raw.githubusercontent.com/Hipo/university-domains-list/master/world_universities_and_domains.json",
        'list': []
    }

    for university in university_list:
        # Guard against entries without a 'domains' key and deduplicate
        # while preserving order
        for domain in university.get('domains', []):
            if domain not in warninglist['list']:
                warninglist['list'].append(domain)

    write_to_file(warninglist, dst)
Example #6
def process(url, dst):
    warninglist = {
        'name': 'List of known Wikimedia address ranges',
        'version': get_version(),
        'description':
        'Wikimedia address ranges (http://noc.wikimedia.org/conf/reverse-proxy.php.txt)',
        'type': 'cidr',
        'list': [],
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"]
    }

    # Pull every single-quoted token out of the PHP source; only tokens
    # that parse as valid IP networks are kept below
    matched = re.findall(r'\'(.*?)\'',
                         codecs.decode(download(url).content, 'UTF-8'))
    for ip in matched:
        try:
            ipaddress.ip_network(ip)
            warninglist['list'].append(ip)
        except ValueError:
            pass

    write_to_file(warninglist, dst)
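ipaddress.ip_network() doubles as the validator here: quoted tokens from the PHP file that are not networks raise ValueError and are silently skipped. A self-contained illustration with made-up tokens:

import ipaddress

for token in ["198.51.100.0/24", "2001:db8::/32", "text/html"]:
    try:
        ipaddress.ip_network(token)
        print("kept:", token)
    except ValueError:
        print("skipped:", token)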
Example #7
def search(term: str):
    response = download(
        "https://api.bgpview.io/search?query_term={}".format(term))
    response.raise_for_status()
    return response.json()
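One caveat on this helper: the search term is interpolated into the query string verbatim. If the term can contain reserved characters, percent-encoding it first is safer; a sketch of the same function with that change:

from urllib.parse import quote

def search(term: str):
    # Percent-encode the term so reserved characters survive the query string
    response = download(
        "https://api.bgpview.io/search?query_term={}".format(quote(term)))
    response.raise_for_status()
    return response.json()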
Example #8
#!/usr/bin/env python3

from generator import download, get_version, write_to_file

if __name__ == '__main__':
    source_url = 'https://publicsuffix.org/list/public_suffix_list.dat'
    destination_folder = 'second-level-tlds'

    data = download(source_url).text
    lines = data.split("\n")
    # Drop empty lines and comment lines
    domains = [
        line.strip() for line in lines
        if len(line.strip()) != 0 and not line.startswith('//')
    ]
    # Convert IDN domain to xn-- format
    domains = [domain.encode('idna').decode('utf-8') for domain in domains]
    # Drop exception rules ('!...') and strip leading wildcard characters
    domains = [
        domain.lstrip('*.') for domain in domains if not domain.startswith('!')
    ]

    warninglist = {
        'name': 'Second level TLDs as known by Mozilla Foundation',
        'description':
        'Event contains one or more second level TLDs as attribute with an IDS flag set.',
        'matching_attributes': ['hostname', 'domain', 'domain|ip'],
        'type': 'string',
        'version': get_version(),
        'list': domains,
    }

    write_to_file(warninglist, destination_folder)
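The idna codec pass above is what turns Unicode entries from the public suffix list into their ASCII (Punycode) form, for example:

# Illustrative: Python's built-in idna codec yields the xn-- form
assert 'bücher.example'.encode('idna') == b'xn--bcher-kva.example'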
Example #9
import generator

url_list = [
    "https://www.gutenberg.org/files/1952/1952-0.txt",
    "https://www.gutenberg.org/files/43/43-0.txt"
]

# Instantiate the class under a different name to avoid shadowing the module
gen = generator.generator(url_list)

gen.download("https://www.gutenberg.org/files/1342/1342-0.txt",
             "PrideandPrejudice.txt")
Example #10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
from generator import download, get_version, write_to_file, consolidate_networks

if __name__ == '__main__':
    cloud = download("https://www.gstatic.com/ipranges/cloud.json")
    parsed = json.loads(cloud.text)

    # Each prefix entry carries either an "ipv4Prefix" or an "ipv6Prefix" key
    ranges = [
        p["ipv4Prefix"] if "ipv4Prefix" in p else p["ipv6Prefix"]
        for p in parsed["prefixes"]
    ]

    warninglist = {
        'name': "List of known GCP (Google Cloud Platform) IP address ranges",
        'version': get_version(),
        'description':
        "GCP (Google Cloud Platform) IP address ranges (https://www.gstatic.com/ipranges/cloud.json)",
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr',
        'list': consolidate_networks(ranges),
    }

    write_to_file(warninglist, "google-gcp")
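consolidate_networks comes from the local generator module; this page does not show its implementation, but a rough stdlib-only approximation of network consolidation looks like this (a sketch, not the helper's actual code):

import ipaddress

def collapse(prefixes):
    # Collapse overlapping/adjacent networks, handling each family separately
    nets = [ipaddress.ip_network(p) for p in prefixes]
    v4 = ipaddress.collapse_addresses(n for n in nets if n.version == 4)
    v6 = ipaddress.collapse_addresses(n for n in nets if n.version == 6)
    return [str(n) for n in v4] + [str(n) for n in v6]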

Example #11
def process(file, dst, name, description):

    warninglist = {
        'name': name,
        'version': get_version(),
        'description': description,
        'list': [],
        'matching_attributes': ["ip-src", "ip-dst", "domain|ip"],
        'type': 'cidr'
    }

    with open(file, 'r') as json_file:
        ms_azure_ip_list = json.load(json_file)

    for value in ms_azure_ip_list['values']:
        warninglist['list'] += value['properties']['addressPrefixes']

    write_to_file(warninglist, dst)


if __name__ == '__main__':
    TYPES = [{
        "name": "List of known Microsoft Azure Datacenter IP Ranges",
        "description": "Microsoft Azure Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519",
        "file": "ms-azure.json",
        "destination_folder": "microsoft-azure",
    }, {
        "name":
        "List of known Microsoft Azure US Government Cloud Datacenter IP Ranges",
        "description":
        "Microsoft Azure US Government Cloud Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57063",
        "file": "ms-azure-us-gov.json",
        "destination_folder": "microsoft-azure-us-gov",
    }, {
        "name": "List of known Microsoft Azure Germany Datacenter IP Ranges",
        "description": "Microsoft Azure Germany Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57064",
        "file": "ms-azure-germany.json",
        "destination_folder": "microsoft-azure-germany",
    }, {
        "name": "List of known Microsoft Azure China Datacenter IP Ranges",
        "description": "Microsoft Azure China Datacenter IP Ranges",
        "url":
        "https://www.microsoft.com/en-us/download/confirmation.aspx?id=57062",
        "file": "ms-azure-china.json",
        "destination_folder": "microsoft-azure-china",
    }]

    for entry in TYPES:
        ms_azure_json_url = get_json_url(download(entry["url"]))
        download_to_file(ms_azure_json_url, entry["file"])
        process(entry["file"], entry["destination_folder"], entry["name"],
                entry["description"])