# Example 1
def test_vuln_id_parser():
    DEBUG = False
    ##DEBUG = True

    if DEBUG: from pprint import pprint

    print "Testing the vulnerability ID parsers..."
    if DEBUG:
        print "-" * 79
        pprint(_test_case_extract_solution)
        print "-" * 79
    vulns = extract_vuln_ids(_test_case_extract)
    if DEBUG:
        pprint(vulns)
        print "-" * 79
    assert vulns == _test_case_extract_solution
    all_vulns = []
    for v in vulns.values():
        all_vulns.extend(v)
    all_vulns.sort()
    if DEBUG:
        pprint(all_vulns)
        print "-" * 79
    refs = convert_vuln_ids_to_references(all_vulns)
    if DEBUG:
        pprint(refs)
        print "-" * 79
    unrefs = convert_references_to_vuln_ids(refs)
    if DEBUG:
        pprint(unrefs)
        print "-" * 79
    assert unrefs == vulns

    print "Testing reference URLs..."
    import requests
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36"}
    for url in refs:
        print "--> " + url
        requests.get(url, headers=headers)
# Example 2
    def __query_exploitdb(self, url_template, query_string):
        """
        Query Exploit-DB by scraping paginated search results.

        :param url_template: URL template with two placeholders to be
            filled via the % operator: the page number and the query
            string, in that order.
        :param query_string: Search query to send to Exploit-DB.
        :returns: Set of collected Exploit-DB vulnerability IDs.
        """

        # This is where we'll collect the IDs.
        collected = set()

        # For each page of results...
        page = 1
        while True:

            # Build the request URL.
            url = url_template % (page, query_string)

            # Get the result page. Stop on any errors.
            # The timeout prevents the scraper from hanging forever on
            # an unresponsive server; best-effort, so any failure just
            # ends the pagination.
            try:
                result = requests.get(url, timeout=30).content
            except Exception:
                break

            # Extract all URLs from the result page.
            urls = self.REGEXP.findall(result)

            # Extract all Exploit-DB IDs from the URLs.
            ids = convert_references_to_vuln_ids(urls).get("EDB", [])

            # If no *new* IDs were found, stop. Requiring new IDs (not
            # just any IDs) also guards against an infinite loop when
            # the server keeps serving the same last page of results.
            if not ids or collected.issuperset(ids):
                break

            # Collect the IDs.
            collected.update(ids)

            # Go to the next page.
            page += 1

        # Return the collected IDs.
        return collected
# Example 3
    def __query_exploitdb(self, url_template, query_string):
        """
        Query Exploit-DB by scraping paginated search results.

        :param url_template: URL template with two placeholders to be
            filled via the % operator: the page number and the query
            string, in that order.
        :param query_string: Search query to send to Exploit-DB.
        :returns: Set of collected Exploit-DB vulnerability IDs.
        """

        # This is where we'll collect the IDs.
        collected = set()

        # For each page of results...
        page = 1
        while True:

            # Build the request URL.
            url = url_template % (page, query_string)

            # Get the result page. Stop on any errors.
            # The timeout prevents the scraper from hanging forever on
            # an unresponsive server; best-effort, so any failure just
            # ends the pagination.
            try:
                result = requests.get(url, timeout=30).content
            except Exception:
                break

            # Extract all URLs from the result page.
            urls = self.REGEXP.findall(result)

            # Extract all Exploit-DB IDs from the URLs.
            ids = convert_references_to_vuln_ids(urls).get("EDB", [])

            # If no *new* IDs were found, stop. Requiring new IDs (not
            # just any IDs) also guards against an infinite loop when
            # the server keeps serving the same last page of results.
            if not ids or collected.issuperset(ids):
                break

            # Collect the IDs.
            collected.update(ids)

            # Go to the next page.
            page += 1

        # Return the collected IDs.
        return collected
def test_vuln_id_parser():
    DEBUG = False
    ##DEBUG = True

    if DEBUG:
        from pprint import pprint

    print "Testing the vulnerability ID parsers..."
    if DEBUG:
        print "-" * 79
        print "-- test case solution"
        pprint(_test_case_extract_solution)
        print "-" * 79
    vulns = extract_vuln_ids(_test_case_extract)
    if DEBUG:
        print "-- extracted vuln ids"
        pprint(vulns)
        print "-" * 79
    assert vulns == _test_case_extract_solution
    all_vulns = []
    for v in vulns.values():
        all_vulns.extend(v)
    all_vulns.sort()
    if DEBUG:
        print "-- only the ids"
        pprint(all_vulns)
        print "-" * 79
    refs = convert_vuln_ids_to_references(all_vulns)
    if DEBUG:
        print "-- references"
        pprint(refs)
        print "-" * 79
    unrefs = convert_references_to_vuln_ids(refs)
    if DEBUG:
        print "-- vuln ids back from references"
        pprint(unrefs)
        print "-" * 79
    assert unrefs == vulns
    urls = []
    for url in _test_case_url.split("\n"):
        url = url.strip()
        if not url:
            continue
        urls.append(url)
    parsed = set()
    for vuln_ids in convert_references_to_vuln_ids(urls).itervalues():
        parsed.update(vuln_ids)
    if DEBUG:
        print "-- test case"
        pprint(urls)
        print "-" * 79
        print "-- extracted vuln ids"
        pprint(sorted(parsed))
        print "-" * 79
    assert len(urls) == len(parsed), "%d vs %d" % (len(urls), len(parsed))

    print "Testing reference URLs..."
    import requests

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36"
    }
    for url in refs:
        print "--> " + url
        requests.get(url, headers=headers, verify=False)
    for url in urls:
        if url not in refs:
            print "--> " + url
            requests.get(url, headers=headers, verify=False)
def test_vuln_id_parser():
    DEBUG = False
    ##DEBUG = True

    if DEBUG: from pprint import pprint

    print "Testing the vulnerability ID parsers..."
    if DEBUG:
        print "-" * 79
        print "-- test case solution"
        pprint(_test_case_extract_solution)
        print "-" * 79
    vulns = extract_vuln_ids(_test_case_extract)
    if DEBUG:
        print "-- extracted vuln ids"
        pprint(vulns)
        print "-" * 79
    assert vulns == _test_case_extract_solution
    all_vulns = []
    for v in vulns.values():
        all_vulns.extend(v)
    all_vulns.sort()
    if DEBUG:
        print "-- only the ids"
        pprint(all_vulns)
        print "-" * 79
    refs = convert_vuln_ids_to_references(all_vulns)
    if DEBUG:
        print "-- references"
        pprint(refs)
        print "-" * 79
    unrefs = convert_references_to_vuln_ids(refs)
    if DEBUG:
        print "-- vuln ids back from references"
        pprint(unrefs)
        print "-" * 79
    assert unrefs == vulns
    urls = []
    for url in _test_case_url.split("\n"):
        url = url.strip()
        if not url:
            continue
        urls.append(url)
    parsed = set()
    for vuln_ids in convert_references_to_vuln_ids(urls).itervalues():
        parsed.update(vuln_ids)
    if DEBUG:
        print "-- test case"
        pprint(urls)
        print "-" * 79
        print "-- extracted vuln ids"
        pprint(sorted(parsed))
        print "-" * 79
    assert len(urls) == len(parsed), "%d vs %d" % (len(urls), len(parsed))

    print "Testing reference URLs..."
    import requests
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36"
    }
    for url in refs:
        print "--> " + url
        requests.get(url, headers=headers, verify=False)
    for url in urls:
        if url not in refs:
            print "--> " + url
            requests.get(url, headers=headers, verify=False)