Example #1
def main_intel_amt(url, agent=None, proxy=None):
    proxy = proxy_string_to_dict(proxy) or None
    agent = agent or DEFAULT_USER_AGENT
    logger.info(
        set_color(
            "attempting to connect to '{}' and get hardware info...".format(
                url)))
    try:
        json_data = __get_hardware(url, agent=agent, proxy=proxy)
        if json_data is None:
            logger.error(
                set_color("unable to get any information, skipping...",
                          level=40))
            pass
        else:
            print("-" * 40)
            for key in json_data.keys():
                print("{}:".format(str(key).capitalize()))
                for item in json_data[key]:
                    print(" - {}: {}".format(item.capitalize(),
                                             json_data[key][item]))
            print("-" * 40)
    except Exception as e:
        if "Temporary failure in name resolution" in str(e):
            logger.error(
                set_color("failed to connect on '{}', skipping...".format(url),
                          level=40))
            pass
        else:
            logger.exception(
                set_color(
                    "ran into exception '{}', cannot continue...".format(e)))
            request_issue_creation()
Example #2
def __config_proxy(proxy_string):
    proxy_type_schema = {
        "http": httplib2.socks.PROXY_TYPE_HTTP,
        "socks4": httplib2.socks.PROXY_TYPE_SOCKS4,
        "socks5": httplib2.socks.PROXY_TYPE_SOCKS5
    }
    proxy_type = get_proxy_type(proxy_string)[0]
    proxy_dict = proxy_string_to_dict(proxy_string)
    proxy_config = httplib2.ProxyInfo(
        proxy_type=proxy_type_schema[proxy_type],
        proxy_host="".join(proxy_dict.keys()),
        proxy_port="".join(proxy_dict.values()))
    return proxy_config
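The ProxyInfo object built above only does something once it is handed to an httplib2 client. A minimal usage sketch, assuming a SOCKS5 proxy string and that the helpers above resolve host and port as intended; the proxy address and target URL are placeholders:

import httplib2

# hypothetical usage of the helper defined above
proxy_info = __config_proxy("socks5://127.0.0.1:9050")
client = httplib2.Http(proxy_info=proxy_info, timeout=10)
response, content = client.request("http://example.com", "GET")
print(response.status)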
Example #3
def scan_xss(url, agent=None, proxy=None):
    user_agent = agent or DEFAULT_USER_AGENT
    config_proxy = proxy_string_to_dict(proxy)
    config_headers = {"connection": "close", "user-agent": user_agent}
    xss_request = requests.get(url,
                               proxies=config_proxy,
                               headers=config_headers)
    html_data = xss_request.content
    query = find_xss_script(url)
    for db in DBMS_ERRORS.keys():
        for item in DBMS_ERRORS[db]:
            if re.findall(item, html_data):
                return "sqli", db
    if query in html_data:
        return True, None
    return False, None
Example #4
def load_headers(url, **kwargs):
    """
    load the URL headers
    """
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if proxy is not None:
        proxy = proxy_string_to_dict(proxy)
    if not xforward:
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }
    else:
        ip_list = create_random_ip(), create_random_ip(), create_random_ip()
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2]
            )
        }
    req = requests.get(url, headers=header_value, proxies=proxy, timeout=10)
    if len(req.cookies) > 0:
        logger.info(set_color(
            "found a request cookie, saving to file...", level=25
        ))
        try:
            cookie_start = req.cookies.keys()
            cookie_value = req.cookies.values()
            write_to_log_file(
                "{}={}".format(''.join(cookie_start), ''.join(cookie_value)),
                COOKIE_LOG_PATH, COOKIE_FILENAME.format(replace_http(url))
            )
        except Exception:
            write_to_log_file(
                [c for c in req.cookies.itervalues()], COOKIE_LOG_PATH,
                COOKIE_FILENAME.format(replace_http(url))
            )
    return req.headers
Example #5
def scan_xss(url, agent=None, proxy=None):
    """
    scan the payload to see if the XSS is still present in the HTML; if it is, there's a very
    good chance that the URL is vulnerable to XSS attacks. Usually the payload will be
    tampered with or encoded if the site is not vulnerable
    """
    user_agent = agent or DEFAULT_USER_AGENT
    config_proxy = proxy_string_to_dict(proxy)
    config_headers = {"connection": "close", "user-agent": user_agent}
    xss_request = requests.get(url,
                               proxies=config_proxy,
                               headers=config_headers)
    html_data = xss_request.content
    query = find_xss_script(url)
    for db in DBMS_ERRORS.keys():
        for item in DBMS_ERRORS[db]:
            if re.findall(item, html_data):
                return "sqli", db
    if query in html_data:
        return True, None
    return False, None
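Because scan_xss mixes two result shapes (the "sqli" marker plus a DBMS name, or a boolean plus None), a short caller sketch may help; the URL is a placeholder and assumes find_xss_script and DBMS_ERRORS are configured as in the surrounding module:

# hypothetical caller illustrating the return convention of scan_xss above
result, dbms = scan_xss("http://example.com/search?q=%3Cscript%3Etest%3C%2Fscript%3E")
if result == "sqli":
    print("DBMS error page detected ({}), worth checking for SQL injection".format(dbms))
elif result is True:
    print("payload reflected unmodified, likely vulnerable to XSS")
else:
    print("payload was encoded or stripped, probably not vulnerable")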
Example #6
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if xforward:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(
            set_color("attempting connection to '{}'...".format(url),
                      level=10))
    try:
        protection_check_req = requests.get(
            url,
            headers=headers,
            proxies=proxy_string_to_dict(proxy),
            timeout=20)

        html, status, headers = (protection_check_req.content,
                                 protection_check_req.status_code,
                                 protection_check_req.headers)

        for dbms in DBMS_ERRORS:  # make sure there are no DBMS errors in the HTML
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.info(
                        set_color(
                            "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                            "to SQL injection attacks. it appears the backend DBMS is '{}'..."
                            .format(dbms),
                            level=25))
                    return None

        retval = []
        if status != 200 and "not found" not in html.lower():
            file_list = [
                f for f in os.listdir(DETECT_FIREWALL_PATH)
                if not any(ex in f for ex in ["__init__", ".pyc"])
            ]
            for item in file_list:
                item = item[:-3]
                detection_name = "lib.firewall.{}"
                detection_name = detection_name.format(item)
                detection_name = importlib.import_module(detection_name)
                if detection_name.detect(html, headers=headers, status=status):
                    retval.append(detection_name.__item__)
            if len(retval) > 1:
                if "Generic (Unknown)" in retval:
                    item = retval.index("Generic (Unknown)")
                    del retval[item]
            else:
                # guard against an empty result list to avoid an IndexError
                if retval and retval[0] == "Generic (Unknown)":
                    logger.warning(
                        set_color(
                            "identified WAF/IDS/IPS is unknown to Zeus, if you know the firewall and the context "
                            "of the firewall, please create an issue ({}), fingerprint of the firewall will be "
                            "written to a log file...".format(ISSUE_LINK),
                            level=30))
                    full_finger_print = "HTTP/1.1 {}\n{}\n{}".format(
                        status, headers, html)
                    write_to_log_file(
                        full_finger_print, UNKNOWN_FIREWALL_FINGERPRINT_PATH,
                        UNKNOWN_FIREWALL_FILENAME.format(replace_http(url)))
        else:
            retval = None

        return ''.join(retval) if isinstance(retval, list) else retval

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(
                set_color(
                    "detection request timed out, assuming no protection and continuing...",
                    level=30))
            return None
        else:
            logger.exception(
                set_color(
                    "Zeus ran into an unexpected error '{}'...".format(e),
                    level=50))
            request_issue_creation()
            return None
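detect_protection only assumes that each script under lib/firewall exposes a detect() callable and an __item__ name, as the loop above shows. A minimal plugin sketch under that assumption; the product name, regexes, and status check are purely illustrative:

# lib/firewall/example_waf.py -- hypothetical plugin layout
import re

__item__ = "Example WAF (ExampleCorp)"


def detect(content, **kwargs):
    headers = kwargs.get("headers", {})
    status = kwargs.get("status", 0)
    # match a banner header or a block-page marker in the HTML
    detection_schema = (
        re.compile(r"example[-_]waf", re.I),
        re.compile(r"blocked by examplecorp", re.I)
    )
    for detection in detection_schema:
        if detection.search(str(headers)) is not None:
            return True
        if content is not None and detection.search(content) is not None:
            return True
    # some WAFs answer probe payloads with a characteristic status code
    return status == 406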
Example #7
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):
    """
    search multiple pages for a lot of links, this will not be done via Google
    """
    proxy = kwargs.get("proxy", None)
    agent = kwargs.get("agent", None)
    xforward = kwargs.get("xforward", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)
    attrib, desc = "a", "href"
    retval = set()
    search_engine = AUTHORIZED_SEARCH_ENGINES["search-results"]

    logger.warning(
        set_color("searching multiple pages will not be done on Google",
                  level=30))

    if not parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch):
        shutdown()

    if not xforward:
        headers = {"Connection": "close", "user-agent": agent}
    else:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            "Connection": "close",
            "user-agent": agent,
            "X-Forwarded-For": "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2])
        }

    page_number = 1
    try:
        while len(retval) <= link_amount:
            if verbose:
                logger.debug(
                    set_color("searching page number {}".format(page_number),
                              level=10))
            if page_number % 10 == 0:
                logger.info(
                    set_color("currently on page {} of search results".format(
                        page_number)))
            page_request = requests.get(
                search_engine.format(page_number, query, page_number),
                headers=headers,
                proxies=proxy_string_to_dict(proxy))
            if page_request.status_code == 200:
                html_page = page_request.content
                soup = BeautifulSoup(html_page, "html.parser")
                if not NO_RESULTS_REGEX.findall(str(soup)):
                    for link in soup.findAll(attrib):
                        redirect = link.get(desc)
                        if redirect is not None:
                            if not any(ex in redirect for ex in URL_EXCLUDES):
                                if URL_REGEX.match(redirect):
                                    retval.add(redirect)
                    if page_number < MAX_PAGE_NUMBER:
                        page_number += 1
                    else:
                        logger.warning(
                            set_color("hit max page number {}".format(
                                MAX_PAGE_NUMBER),
                                      level=30))
                        break
                else:
                    logger.warning(
                        set_color("no more results found for given query '{}'".
                                  format(query),
                                  level=30))
                    break
    except KeyboardInterrupt:
        logger.error(
            set_color("user aborted, dumping already found URL(s)", level=40))
        write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        logger.info(
            set_color("found a total of {} URL(s)".format(len(retval)),
                      level=25))
        shutdown()
    except Exception as e:
        logger.exception(
            set_color("Zeus ran into an unexpected error '{}'".format(e),
                      level=50))
        request_issue_creation()
        shutdown()

    if len(retval) > 0:
        logger.info(
            set_color(
                "a total of {} URL(s) found out of the requested {}".format(
                    len(retval), link_amount),
                level=25))
        file_path = write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            amount_of_urls = len(open(file_path).readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
        return list(retval)
    else:
        logger.warning(
            set_color(
                "did not find any links with given query '{}' writing to blacklist"
                .format(query),
                level=30))
        write_to_log_file(query, BLACKLIST_FILE_PATH, BLACKLIST_FILENAME)
Example #8
def parse_search_results(query, url_to_search, verbose=False, **kwargs):
    """
    Parse a webpage from Google for URLs with a GET(query) parameter
    """
    possible_leftovers = URLParser(None).possible_leftovers
    splitter = "&amp;"
    retval = set()
    query_url = None

    parse_webcache = kwargs.get("parse_webcache", False)
    pull_all = kwargs.get("pull_all", False)
    proxy_string = kwargs.get("proxy", None)
    user_agent = kwargs.get("agent", None)
    forward_for = kwargs.get("forward_for", False)
    tor = kwargs.get("tor", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)

    if verbose:
        logger.debug(set_color("parsing blacklist", level=10))
    parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch)

    if verbose:
        logger.debug(
            set_color("checking for user-agent and proxy configuration",
                      level=10))

    if not parse_webcache and "google" in url_to_search:
        logger.warning(
            set_color(
                "will not parse webcache URL's (to parse webcache pass -W)",
                level=30))
    if not pull_all:
        logger.warning(
            set_color(
                "only pulling URLs with GET(query) parameters (to pull all URL's pass -E)",
                level=30))

    user_agent_info = "adjusting user-agent header to {}"
    if user_agent is not None and user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format(
            "default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}"
    if proxy_string is not None:
        proxy_string = proxy_string_to_dict(proxy_string)
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" +
            ''.join(proxy_string.values()))
    elif tor:
        proxy_string = proxy_string_to_dict("socks5://127.0.0.1:9050")
        proxy_string_info = proxy_string_info.format("tor proxy settings")
    else:
        proxy_string_info = "no proxy configuration detected"

    if forward_for:
        ip_to_use = (create_random_ip(), create_random_ip(),
                     create_random_ip())
        if verbose:
            logger.debug(
                set_color(
                    "random IP addresses generated for headers '{}'".format(
                        ip_to_use),
                    level=10))

        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: user_agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_to_use[0], ip_to_use[1], ip_to_use[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: user_agent
        }
    logger.info(set_color("attempting to gather query URL"))
    try:
        query_url = get_urls(query,
                             url_to_search,
                             verbose=verbose,
                             user_agent=user_agent,
                             proxy=proxy_string,
                             tor=tor,
                             batch=batch,
                             xforward=forward_for)
    except Exception as e:
        if "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(
                set_color(
                    "firefox was not found in the default location on your system, "
                    "check your installation and make sure it is in /usr/lib, if you "
                    "find it there, restart your system and try again",
                    level=50))
        elif "connection refused" in str(e).lower():
            logger.fatal(
                set_color(
                    "there are to many sessions of firefox opened and selenium cannot "
                    "create a new one",
                    level=50))
            run_fix(
                "would you like to attempt to auto clean the open sessions",
                "sudo sh {}".format(CLEANUP_TOOL_PATH),
                "kill off the open sessions of firefox and re-run Zeus",
                exit_process=True)
        elif "Program install error!" in str(e):
            logger.error(
                set_color(
                    "seems the program is having some trouble installing would you like "
                    "to try and automatically fix this issue",
                    level=40))
            run_fix(
                "would you like to attempt to fix this issue automatically",
                "sudo sh {}".format(FIX_PROGRAM_INSTALL_PATH),
                "you can manually try and re-install Xvfb to fix the problem",
                exit_process=True)
        elif "Message: Reached error page:" in str(e):
            logger.fatal(
                set_color(
                    "geckodriver has hit an error that usually means it needs to be reinstalled",
                    level=50))
            question = prompt(
                "would you like to attempt a reinstallation of the geckodriver",
                opts="yN")
            if question.lower().startswith("y"):
                logger.warning(
                    set_color(
                        "rewriting all executed information, path information, and removing geckodriver",
                        level=30))
                rewrite_all_paths()
                logger.info(
                    set_color(
                        "all paths rewritten, you will be forced to re-install everything next run of Zeus"
                    ))
            else:
                logger.fatal(
                    set_color(
                        "you will need to remove the geckodriver from /usr/bin and reinstall it",
                        level=50))
                shutdown()
        elif "Unable to find a matching set of capabilities" in str(e):
            logger.fatal(
                set_color(
                    "it appears that firefox, selenium, and geckodriver are not playing nice with one another",
                    level=50))
            run_fix(
                "would you like to attempt to resolve this issue automatically",
                "sudo sh {}".format(REINSTALL_TOOL),
                ("you will need to reinstall firefox to a later version, update selenium, and reinstall the "
                 "geckodriver to continue using Zeus"),
                exit_process=True)
        else:
            logger.exception(
                set_color(
                    "{} failed to gather the URL from search engine, caught exception '{}' "
                    "exception has been logged to current log file".format(
                        os.path.basename(__file__),
                        str(e).strip()),
                    level=50))
            request_issue_creation()
        shutdown()
    logger.info(
        set_color("URL successfully gathered, searching for GET parameters"))

    logger.info(set_color(proxy_string_info))

    try:
        req = requests.get(query_url, proxies=proxy_string, headers=headers)
    except ConnectionError:
        logger.warning(
            set_color(
                "target machine refused connection, delaying and trying again",
                level=30))
        time.sleep(3)
        req = requests.get(query_url, proxies=proxy_string, headers=headers)

    logger.info(set_color(user_agent_info))
    req.headers.update(headers)
    found_urls = URL_REGEX.findall(req.text)
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in URL_EXCLUDES):
                if not url == "http://" and not url == "https://":
                    if URL_REGEX.match(url):
                        if isinstance(url, unicode):
                            url = str(url).encode("utf-8")
                        if pull_all:
                            retval.add(url.split(splitter)[0])
                        else:
                            if URL_QUERY_REGEX.match(url.split(splitter)[0]):
                                retval.add(url.split(splitter)[0])
                        if verbose:
                            try:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        url.split(splitter)[0]),
                                              level=10))
                            except TypeError:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        str(url).split(splitter)[0]),
                                              level=10))
                            except AttributeError:
                                logger.debug(
                                    set_color("found '{}".format(str(url)),
                                              level=10))
                        if url is not None:
                            retval.add(url.split(splitter)[0])
    true_retval = set()
    for url in list(retval):
        if any(l in url for l in possible_leftovers):
            url = URLParser(url).strip_url_leftovers()
        if parse_webcache:
            if "webcache" in url:
                logger.info(set_color("found a webcache URL, extracting"))
                url = URLParser(url).extract_webcache_url()
                if verbose:
                    logger.debug(set_color("found '{}'".format(url), level=15))
                true_retval.add(url)
            else:
                true_retval.add(url)
        else:
            true_retval.add(url)

    if len(true_retval) != 0:
        file_path = write_to_log_file(true_retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            amount_of_urls = len(open(file_path).readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
    else:
        logger.fatal(
            set_color(
                "did not find any URLs with given query '{}' writing query to blacklist"
                .format(query),
                level=50))
        write_to_log_file(query,
                          BLACKLIST_FILE_PATH,
                          BLACKLIST_FILENAME,
                          blacklist=True)
        shutdown()
    logger.info(
        set_color("found a total of {} URLs with given query '{}'".format(
            len(true_retval), query)))
    return list(true_retval) if len(true_retval) != 0 else None
Example #9
def main_intel_amt(url, agent=None, proxy=None, **kwargs):
    """
    main attack method to be called
    """
    do_ip_address = kwargs.get("do_ip", False)
    proxy = proxy_string_to_dict(proxy) or None
    agent = agent or DEFAULT_USER_AGENT
    if do_ip_address:
        logger.warning(
            set_color(
                "running against IP addresses may result in the targets refusing the connection...",
                level=30))
        logger.info(
            set_color("will run against IP address instead of hostname..."))
        try:
            url = replace_http(url)
            url = socket.gethostbyname(url)
            logger.info(set_color("discovered IP address {}...".format(url)))
        except Exception as e:
            logger.error(
                set_color(
                    "failed to gather IP address from hostname '{}', received an error '{}'. "
                    "will just run against hostname...".format(url, e),
                    level=40))
            url = url
    logger.info(
        set_color(
            "attempting to connect to '{}' and get hardware info...".format(
                url)))
    try:
        json_data = __get_hardware(url, agent=agent, proxy=proxy)
        if json_data is None:
            logger.error(
                set_color("unable to get any information, skipping...",
                          level=40))
            pass
        else:
            print("-" * 40)
            for key in json_data.keys():
                print("{}:".format(str(key).capitalize()))
                for item in json_data[key]:
                    print(" - {}: {}".format(item.capitalize(),
                                             json_data[key][item]))
            print("-" * 40)
    except requests.exceptions.ConnectionError as e:
        if "Max retries exceeded with url" in str(e):
            logger.error(
                set_color(
                    "failed connection, target machine is actively refusing the connection, skipping...",
                    level=40))
            pass
        else:
            logger.error(
                set_color("failed connection with '{}', skipping...".format(url),
                          level=40))
            pass
    except Exception as e:
        if "Temporary failure in name resolution" in str(e):
            logger.error(
                set_color("failed to connect on '{}', skipping...".format(url),
                          level=40))
            pass
        else:
            logger.exception(
                set_color(
                    "ran into exception '{}', cannot continue...".format(e)))
            request_issue_creation()
Example #10
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if xforward:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(
            set_color("attempting connection to '{}'...".format(url),
                      level=10))
    try:
        protection_check_req = requests.get(
            url,
            headers=headers,
            proxies=proxy_string_to_dict(proxy),
            timeout=20)

        html, status, headers = (protection_check_req.content,
                                 protection_check_req.status_code,
                                 protection_check_req.headers)

        # make sure there are no DBMS errors in the HTML
        for dbms in DBMS_ERRORS:
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.warning(
                        set_color(
                            "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                            "to SQL injection attacks. it appears the backend DBMS is '{}', site will be "
                            "saved for further processing...".format(dbms),
                            level=30))
                    write_to_log_file(url, SQLI_SITES_FILEPATH,
                                      SQLI_FOUND_FILENAME)
                    return None

        retval = []
        file_list = [
            f for f in os.listdir(DETECT_FIREWALL_PATH)
            if not any(ex in f for ex in ["__init__", ".pyc"])
        ]
        for item in file_list:
            item = item[:-3]
            if verbose:
                logger.debug(
                    set_color("loading script '{}'...".format(item), level=10))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers,
                                     status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    del retval[retval.index("Generic (Unknown)")]
                except (Exception, IndexError):
                    logger.warning(
                        set_color(
                            "multiple firewalls identified ({}), displaying most likely..."
                            .format(", ".join(
                                [item.split("(")[0] for item in retval])),
                            level=30))
                    del retval[retval.index(retval[1])]
                    if len(retval) >= 2:
                        del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(
                    set_color(
                        "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                        "if you know the details or the context of the firewall please create "
                        "an issue ({}) with the fingerprint, or a pull request with the script..."
                        .format(ISSUE_LINK),
                        level=30))
                fingerprint = "<!---\nHTTP 1.1\nStatus Code: {}\nHTTP Headers: {}\n--->\n{}".format(
                    status, headers, html)
                write_to_log_file(fingerprint,
                                  UNKNOWN_FIREWALL_FINGERPRINT_PATH,
                                  UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(
                set_color(
                    "detection request failed, assuming no protection and continuing...",
                    level=30))
            return None
        else:
            logger.exception(
                set_color(
                    "Zeus ran into an unexpected error '{}'...".format(e),
                    level=50))
            request_issue_creation()
            return None
Example #11
def load_headers(url, **kwargs):
    """
    load the HTTP headers
    """
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if proxy is not None:
        proxy = proxy_string_to_dict(proxy)
    if not xforward:
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }
    else:
        ip_list = create_random_ip(), create_random_ip(), create_random_ip()
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2])
        }
    req = requests.get(url, headers=header_value, proxies=proxy, timeout=10)
    if len(req.cookies) > 0:
        logger.info(
            set_color("found a request cookie, saving to file...", level=25))
        try:
            cookie_start = req.cookies.keys()
            cookie_value = req.cookies.values()
            write_to_log_file(
                "{}={}".format(''.join(cookie_start), ''.join(cookie_value)),
                COOKIE_LOG_PATH, COOKIE_FILENAME.format(replace_http(url)))
        except Exception:
            write_to_log_file([c for c in req.cookies.itervalues()],
                              COOKIE_LOG_PATH,
                              COOKIE_FILENAME.format(replace_http(url)))
    retval = {}
    do_not_use = []
    http_headers = req.headers
    for header in http_headers:
        try:
            # test to see if there are any unicode errors in the string
            retval[header] = unicodedata.normalize(
                "NFKD",
                u"{}".format(http_headers[header])).encode("ascii",
                                                           errors="ignore")
        # just to be safe, we're going to put all the possible Unicode errors into a tuple
        except (UnicodeEncodeError, UnicodeDecodeError, UnicodeError,
                UnicodeTranslateError, UnicodeWarning):
            # if there are any errors, we're going to append them to a `do_not_use` list
            do_not_use.append(header)
    # clear the dict so we can re-add to it
    retval.clear()
    for head in http_headers:
        # if the header is in the list, we skip it
        if head not in do_not_use:
            retval[head] = http_headers[head]
    # return a dict of safe unicodeless HTTP headers
    return retval
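The normalization step above is what decides whether a header survives into the returned dict; a standalone sketch of that check on a made-up header value:

import unicodedata

# hypothetical header value containing a non-ASCII character
value = u"Caf\xe9-Edge/1.0"
safe_value = unicodedata.normalize("NFKD", value).encode("ascii", "ignore")
# safe_value is now "Cafe-Edge/1.0"; a value that raised a Unicode error here
# would be added to the do_not_use list and dropped from the returned headers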