Example #1
0
def __choose_attack(choice, attacks):
    while True:
        if int(choice) in range(len(attacks)):
            return int(choice)
        else:
            logger.warning(
                set_color("{} is not a valid choice...".format(choice)))
Example #2
0
def tamper(payload, **kwargs):
    warning = kwargs.get("warning", True)

    if warning:
        logger.warning(
            set_color(
                "obfuscating payloads by their entity encoding equivalent may increase the "
                "risk of false positives",
                level=30))

    skip = ";"
    encoding_schema = {
        " ": " ",
        "<": "&lt;",
        ">": "&gt;",
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
    }
    retval = ""
    for char in str(payload):
        if char in encoding_schema:
            retval += encoding_schema[char]
        elif char == skip:
            # the skip character is passed through without being encoded
            retval += char
        else:
            retval += char
    return retval
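A minimal usage sketch for the entity-encoding tamper above (passing warning=False skips the logger call, so no other Zeus internals are needed; the sample payload is purely illustrative):

# hypothetical payload used only to demonstrate the encoding
sample = "<script>alert('zeus')</script>"
print(tamper(sample, warning=False))
# expected output: &lt;script&gt;alert(&apos;zeus&apos;)&lt;/script&gt;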
Example #3
0
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(set_color(
            "base64 tamper scripts may increase the possibility of not finding vulnerabilities "
            "in otherwise vulnerable sites...", level=30
        ))
    return base64.b64encode(payload)
Example #4
0
def detect_plugins(html, headers, **kwargs):
    verbose = kwargs.get("verbose", False)

    try:
        retval = []
        plugin_skip_schema = ("__init__", ".pyc")
        plugin_file_list = [f for f in os.listdir(DETECT_PLUGINS_PATH) if not any(s in f for s in plugin_skip_schema)]
        for plugin in plugin_file_list:
            plugin = plugin[:-3]
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'".format(plugin), level=10
                ))
            plugin_detection = "lib.plugins.{}"
            plugin_detection = plugin_detection.format(plugin)
            plugin_detection = importlib.import_module(plugin_detection)
            if plugin_detection.search(html, headers=headers) is True:
                retval.append((plugin_detection.__product__, plugin_detection.__description__))
        if len(retval) > 0:
            return retval
        return None
    except Exception as e:
        logger.exception(str(e))
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(set_color(
                "plugin request failed, assuming no plugins and continuing", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "plugin detection has failed with error {}".format(str(e))
            ))
            request_issue_creation()
Example #5
0
def __config_search_engine(verbose=False):
    """
    configure the search engine if one different from Google is given
    """
    non_default_msg = "specified to use non-default search engine..."
    se_message = "using '{}' as the search engine..."
    if opt.useDDG:
        if verbose:
            logger.debug(
                set_color(se_message.format("DuckDuckGo"), level=10))
        logger.info(set_color(non_default_msg))
        se = AUTHORIZED_SEARCH_ENGINES["duckduckgo"]
    elif opt.useAOL:
        logger.warning(
            set_color("AOL will take a little longer due to pop-ups...",
                      level=30))
        if verbose:
            logger.debug(set_color(se_message.format("AOL"), level=10))
        logger.info(set_color(non_default_msg))
        se = AUTHORIZED_SEARCH_ENGINES["aol"]
    elif opt.useBing:
        if verbose:
            logger.debug(set_color(se_message.format("Bing"), level=10))
        logger.info(set_color(non_default_msg))
        se = AUTHORIZED_SEARCH_ENGINES["bing"]
    else:
        if verbose:
            logger.debug(
                set_color("using default search engine (Google)...",
                          level=10))
        if opt.fileToEnumerate is None:
            logger.info(set_color("using default search engine..."))
        se = AUTHORIZED_SEARCH_ENGINES["google"]
    return se
Example #6
0
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "hex tamper scripts may increase the risk of false positives...",
                level=30))
    retval = hex(hash(payload))
    if "-" in str(retval):
        return retval[1:-1]
    else:
        return retval
Example #7
0
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "enclosing brackets is meant to be used as an obfuscation "
                "against an already valid vulnerable site...",
                level=30))

    to_enclose = string.digits
    retval = ""
    for char in payload:
        if char in to_enclose:
            char = "['{}']".format(char)
            retval += char
        else:
            retval += char
    return retval
Example #8
0
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "obfuscating the payloads by ordinal equivalents may increase the risk "
                "of false positives...",
                level=30))

    retval = ""
    danger_characters = "%&<>/\\;'\""
    for char in payload:
        if char in danger_characters:
            char = "%{}".format(ord(char) * 10 / 7)
            retval += char
        else:
            retval += char
    return retval
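A minimal usage sketch for the ordinal tamper above (warning=False skips the logger call; Python 2 floor division is assumed, in line with the rest of these examples):

# hypothetical payload used only to demonstrate the substitution
print(tamper("1' AND 1=1", warning=False))
# expected under Python 2: 1%55 AND 1=1  (ord("'") is 39 and 39 * 10 / 7 floors to 55)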
Example #9
0
def tamper(payload, **kwargs):
    warning = kwargs.get("warning", True)
    if warning:
        logger.warning(
            set_color(
                "NULL encoding tamper scripts may increase the possibility of not finding vulnerabilities "
                "in otherwise vulnerable sites...",
                level=30))

    retval = ""
    encoder = "%00"
    for char in payload:
        if char == " ":
            char = encoder
            retval += char
        else:
            retval += char
    return retval
Example #10
0
def main(url, show=False, verbose=False, **kwargs):
    """
    main method to be called
    """
    do_threading = kwargs.get("do_threading", False)
    proc_num = kwargs.get("proc_num", 3)
    logger.info(set_color("parsing robots.txt..."))
    results = check_for_robots(url)
    if not results:
        logger.warning(
            set_color(
                "seems like this page is blocking access to robots.txt...",
                level=30))
    logger.info(set_color("loading extensions..."))
    extensions = __load_extensions()
    if verbose:
        logger.debug(
            set_color("loaded a total of {} extensions...".format(
                len(extensions)),
                      level=10))
    logger.info(set_color("attempting to bruteforce admin panel..."))
    if do_threading:
        logger.warning(
            set_color("starting parallel processing with {} processes, this "
                      "will depend on your GPU speed...".format(proc_num),
                      level=30))
        tasks = []
        for _ in range(0, proc_num):
            p = multiprocessing.Process(target=check_for_admin_page,
                                        args=(url, extensions),
                                        kwargs={
                                            "show_possibles": show,
                                            "verbose": verbose
                                        })
            p.start()
            tasks.append(p)
        for proc in tasks:
            proc.join()
    else:
        check_for_admin_page(url,
                             extensions,
                             show_possibles=show,
                             verbose=verbose)
Example #11
0
def __create_arguments(sqlmap=False, nmap=False):
    """
    create the sqlmap arguments (a list of tuples) that will be passed to the API
    """
    logger.info(set_color(
        "creating arguments for {}...".format("sqlmap" if sqlmap else "nmap")
    ))
    retval = []
    splitter = {"sqlmap": ",", "nmap": "|"}
    if sqlmap:
        if opt.sqlmapArguments is not None:
            for line in opt.sqlmapArguments.split(splitter["sqlmap"]):
                to_use = line.strip().split(" ")
                option = (to_use[0], to_use[1])
                if to_use[0] in SQLMAP_API_OPTIONS:
                    retval.append(option)
                else:
                    logger.warning(set_color(
                        "option '{}' is not recognized by sqlmap API, skipping...".format(option[0]),
                        level=30
                    ))
    elif nmap:
        warning_msg = "option {} is not known by the nmap api, skipping..."
        if opt.nmapArguments is not None:
            for line in opt.nmapArguments.split(splitter["nmap"]):
                try:
                    data = line.index(" ")
                except Exception:
                    data = None
                if data is not None:
                    argument = line[0:data]
                    if argument in NMAP_API_OPTS:
                        retval.append(line)
                    else:
                        logger.warning(set_color(
                            warning_msg.format(argument), level=30
                        ))
                else:
                    if line in NMAP_API_OPTS:
                        retval.append(line)
                    else:
                        logger.warning(set_color(
                            warning_msg.format(line), level=30
                        ))
    return retval
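A short sketch of the tuple-splitting step above, run on a hypothetical sqlmap argument string (the opt object and the SQLMAP_API_OPTIONS whitelist are Zeus internals, so only the parsing is reproduced here):

# hypothetical input in the same comma-separated "--flag value" form parsed above
example_arguments = "--level 5,--risk 3"
parsed = []
for line in example_arguments.split(","):
    to_use = line.strip().split(" ")
    parsed.append((to_use[0], to_use[1]))
print(parsed)  # [('--level', '5'), ('--risk', '3')]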
Example #12
0
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):

    def __config_proxy(proxy_string):
        proxy_type_schema = {
            "http": httplib2.socks.PROXY_TYPE_HTTP,
            "socks4": httplib2.socks.PROXY_TYPE_SOCKS4,
            "socks5": httplib2.socks.PROXY_TYPE_SOCKS5
        }
        proxy_type = get_proxy_type(proxy_string)[0]
        proxy_dict = proxy_string_to_dict(proxy_string)
        proxy_config = httplib2.ProxyInfo(
            proxy_type=proxy_type_schema[proxy_type],
            proxy_host="".join(proxy_dict.keys()),
            proxy_port="".join(proxy_dict.values())
        )
        return proxy_config

    proxy, agent = kwargs.get("proxy", None), kwargs.get("agent", None)

    if proxy is not None:
        if verbose:
            logger.debug(set_color(
                "configuring to use proxy '{}'...".format(proxy), level=10
            ))
        __config_proxy(proxy)

    if agent is not None:
        if verbose:
            logger.debug(set_color(
                "settings user-agent to '{}'...".format(agent), level=10
            ))

    logger.warning(set_color(
        "multiple pages will be searched using Google's API client, searches may be blocked after a certain "
        "amount of time...", level=30
    ))
    results, limit, found, index = set(), link_amount, 0, google_api.search(query, user_agent=agent, safe="on")
    try:
        while limit > 0:
            results.add(next(index))
            limit -= 1
            found += 1
    except Exception as e:
        if "Error 503" in str(e):
            logger.fatal(set_color(
                "Google is blocking the current IP address, dumping already found URL's...", level=50
            ))

    retval = set()
    for url in results:
        if URL_REGEX.match(url) and URL_QUERY_REGEX.match(url):
            if verbose:
                logger.debug(set_color(
                    "found '{}'...".format(url), level=10
                ))
            retval.add(url)

    if len(retval) != 0:
        logger.info(set_color(
            "a total of {} links found out of requested {}...".format(
                len(retval), link_amount
            )
        ))
        write_to_log_file(list(retval), URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.error(set_color(
            "unable to extract URL's from results...", level=40
        ))
Example #13
0
def parse_search_results(
        query, url_to_search, verbose=False, **kwargs):
    """
      Parse a webpage from Google for URL's with a GET(query) parameter
    """
    splitter = "&amp;"
    retval = set()
    query_url = None

    proxy_string, user_agent = kwargs.get("proxy", None), kwargs.get("agent", None)

    if verbose:
        logger.debug(set_color(
            "checking for user-agent and proxy configuration...", level=10
        ))

    user_agent_info = "adjusting user-agent header to {}..."
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format("default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}..."
    if proxy_string is not None:
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" + ''.join(proxy_string.values()))
    else:
        proxy_string_info = "no proxy configuration detected..."

    headers = {
        "Connection": "close",
        "user-agent": user_agent
    }
    logger.info(set_color(
        "attempting to gather query URL..."
    ))
    try:
        query_url = get_urls(query, url_to_search, verbose=verbose, user_agent=user_agent, proxy=proxy_string)
    except Exception as e:
        if "WebDriverException" in str(e):
            logger.exception(set_color(
                "it seems that you exited the browser, please allow the browser "
                "to complete it's run so that Zeus can bypass captchas and API "
                "calls", level=50
            ))
        elif "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(set_color(
                "firefox was not found in the default location on your system, "
                "check your installation and make sure it is in /usr/lib, if you "
                "find it there, restart your system and try again...", level=50
            ))
        else:
            logger.exception(set_color(
                "{} failed to gather the URL from search engine, caught exception '{}' "
                "exception has been logged to current log file...".format(
                    os.path.basename(__file__), str(e).strip()), level=50)
            )
            request_issue_creation()
        shutdown()
    logger.info(set_color(
        "URL successfully gathered, searching for GET parameters..."
    ))

    logger.info(set_color(proxy_string_info))
    logger.info(set_color(user_agent_info))
    req = requests.get(query_url, headers=headers, proxies=proxy_string)
    found_urls = URL_REGEX.findall(req.text)
    url_skip_schema = ("maps.google", "play.google", "youtube")
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in url_skip_schema):
                if URL_QUERY_REGEX.match(url) and not any(l in url for l in URL_EXCLUDES):
                    if isinstance(url, unicode):
                        url = str(url).encode("utf-8")
                    if "webcache" in url:
                        logger.info(set_color(
                            "received webcache URL, extracting URL from webcache..."
                        ))
                        webcache_url = url
                        url = extract_webcache_url(webcache_url)
                        if url is None:
                            logger.warning(set_color(
                                "unable to extract url from given webcache URL '{}'...".format(
                                    webcache_url
                                ), level=30
                            ))
                    if verbose:
                        try:
                            logger.debug(set_color(
                                "found '{}'...".format(url.split(splitter)[0]), level=10
                            ))
                        except TypeError:
                            logger.debug(set_color(
                                "found '{}'...".format(str(url).split(splitter)[0]), level=10
                            ))
                        except AttributeError:
                            logger.debug(set_color(
                                "found '{}...".format(str(url)), level=10
                            ))
                    if url is not None:
                        retval.add(url.split("&amp;")[0])
    logger.info(set_color(
        "found a total of {} URL's with a GET parameter...".format(len(retval))
    ))
    if len(retval) != 0:
        write_to_log_file(retval, URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.critical(set_color(
            "did not find any usable URL's with the given query '{}' "
            "using search engine '{}'...".format(query, url_to_search), level=50
        ))
        shutdown()
    return list(retval) if len(retval) != 0 else None
Example #14
0
def main_header_check(url, **kwargs):
    """
    main function
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    identify_waf = kwargs.get("identify_waf", True)
    identify_plugins = kwargs.get("identify_plugins", True)
    show_description = kwargs.get("show_description", False)
    attempts = kwargs.get("attempts", 3)

    default_sleep_time = 5
    protection = {"hostname": url}
    definition = {
        "x-xss": ("protection against XSS attacks", "XSS"),
        "strict-transport": ("protection against unencrypted connections (force HTTPS connection)", "HTTPS"),
        "x-frame": ("protection against clickjacking vulnerabilities", "CLICKJACKING"),
        "x-content": ("protection against MIME type attacks", "MIME"),
        "x-csrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "x-xsrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "public-key": ("protection to reduce success rates of MITM attacks", "MITM"),
        "content-security": ("header protection against multiple attack types", "ALL")
    }

    try:
        req, status, html, headers = get_page(url, proxy=proxy, agent=agent, xforward=xforward)

        logger.info(set_color(
            "detecting target charset"
        ))
        charset = get_charset(html, headers)
        if charset is not None:
            logger.info(set_color(
                "target charset appears to be '{}'".format(charset), level=25
            ))
        else:
            logger.warning(set_color(
                "unable to detect target charset", level=30
            ))
        if identify_waf:
            waf_url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)
            _, waf_status, waf_html, waf_headers = get_page(waf_url, xforward=xforward, proxy=proxy, agent=agent)
            logger.info(set_color(
                "checking if target URL is protected by some kind of WAF/IPS/IDS"
            ))
            if verbose:
                logger.debug(set_color(
                    "attempting connection to '{}'".format(waf_url), level=10
                ))

            identified_waf = detect_protection(url, waf_status, waf_html, waf_headers, verbose=verbose)

            if identified_waf is None:
                logger.info(set_color(
                    "no WAF/IDS/IPS has been identified on target URL", level=25
                ))
            else:
                logger.warning(set_color(
                    "the target URL WAF/IDS/IPS has been identified as '{}'".format(identified_waf), level=35
                ))

        if identify_plugins:
            logger.info(set_color(
                "attempting to identify plugins"
            ))
            identified_plugin = detect_plugins(html, headers, verbose=verbose)
            if identified_plugin is not None:
                for plugin in identified_plugin:
                    if show_description:
                        logger.info(set_color(
                            "possible plugin identified as '{}' (description: '{}')".format(
                                plugin[0], plugin[1]
                            ), level=25
                        ))
                    else:
                        logger.info(set_color(
                            "possible plugin identified as '{}'".format(
                                plugin[0]
                            ), level=25
                        ))
            else:
                logger.warning(set_color(
                    "no known plugins identified on target", level=30
                ))

        if verbose:
            logger.debug(set_color(
                "loading XML data", level=10
            ))
        comparable_headers = load_xml_data(HEADER_XML_DATA)
        logger.info(set_color(
            "attempting to get request headers for '{}'".format(url.strip())
        ))
        try:
            found_headers = load_headers(url, req)
        except (ConnectionError, Exception) as e:
            if "Read timed out." or "Connection reset by peer" in str(e):
                found_headers = None
            else:
                logger.exception(set_color(
                    "Zeus has hit an unexpected error and cannot continue '{}'".format(e), level=50
                ))
                request_issue_creation()
                found_headers = None

        if found_headers is not None:
            if verbose:
                logger.debug(set_color(
                    "fetched {}".format(found_headers), level=10
                ))
            headers_established = [str(h) for h in compare_headers(found_headers, comparable_headers)]
            for key in definition.iterkeys():
                if any(key in h.lower() for h in headers_established):
                    logger.warning(set_color(
                        "provided target has {}".format(definition[key][0]), level=30
                    ))
            for key in found_headers.iterkeys():
                protection[key] = found_headers[key]
            logger.info(set_color(
                "writing found headers to log file", level=25
            ))
            return write_to_log_file(protection, HEADER_RESULT_PATH, HEADERS_FILENAME.format(replace_http(url)))
        else:
            logger.error(set_color(
                "unable to retrieve headers for site '{}'".format(url.strip()), level=40
            ))
    except ConnectionError:
        attempts = attempts - 1
        if attempts == 0:
            return False
        logger.warning(set_color(
            "target actively refused the connection, sleeping for {}s and retrying the request".format(
                default_sleep_time
            ), level=30
        ))
        time.sleep(default_sleep_time)
        return main_header_check(
            url, proxy=proxy, agent=agent, xforward=xforward, show_description=show_description,
            identify_plugins=identify_plugins, identify_waf=identify_waf, verbose=verbose,
            attempts=attempts
        )
    except ReadTimeout:
        logger.error(set_color(
            "meta-data retrieval failed due to target URL timing out, skipping", level=40
        ))
    except KeyboardInterrupt:
        if not pause():
            shutdown()
    except Exception as e:
        logger.exception(set_color(
            "meta-data retrieval failed with unexpected error '{}'".format(
                str(e)
            ), level=50
        ))
Example #15
0
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if xforward:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(
            set_color("attempting connection to '{}'...".format(url),
                      level=10))
    try:
        protection_check_req = requests.get(
            url,
            headers=headers,
            proxies=proxy_string_to_dict(proxy),
            timeout=20)

        html, status, headers = protection_check_req.content, protection_check_req.status_code, protection_check_req.headers

        for dbms in DBMS_ERRORS:  # make sure there are no DBMS errors in the HTML
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.info(
                        set_color(
                            "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                            "to SQL injection attacks. it appears the backend DBMS is '{}'..."
                            .format(dbms),
                            level=25))
                    return None

        retval = []
        if status != 200 and "not found" not in html.lower():
            file_list = [
                f for f in os.listdir(DETECT_FIREWALL_PATH)
                if not any(ex in f for ex in ["__init__", ".pyc"])
            ]
            for item in file_list:
                item = item[:-3]
                detection_name = "lib.firewall.{}"
                detection_name = detection_name.format(item)
                detection_name = importlib.import_module(detection_name)
                if detection_name.detect(html, headers=headers, status=status):
                    retval.append(detection_name.__item__)
            if len(retval) > 1:
                if "Generic (Unknown)" in retval:
                    item = retval.index("Generic (Unknown)")
                    del retval[item]
            elif len(retval) == 1 and retval[0] == "Generic (Unknown)":
                    logger.warning(
                        set_color(
                            "identified WAF/IDS/IPS is unknown to Zeus, if you know the firewall and the context "
                            "of the firewall, please create an issue ({}), fingerprint of the firewall will be "
                            "written to a log file...".format(ISSUE_LINK),
                            level=30))
                    full_finger_print = "HTTP/1.1 {}\n{}\n{}".format(
                        status, headers, html)
                    write_to_log_file(
                        full_finger_print, UNKNOWN_FIREWALL_FINGERPRINT_PATH,
                        UNKNOWN_FIREWALL_FILENAME.format(replace_http(url)))
        else:
            retval = None

        return ''.join(retval) if isinstance(retval, list) else retval

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(
                set_color(
                    "detection request timed out, assuming no protection and continuing...",
                    level=30))
            return None
        else:
            logger.exception(
                set_color(
                    "Zeus ran into an unexpected error '{}'...".format(e),
                    level=50))
            request_issue_creation()
            return None
Example #16
0
         logger.exception(set_color(
             "URL provided is not valid, schema appears to be missing...", level=50
         ))
         request_issue_creation()
         shutdown()
     else:
         logger.fatal(set_color(
             "provided file does not exist, make sure you have the full path...", level=50
         ))
 except KeyboardInterrupt:
     logger.error(set_color(
         "user aborted process...", level=40
     ))
 except UnboundLocalError:
     logger.warning(set_color(
         "do not interrupt the browser when selenium is running, "
         "it will cause Zeus to crash...", level=30
     ))
 except WebDriverException as e:
     if "connection refused" in str(e):
         logger.fatal(set_color(
             "there are to many sessions of firefox opened and selenium cannot "
             "create a new one...", level=50
         ))
         do_autoclean = prompt(
             "would you like to attempt auto clean", opts="yN"
         )
         if do_autoclean.lower().startswith("y"):
             logger.warning(set_color(
                 "this will kill all instances of the firefox web browser...", level=30
             ))
             subprocess.call(["sudo", "sh", CLEANUP_TOOL_PATH])
Example #17
0
def get_urls(query, url, verbose=False, **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.
    """
    query = query.decode('unicode_escape').encode('utf-8')
    proxy, user_agent = kwargs.get("proxy",
                                   None), kwargs.get("user_agent", None)
    tor, tor_port = kwargs.get("tor", False), kwargs.get("tor_port", None)
    batch = kwargs.get("batch", False)
    xforward = kwargs.get("xforward", False)
    logger.info(set_color("setting up virtual display to hide the browser"))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    browser = var.search.SetBrowser(agent=user_agent,
                                    proxy=proxy,
                                    tor=tor,
                                    xforward=xforward).set_browser()
    logger.info(set_color("browser will open shortly", level=25))
    browser.get(url)
    if verbose:
        logger.debug(
            set_color(
                "searching search engine for the 'q' element (search button)",
                level=10))
    search = browser.find_element_by_name('q')
    logger.info(
        set_color("searching search engine using query '{}'".format(query)))
    try:
        # enter the text you want to search and hit enter
        search.send_keys(query)
        search.send_keys(Keys.RETURN)
        if not tor:
            time.sleep(3)
        else:
            logger.warning(
                set_color(
                    "sleep time has been increased to 10 seconds due to tor being used",
                    level=30))
            time.sleep(10)
    except ElementNotInteractableException:
        # get rid of the popup box and hit enter after entering the text to search
        browser.execute_script(
            "document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)
        time.sleep(3)
    except UnicodeDecodeError:
        logger.error(
            set_color(
                "your query '{}' appears to have unicode characters in it, selenium is not "
                "properly formatted to handle unicode characters, this dork will be skipped"
                .format(query),
                level=40))
    if verbose:
        logger.debug(set_color("obtaining URL from selenium"))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color("alert present, closing", level=30))
        # discover the alert and close it before continuing
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    # if you have been IP banned, we'll extract the URL from it
    if IP_BAN_REGEX.search(retval) is not None:
        logger.warning(
            set_color(
                "it appears that Google is attempting to block your IP address, attempting bypass",
                level=30))
        try:
            retval = URLParser(retval).extract_ip_ban_url()
            question_msg = (
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URL's manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown")
            if not batch:
                do_continue = prompt(question_msg, opts="yN")
            else:
                do_continue = prompt(question_msg, opts="yN", default="y")

            # shutdown and write the URL to a file
            if not str(do_continue).lower().startswith("n"):
                write_to_log_file(retval, EXTRACTED_URL_LOG,
                                  EXTRACTED_URL_FILENAME)
                logger.info(
                    set_color(
                        "it is advised to extract the URL's from the produced URL written to the above "
                        "(IE open the log, copy the url into firefox)".format(
                            retval)))
                shutdown()
        except Exception as e:
            # stop all the random rogue processes, this isn't guaranteed to stop the processes
            # that's why we have the clean up script in case this fails
            browser.close()
            ff_display.stop()
            logger.exception(
                set_color(
                    "zeus was unable to extract the correct URL from the ban URL '{}', "
                    "got exception '{}'".format(unquote(retval), e),
                    level=50))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(
            set_color("found current URL from selenium browser", level=10))
    logger.info(set_color("closing the browser and continuing process.."))
    browser.close()
    ff_display.stop()
    return retval
Example #18
0
             set_color(
                 "URL provided is not valid, schema appears to be missing...",
                 level=50))
         request_issue_creation()
         shutdown()
     else:
         logger.fatal(
             set_color(
                 "provided file does not exist, make sure you have the full path...",
                 level=50))
 except KeyboardInterrupt:
     logger.error(set_color("user aborted process...", level=40))
 except UnboundLocalError:
     logger.warning(
         set_color(
             "do not interrupt the browser when selenium is running, "
             "it will cause Zeus to crash...",
             level=30))
 except Exception as e:
     if "url did not match a true url" in str(e).lower():
         logger.error(
             set_color(
                 "you did not provide a URL that is capable of being processed, "
                 "the URL provided to the spider needs to contain protocol as well "
                 "ie. 'http://google.com' (it is advised not to add the GET parameter), "
                 "fix the URL you want to scan and try again...",
                 level=40))
         shutdown()
     else:
         logger.exception(
             set_color(
Example #19
0
                pass

            __run_attacks_main()

        # search multiple pages of Google
        elif (opt.dorkToUse is not None or opt.useRandomDork) and opt.searchMultiplePages:
            if opt.dorkToUse is not None:
                dork_to_use = opt.dorkToUse
            elif opt.useRandomDork:
                dork_to_use = get_random_dork()
            else:
                dork_to_use = None

            if dork_to_use is None:
                logger.warning(set_color(
                    "there has been no dork to specified to do the searching, defaulting to random dork", level=30
                ))
                dork_to_use = get_random_dork()

            dork_to_use = dork_to_use.strip()

            if opt.amountToSearch is None:
                logger.warning(set_color(
                    "did not specify amount of links to find defaulting to 75", level=30
                ))
                link_amount_to_search = 75
            else:
                link_amount_to_search = opt.amountToSearch

            logger.info(set_color(
                "searching Google using dork '{}' for a total of {} links".format(
Example #20
0
def main_header_check(url, **kwargs):
    """
    main function
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    identify = kwargs.get("identify", True)

    protection = {"hostname": url}
    definition = {
        "x-xss": ("protection against XSS attacks", "XSS"),
        "strict-transport": ("protection against unencrypted connections (force HTTPS connection)", "HTTPS"),
        "x-frame": ("protection against clickjacking vulnerabilities", "CLICKJACKING"),
        "x-content": ("protection against MIME type attacks", "MIME"),
        "x-csrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "x-xsrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "public-key": ("protection to reduce success rates of MITM attacks", "MITM"),
        "content-security": ("header protection against multiple attack types", "ALL")
    }

    try:
        if identify:
            logger.info(set_color(
                "checking if target URL is protected by some kind of WAF/IPS/IDS..."
            ))
            identified = detect_protection(url, proxy=proxy, agent=agent, verbose=verbose, xforward=xforward)

            if identified is None:
                logger.info(set_color(
                    "no WAF/IDS/IPS has been identified on target URL...", level=25
                ))
            else:
                logger.warning(set_color(
                    "the target URL WAF/IDS/IPS has been identified as '{}'...".format(identified), level=35
                ))

        if verbose:
            logger.debug(set_color(
                "loading XML data...", level=10
            ))
        comparable_headers = load_xml_data(HEADER_XML_DATA)
        logger.info(set_color(
            "attempting to get request headers for '{}'...".format(url.strip())
        ))
        try:
            found_headers = load_headers(url, proxy=proxy, agent=agent, xforward=xforward)
        except (ConnectionError, Exception) as e:
            if "Read timed out." or "Connection reset by peer" in str(e):
                found_headers = None
            else:
                logger.exception(set_color(
                    "Zeus has hit an unexpected error and cannot continue '{}'...".format(e), level=50
                ))
                request_issue_creation()
                found_headers = None

        if found_headers is not None:
            if verbose:
                logger.debug(set_color(
                    "fetched {}...".format(found_headers), level=10
                ))
            headers_established = [str(h) for h in compare_headers(found_headers, comparable_headers)]
            for key in definition.iterkeys():
                if any(key in h.lower() for h in headers_established):
                    logger.warning(set_color(
                        "provided target has {}...".format(definition[key][0]), level=30
                    ))
            for key in found_headers.iterkeys():
                protection[key] = found_headers[key]
            logger.info(set_color(
                "writing found headers to log file...", level=25
            ))
            return write_to_log_file(protection, HEADER_RESULT_PATH, HEADERS_FILENAME.format(replace_http(url)))
        else:
            logger.error(set_color(
                "unable to retrieve headers for site '{}'...".format(url.strip()), level=40
            ))
    except KeyboardInterrupt:
        if not pause():
            shutdown()
Example #21
0
    def __run_attacks(url, **kwargs):
        """
        run the attacks if any are requested
        """
        nmap = kwargs.get("nmap", False)
        sqlmap = kwargs.get("sqlmap", False)
        intel = kwargs.get("intel", False)
        xss = kwargs.get("xss", False)
        admin = kwargs.get("admin", False)
        verbose = kwargs.get("verbose", False)
        batch = kwargs.get("batch", False)
        auto_start = kwargs.get("auto_start", False)

        __enabled_attacks = {
            "sqlmap": opt.runSqliScan,
            "port": opt.runPortScan,
            "xss": opt.runXssScan,
            "admin": opt.adminPanelFinder,
            "intel": opt.intelCheck
        }

        enabled = set()
        for key in __enabled_attacks.keys():
            if __enabled_attacks[key] is True:
                enabled.add(key)
            if len(enabled) > 1:
                logger.error(set_color(
                    "it appears that you have enabled multiple attack types, "
                    "as of now only 1 attack is supported at a time, choose "
                    "your attack and try again. You can use the -f flag if "
                    "you do not want to complete an entire search again...", level=40
                ))
                shutdown()

        if not batch:
            question = prompt(
                "would you like to process found URL: '{}'".format(url), opts=["y", "N"]
            )
        else:
            question = "y"

        if question.lower().startswith("y"):
            if sqlmap:
                return sqlmap_scan.sqlmap_scan_main(
                    url.strip(), verbose=verbose,
                    opts=__create_arguments(sqlmap=True), auto_start=auto_start)
            elif nmap:
                url_ip_address = replace_http(url.strip())
                return nmap_scan.perform_port_scan(
                    url_ip_address, verbose=verbose,
                    opts=__create_arguments(nmap=True)
                )
            elif intel:
                url = get_true_url(url)
                return intel_me.main_intel_amt(
                    url, agent=agent_to_use,
                    proxy=proxy_to_use, do_ip=opt.runAgainstIpAddress
                )
            elif admin:
                main(
                    url, show=opt.showAllConnections,
                    verbose=verbose, do_threading=opt.threadPanels
                )
            elif xss:
                main_xss(
                    url, verbose=verbose, proxy=proxy_to_use,
                    agent=agent_to_use, tamper=opt.tamperXssPayloads
                )
            else:
                pass
        else:
            logger.warning(set_color(
                "skipping '{}'...".format(url), level=30
            ))
Example #22
0
def get_urls(query, url, verbose=False, warning=True, **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.
    """
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish it's task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))

    if verbose:
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        proxy_type = proxy.keys()
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None

    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    try:
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    except ElementNotInteractableException:
        browser.execute_script("document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color(
            "alert present, closing...", level=30
        ))
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(set_color(
            "it appears that Google is attempting to block your IP address, attempting bypass...", level=30
        ))
        try:
            retval = bypass_ip_block(retval)
            do_continue = prompt(
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URL's manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown", opts="yN"
            )
            if not str(do_continue).lower().startswith("n"):  # shutdown and write the URL to a file
                write_to_log_file(retval, EXTRACTED_URL_LOG, "extracted-url-{}.log")
                logger.info(set_color(
                    "it is advised to use the built in blackwidow crawler with the extracted URL "
                    "(IE -b '{}')".format(retval)
                ))
                shutdown()
        except Exception as e:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.exception(set_color(
                "zeus was unable to extract the correct URL from the ban URL '{}', "
                "got exception '{}'...".format(
                    unquote(retval), e
                ), level=50
            ))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser...", level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval
Example #23
0
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):
    """
    search multiple pages for a lot of links, this will not be done via Google
    """
    proxy = kwargs.get("proxy", None)
    agent = kwargs.get("agent", None)
    xforward = kwargs.get("xforward", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)
    attrib, desc = "a", "href"
    retval = set()
    search_engine = AUTHORIZED_SEARCH_ENGINES["search-results"]

    logger.warning(
        set_color("searching multiple pages will not be done on Google",
                  level=30))

    if not parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch):
        shutdown()

    if not xforward:
        headers = {"Connection": "close", "user-agent": agent}
    else:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            "Connection": "close",
            "user-agent": agent,
            "X-Forwarded-For": "{}, {}, {}".format(ip_list[0], ip_list[1], ip_list[2])
        }

    page_number = 1
    try:
        while len(retval) <= link_amount:
            if verbose:
                logger.debug(
                    set_color("searching page number {}".format(page_number),
                              level=10))
            if page_number % 10 == 0:
                logger.info(
                    set_color("currently on page {} of search results".format(
                        page_number)))
            page_request = requests.get(
                search_engine.format(page_number, query, page_number),
                headers=headers,
                proxies=proxy_string_to_dict(proxy))
            if page_request.status_code == 200:
                html_page = page_request.content
                soup = BeautifulSoup(html_page, "html.parser")
                if not NO_RESULTS_REGEX.findall(str(soup)):
                    for link in soup.findAll(attrib):
                        redirect = link.get(desc)
                        if redirect is not None:
                            if not any(ex in redirect for ex in URL_EXCLUDES):
                                if URL_REGEX.match(redirect):
                                    retval.add(redirect)
                    if page_number < MAX_PAGE_NUMBER:
                        page_number += 1
                    else:
                        logger.warning(
                            set_color("hit max page number {}".format(
                                MAX_PAGE_NUMBER),
                                      level=30))
                        break
                else:
                    logger.warning(
                        set_color("no more results found for given query '{}'".
                                  format(query),
                                  level=30))
                    break
    except KeyboardInterrupt:
        logger.error(
            set_color("user aborted, dumping already found URL(s)", level=40))
        write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        logger.info(
            set_color("found a total of {} URL(s)".format(len(retval)),
                      level=25))
        shutdown()
    except Exception as e:
        logger.exception(
            set_color("Zeus ran into an unexpected error '{}'".format(e),
                      level=50))
        request_issue_creation()
        shutdown()

    if len(retval) > 0:
        logger.info(
            set_color(
                "a total of {} URL(s) found out of the requested {}".format(
                    len(retval), link_amount),
                level=25))
        file_path = write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            amount_of_urls = len(open(file_path).readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
        return list(retval)
    else:
        logger.warning(
            set_color(
                "did not find any links with given query '{}' writing to blacklist"
                .format(query),
                level=30))
        write_to_log_file(query, BLACKLIST_FILE_PATH, BLACKLIST_FILENAME)
Example #24
0
def main_intel_amt(url, agent=None, proxy=None, **kwargs):
    """
    main attack method to be called
    """
    do_ip_address = kwargs.get("do_ip", False)
    proxy = proxy_string_to_dict(proxy) or None
    agent = agent or DEFAULT_USER_AGENT
    if do_ip_address:
        logger.warning(
            set_color(
                "running against IP addresses may result in the targets refusing the connection...",
                level=30))
        logger.info(
            set_color("will run against IP address instead of hostname..."))
        try:
            url = replace_http(url)
            url = socket.gethostbyname(url)
            logger.info(set_color("discovered IP address {}...".format(url)))
        except Exception as e:
            logger.error(
                set_color(
                    "failed to gather IP address from hostname '{}', received an error '{}'. "
                    "will just run against hostname...".format(url, e),
                    level=40))
    logger.info(
        set_color(
            "attempting to connect to '{}' and get hardware info...".format(
                url)))
    try:
        json_data = __get_hardware(url, agent=agent, proxy=proxy)
        if json_data is None:
            logger.error(
                set_color("unable to get any information, skipping...",
                          level=40))
            pass
        else:
            print("-" * 40)
            for key in json_data.keys():
                print("{}:".format(str(key).capitalize()))
                for item in json_data[key]:
                    print(" - {}: {}".format(item.capitalize(),
                                             json_data[key][item]))
            print("-" * 40)
    except requests.exceptions.ConnectionError as e:
        if "Max retries exceeded with url" in str(e):
            logger.error(
                set_color(
                    "failed connection, target machine is actively refusing the connection, skipping...",
                    level=40))
            pass
        else:
            logger.error(
                set_color("failed connection with '{}', skipping...".format(url),
                          level=40))
            pass
    except Exception as e:
        if "Temporary failure in name resolution" in str(e):
            logger.error(
                set_color("failed to connect on '{}', skipping...".format(url),
                          level=40))
            pass
        else:
            logger.exception(
                set_color(
                    "ran into exception '{}', cannot continue...".format(e)))
            request_issue_creation()
Example #25
0
def check_for_admin_page(url, exts, protocol="http://", **kwargs):
    verbose = kwargs.get("verbose", False)
    show_possibles = kwargs.get("show_possibles", False)
    possible_connections, connections = set(), set()
    stripped_url = replace_http(str(url).strip())
    for ext in exts:
        ext = ext.strip()
        true_url = "{}{}{}".format(protocol, stripped_url, ext)
        if verbose:
            logger.debug(set_color("trying '{}'...".format(true_url),
                                   level=10))
        try:
            urlopen(true_url, timeout=5)
            logger.info(
                set_color(
                    "connected successfully to '{}'...".format(true_url)))
            connections.add(true_url)
        except HTTPError as e:
            data = str(e).split(" ")
            if verbose:
                if "Access Denied" in str(e):
                    logger.warning(
                        set_color(
                            "got access denied, possible control panel found without external access on '{}'..."
                            .format(true_url),
                            level=30))
                    possible_connections.add(true_url)
                else:
                    logger.error(
                        set_color(
                            "failed to connect got error code {}...".format(
                                data[2]),
                            level=40))
        except Exception as e:
            if verbose:
                if "<urlopen error timed out>" or "timeout: timed out" in str(
                        e):
                    logger.warning(
                        set_color(
                            "connection timed out after five seconds "
                            "assuming won't connect and skipping...",
                            level=30))
                else:
                    logger.exception(
                        set_color(
                            "failed to connect with unexpected error '{}'...".
                            format(str(e)),
                            level=50))
                    request_issue_creation()
    possible_connections, connections = list(possible_connections), list(
        connections)
    data_msg = "found {} possible connections(s) and {} successful connection(s)..."
    logger.info(
        set_color(data_msg.format(len(possible_connections),
                                  len(connections))))
    if len(connections) != 0:
        logger.info(set_color("creating connection tree..."))
        create_tree(url, connections)
    else:
        logger.fatal(
            set_color(
                "did not receive any successful connections to the admin page of "
                "{}...".format(url),
                level=50))
    if show_possibles:
        if len(possible_connections) != 0:
            logger.info(set_color("creating possible connection tree..."))
            create_tree(url, possible_connections)
        else:
            logger.fatal(
                set_color(
                    "did not find any possible connections to {}'s "
                    "admin page",
                    level=50))
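
A minimal usage sketch, assuming the helpers the example relies on (replace_http, create_tree, logger, set_color) are importable from the project; the target and extension list below are hypothetical.

admin_exts = ["/admin.php", "/admin/", "/administrator/", "/login.php"]  # hypothetical list
check_for_admin_page(
    "example.com",
    admin_exts,
    protocol="http://",
    verbose=True,
    show_possibles=True,
)
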
Example #26
0
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(set_color(
            "attempting connection to '{}'...".format(url), level=10
        ))
    try:
        _, status, html, headers = get_page(url, agent=agent, proxy=proxy, xforward=xforward)

        # make sure there are no DBMS errors in the HTML
        for dbms in DBMS_ERRORS:
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.warning(set_color(
                        "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                        "to SQL injection attacks. it appears the backend DBMS is '{}', site will be "
                        "saved for further processing...".format(dbms), level=30
                    ))
                    write_to_log_file(url, SQLI_SITES_FILEPATH, SQLI_FOUND_FILENAME)
                    return None

        retval = []
        file_list = [f for f in os.listdir(DETECT_FIREWALL_PATH) if not any(ex in f for ex in ["__init__", ".pyc"])]
        for item in file_list:
            item = item[:-3]
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'...".format(item), level=10
                ))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers, status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    del retval[retval.index("Generic (Unknown)")]
                except (Exception, IndexError):
                    logger.warning(set_color(
                        "multiple firewalls identified ({}), displaying most likely...".format(
                            ", ".join([item.split("(")[0] for item in retval])
                        ), level=30
                    ))
                    del retval[retval.index(retval[1])]
                    if len(retval) >= 2:
                        del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(set_color(
                    "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                    "if you know the details or the context of the firewall please create "
                    "an issue ({}) with the fingerprint, or a pull request with the script...".format(
                        ISSUE_LINK
                    ), level=30
                ))
                fingerprint = "<!---\nHTTP 1.1\nStatus Code: {}\nHTTP Headers: {}\n--->\n{}".format(
                    status, headers, html
                )
                write_to_log_file(fingerprint, UNKNOWN_FIREWALL_FINGERPRINT_PATH, UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(set_color(
                "detection request failed, assuming no protection and continuing...", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "Zeus ran into an unexpected error '{}'...".format(e), level=50
            ))
            request_issue_creation()
            return None
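
The firewall scripts are discovered on disk and imported by dotted name; a minimal standalone sketch of that pattern (the package name mirrors the example, the directory path passed in is an assumption).

import os
import importlib

def load_firewall_detectors(path, package="lib.firewall"):
    # collect the plugin files, skipping package internals and compiled bytecode
    names = [f[:-3] for f in os.listdir(path)
             if f.endswith(".py") and "__init__" not in f]
    # each module is expected to expose detect(html, headers=..., status=...)
    # and an __item__ string naming the firewall it fingerprints
    return [importlib.import_module("{}.{}".format(package, name)) for name in names]
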
Example #27
0
def parse_search_results(query, url_to_search, verbose=False, **kwargs):
    """
    Parse a webpage from Google for URLs with a GET (query) parameter
    """
    possible_leftovers = URLParser(None).possible_leftovers
    splitter = "&amp;"
    retval = set()
    query_url = None

    parse_webcache, pull_all = kwargs.get("parse_webcache", False), kwargs.get(
        "pull_all", False)
    proxy_string, user_agent = kwargs.get("proxy",
                                          None), kwargs.get("agent", None)
    forward_for = kwargs.get("forward_for", False)
    tor = kwargs.get("tor", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)

    if verbose:
        logger.debug(set_color("parsing blacklist", level=10))
    parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch)

    if verbose:
        logger.debug(
            set_color("checking for user-agent and proxy configuration",
                      level=10))

    if not parse_webcache and "google" in url_to_search:
        logger.warning(
            set_color(
                "will not parse webcache URL's (to parse webcache pass -W)",
                level=30))
    if not pull_all:
        logger.warning(
            set_color(
                "only pulling URLs with GET(query) parameters (to pull all URL's pass -E)",
                level=30))

    user_agent_info = "adjusting user-agent header to {}"
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format(
            "default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}"
    if proxy_string is not None:
        proxy_string = proxy_string_to_dict(proxy_string)
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" +
            ''.join(proxy_string.values()))
    elif tor:
        proxy_string = proxy_string_to_dict("socks5://127.0.0.1:9050")
        proxy_string_info = proxy_string_info.format("tor proxy settings")
    else:
        proxy_string_info = "no proxy configuration detected"

    if forward_for:
        ip_to_use = (create_random_ip(), create_random_ip(),
                     create_random_ip())
        if verbose:
            logger.debug(
                set_color(
                    "random IP addresses generated for headers '{}'".format(
                        ip_to_use),
                    level=10))

        headers = {
            HTTP_HEADER.CONNECTION:
            "close",
            HTTP_HEADER.USER_AGENT:
            user_agent,
            HTTP_HEADER.X_FORWARDED_FOR:
            "{}, {}, {}".format(ip_to_use[0], ip_to_use[1], ip_to_use[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: user_agent
        }
    logger.info(set_color("attempting to gather query URL"))
    try:
        query_url = get_urls(query,
                             url_to_search,
                             verbose=verbose,
                             user_agent=user_agent,
                             proxy=proxy_string,
                             tor=tor,
                             batch=batch,
                             xforward=forward_for)
    except Exception as e:
        if "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(
                set_color(
                    "firefox was not found in the default location on your system, "
                    "check your installation and make sure it is in /usr/lib, if you "
                    "find it there, restart your system and try again",
                    level=50))
        elif "connection refused" in str(e).lower():
            logger.fatal(
                set_color(
                    "there are to many sessions of firefox opened and selenium cannot "
                    "create a new one",
                    level=50))
            run_fix(
                "would you like to attempt to auto clean the open sessions",
                "sudo sh {}".format(CLEANUP_TOOL_PATH),
                "kill off the open sessions of firefox and re-run Zeus",
                exit_process=True)
        elif "Program install error!" in str(e):
            logger.error(
                set_color(
                    "seems the program is having some trouble installing would you like "
                    "to try and automatically fix this issue",
                    level=40))
            run_fix(
                "would you like to attempt to fix this issue automatically",
                "sudo sh {}".format(FIX_PROGRAM_INSTALL_PATH),
                "you can manually try and re-install Xvfb to fix the problem",
                exit_process=True)
        elif "Message: Reached error page:" in str(e):
            logger.fatal(
                set_color(
                    "geckodriver has hit an error that usually means it needs to be reinstalled",
                    level=50))
            question = prompt(
                "would you like to attempt a reinstallation of the geckodriver",
                opts="yN")
            if question.lower().startswith("y"):
                logger.warning(
                    set_color(
                        "rewriting all executed information, path information, and removing geckodriver",
                        level=30))
                rewrite_all_paths()
                logger.info(
                    set_color(
                        "all paths rewritten, you will be forced to re-install everything next run of Zeus"
                    ))
            else:
                logger.fatal(
                    set_color(
                        "you will need to remove the geckodriver from /usr/bin and reinstall it",
                        level=50))
                shutdown()
        elif "Unable to find a matching set of capabilities" in str(e):
            logger.fatal(
                set_color(
                    "it appears that firefox, selenium, and geckodriver are not playing nice with one another",
                    level=50))
            run_fix(
                "would you like to attempt to resolve this issue automatically",
                "sudo sh {}".format(REINSTALL_TOOL),
                ("you will need to reinstall firefox to a later version, update selenium, and reinstall the "
                 "geckodriver to continue using Zeus"),
                exit_process=True)
        else:
            logger.exception(
                set_color(
                    "{} failed to gather the URL from search engine, caught exception '{}' "
                    "exception has been logged to current log file".format(
                        os.path.basename(__file__),
                        str(e).strip()),
                    level=50))
            request_issue_creation()
        shutdown()
    logger.info(
        set_color("URL successfully gathered, searching for GET parameters"))

    logger.info(set_color(proxy_string_info))

    try:
        req = requests.get(query_url, headers=headers, proxies=proxy_string)
    except ConnectionError:
        logger.warning(
            set_color(
                "target machine refused connection, delaying and trying again",
                level=30))
        time.sleep(3)
        req = requests.get(query_url, headers=headers, proxies=proxy_string)

    logger.info(set_color(user_agent_info))
    found_urls = URL_REGEX.findall(req.text)
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in URL_EXCLUDES):
                if not url == "http://" and not url == "https://":
                    if URL_REGEX.match(url):
                        if isinstance(url, unicode):
                            url = str(url).encode("utf-8")
                        if pull_all:
                            retval.add(url.split(splitter)[0])
                        else:
                            if URL_QUERY_REGEX.match(url.split(splitter)[0]):
                                retval.add(url.split(splitter)[0])
                        if verbose:
                            try:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        url.split(splitter)[0]),
                                              level=10))
                            except TypeError:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        str(url).split(splitter)[0]),
                                              level=10))
                            except AttributeError:
                                logger.debug(
                                    set_color("found '{}".format(str(url)),
                                              level=10))
                        if url is not None:
                            retval.add(url.split(splitter)[0])
    true_retval = set()
    for url in list(retval):
        if any(l in url for l in possible_leftovers):
            url = URLParser(url).strip_url_leftovers()
        if parse_webcache:
            if "webcache" in url:
                logger.info(set_color("found a webcache URL, extracting"))
                url = URLParser(url).extract_webcache_url()
                if verbose:
                    logger.debug(set_color("found '{}'".format(url), level=15))
                true_retval.add(url)
            else:
                true_retval.add(url)
        else:
            true_retval.add(url)

    if len(true_retval) != 0:
        file_path = write_to_log_file(true_retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            amount_of_urls = len(open(file_path).readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
    else:
        logger.fatal(
            set_color(
                "did not find any URLs with given query '{}' writing query to blacklist"
                .format(query),
                level=50))
        write_to_log_file(query,
                          BLACKLIST_FILE_PATH,
                          BLACKLIST_FILENAME,
                          blacklist=True)
        shutdown()
    logger.info(
        set_color("found a total of {} URLs with given query '{}'".format(
            len(true_retval), query)))
    return list(true_retval) if len(true_retval) != 0 else None
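
Only URLs carrying a GET (query) parameter are kept unless -E is passed; a minimal sketch of that filter, with simplified, hypothetical stand-ins for Zeus' URL_REGEX and URL_QUERY_REGEX constants.

import re

# hypothetical, simplified stand-ins for the real regex constants
DEMO_URL_REGEX = re.compile(r"https?://[^\s\"'<>]+")
DEMO_URL_QUERY_REGEX = re.compile(r"https?://[^\s\"'<>]+\?[^\s\"'<>]+=")

def filter_found_urls(candidates, pull_all=False, splitter="&amp;"):
    kept = set()
    for url in candidates:
        url = url.split(splitter)[0]
        if not DEMO_URL_REGEX.match(url):
            continue
        if pull_all or DEMO_URL_QUERY_REGEX.match(url):
            kept.add(url)
    return kept
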
Example #28
0
    def __run_attacks_main(**kwargs):
        """
        main method to run the attacks
        """
        log_to_use = kwargs.get("log", None)
        if log_to_use is None:
            options = (opt.dorkToUse, opt.useRandomDork, opt.dorkFileToUse)
            log_to_use = URL_LOG_PATH if any(o for o in options) else SPIDER_LOG_PATH
            try:
                urls_to_use = get_latest_log_file(log_to_use)
            except TypeError:
                urls_to_use = None
        else:
            urls_to_use = log_to_use

        if urls_to_use is None:
            logger.error(set_color(
                "unable to run attacks appears that no file was created for the retrieved data", level=40
            ))
            shutdown()
        options = [
            opt.runSqliScan, opt.runPortScan,
            opt.adminPanelFinder, opt.runXssScan,
            opt.performWhoisLookup, opt.performClickjackingScan,
            opt.pgpLookup
        ]
        if any(options):
            with open(urls_to_use) as urls:
                for i, url in enumerate(urls.readlines(), start=1):
                    current = i
                    if "webcache" in url:
                        logger.warning(set_color(
                            "ran into unexpected webcache URL skipping", level=30
                        ))
                        current -= 1
                    else:
                        if url.strip() not in ("http://", "https://"):
                            logger.info(set_color(
                                "currently running on '{}' (target #{})".format(
                                    url.strip(), current
                                ), level=25
                            ))
                            logger.info(set_color(
                                "fetching target meta-data"
                            ))
                            identified = main_header_check(
                                url, verbose=opt.runInVerbose, agent=agent_to_use,
                                proxy=proxy_to_use, xforward=opt.forwardedForRandomIP,
                                identify_plugins=opt.identifyPlugin, identify_waf=opt.identifyProtection,
                                show_description=opt.showPluginDescription
                            )
                            if not identified:
                                logger.error(set_color(
                                    "target is refusing to allow meta-data dumping, skipping", level=40
                                ))
                            run_attacks(
                                url.strip(),
                                sqlmap=opt.runSqliScan, nmap=opt.runPortScan, pgp=opt.pgpLookup,
                                xss=opt.runXssScan, whois=opt.performWhoisLookup, admin=opt.adminPanelFinder,
                                clickjacking=opt.performClickjackingScan, github=opt.searchGithub,
                                verbose=opt.runInVerbose, batch=opt.runInBatch,
                                auto_start=opt.autoStartSqlmap, xforward=opt.forwardedForRandomIP,
                                sqlmap_args=opt.sqlmapArguments, nmap_args=opt.nmapArguments,
                                show_all=opt.showAllConnections, do_threading=opt.threadPanels,
                                tamper_script=opt.tamperXssPayloads, timeout=opt.controlTimeout,
                                proxy=proxy_to_use, agent=agent_to_use, conf_file=opt.sqlmapConfigFile,
                                threads=opt.amountOfThreads, force_ssl=opt.forceSSL
                            )
                            print("\n")
                        else:
                            logger.warning(set_color(
                                "malformed URL discovered, skipping", level=30
                            ))
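
A bare scheme with no host is treated as a malformed target and skipped; a minimal sketch of that guard as a standalone check.

def is_malformed(url):
    # "http://" or "https://" with nothing after the scheme is not a usable target
    return url.strip() in ("http://", "https://")

for target in ("http://", "http://example.com/?id=1"):
    print("{} -> {}".format(target, "skip" if is_malformed(target) else "scan"))
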
Example #29
0
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if xforward:
        ip_list = (
            create_random_ip(),
            create_random_ip(),
            create_random_ip()
        )
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(set_color(
            "attempting connection to '{}'...".format(url), level=10
        ))
    try:
        protection_check_req = requests.get(
            url, headers=headers, proxies=proxy_string_to_dict(proxy), timeout=20
        )

        html, status, headers = protection_check_req.content, protection_check_req.status_code, protection_check_req.headers

        for dbms in DBMS_ERRORS:  # make sure there are no DBMS errors in the HTML
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.info(set_color(
                        "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                        "to SQL injection attacks. it appears the backend DBMS is '{}'...".format(dbms), level=25
                    ))
                    return None

        retval = []
        file_list = [f for f in os.listdir(DETECT_FIREWALL_PATH) if not any(ex in f for ex in ["__init__", ".pyc"])]
        for item in file_list:
            item = item[:-3]
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'...".format(item), level=10
                ))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers, status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    del retval[retval.index("Generic (Unknown)")]
                except:
                    logger.warning(set_color(
                        "multiple firewalls identified ({}), displaying most likely...".format(
                            ", ".join(retval)
                        ), level=30
                    ))
                    del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(set_color(
                    "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                    "if you know the details or the context of the firewall please create "
                    "an issue with the fingerprint, or a pull request with the script...", level=30
                ))
                fingerprint = "<!---\nStatus: {}\nHeaders: {}\n--->\n{}".format(
                    status, headers, html
                )
                write_to_log_file(fingerprint, UNKNOWN_FIREWALL_FINGERPRINT_PATH, UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(set_color(
                "detection request failed, assuming no protection and continuing...", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "Zeus ran into an unexpected error '{}'...".format(e), level=50
            ))
            request_issue_creation()
            return None
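
The recoverable-error branch keys off substring matches against the exception text; a minimal sketch of that check written with any(), so adding another transient error only means extending the tuple.

RECOVERABLE_ERRORS = ("Read timed out.", "Connection reset by peer")

def is_recoverable(exc):
    # treat the failed request as harmless when its message matches a known
    # transient network error, otherwise let the caller escalate
    return any(message in str(exc) for message in RECOVERABLE_ERRORS)
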