def detect_plugins(html, headers, **kwargs):
    """
    Load every plugin module under DETECT_PLUGINS_PATH and run its
    `search()` against the page HTML and response headers.

    Returns a list of (product, description) tuples for every plugin whose
    search() returned True, or None when nothing matched or the request
    failed in a known-benign way.
    """
    verbose = kwargs.get("verbose", False)
    try:
        retval = []
        # skip package markers and compiled files in the plugin directory
        plugin_skip_schema = ("__init__", ".pyc")
        plugin_file_list = [
            f for f in os.listdir(DETECT_PLUGINS_PATH)
            if not any(s in f for s in plugin_skip_schema)
        ]
        for plugin in plugin_file_list:
            plugin = plugin[:-3]  # strip the ".py" extension
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'".format(plugin), level=10
                ))
            plugin_detection = "lib.plugins.{}".format(plugin)
            plugin_detection = importlib.import_module(plugin_detection)
            if plugin_detection.search(html, headers=headers) is True:
                retval.append((plugin_detection.__product__, plugin_detection.__description__))
        if len(retval) > 0:
            return retval
        return None
    except Exception as e:
        logger.exception(str(e))
        # BUG FIX: `"Read timed out." or "..." in str(e)` was always truthy
        # because `or` binds looser than `in`, so the issue-creation branch
        # below was unreachable; test each substring explicitly instead
        if any(err in str(e) for err in ("Read timed out.", "Connection reset by peer")):
            logger.warning(set_color(
                "plugin request failed, assuming no plugins and continuing", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "plugin detection has failed with error {}".format(str(e))
            ))
            request_issue_creation()
def __config_search_engine(verbose=False):
    """ configure the search engine if a one different from google is given """
    non_default_msg = "specified to use non-default search engine..."
    se_message = "using '{}' as the search engine..."
    if opt.useDDG:
        if verbose:
            logger.debug(set_color(se_message.format("DuckDuckGo"), level=10))
        logger.info(set_color(non_default_msg))
        return AUTHORIZED_SEARCH_ENGINES["duckduckgo"]
    if opt.useAOL:
        # AOL throws pop-ups at the automated browser, warn about the slowdown
        logger.warning(set_color("AOL will take a little longer due to pop-ups...", level=30))
        if verbose:
            logger.debug(set_color(se_message.format("AOL"), level=10))
        logger.info(set_color(non_default_msg))
        return AUTHORIZED_SEARCH_ENGINES["aol"]
    if opt.useBing:
        if verbose:
            logger.debug(set_color(se_message.format("Bing"), level=10))
        logger.info(set_color(non_default_msg))
        return AUTHORIZED_SEARCH_ENGINES["bing"]
    # default: Google; the info line is only shown when not enumerating a file
    if verbose:
        logger.debug(set_color("using default search engine (Google)...", level=10))
    if opt.fileToEnumerate is None:
        logger.info(set_color("using default search engine..."))
    return AUTHORIZED_SEARCH_ENGINES["google"]
def main(url, show=False, verbose=False, **kwargs):
    """
    Main admin-panel bruteforce entry point: parse robots.txt, load the
    extension wordlist, then probe each candidate admin path either
    sequentially or across multiple worker processes.

    Keyword args:
        do_threading: run `proc_num` multiprocessing workers in parallel
        proc_num:     number of worker processes (default 3)
    """
    do_threading = kwargs.get("do_threading", False)
    proc_num = kwargs.get("proc_num", 3)
    logger.info(set_color("parsing robots.txt..."))
    results = check_for_robots(url)
    if not results:
        logger.warning(set_color(
            "seems like this page is blocking access to robots.txt...",
            level=30))
    logger.info(set_color("loading extensions..."))
    extensions = __load_extensions()
    if verbose:
        logger.debug(set_color(
            "loaded a total of {} extensions...".format(len(extensions)),
            level=10))
    logger.info(set_color("attempting to bruteforce admin panel..."))
    if do_threading:
        # BUG FIX: multiprocessing workers run on the CPU; the original
        # message incorrectly claimed the speed depended on the GPU
        logger.warning(set_color(
            "starting parallel processing with {} processes, this "
            "will depend on your CPU speed...".format(proc_num), level=30))
        tasks = []
        for _ in range(proc_num):
            p = multiprocessing.Process(
                target=check_for_admin_page, args=(url, extensions),
                kwargs={
                    "show_possibles": show,
                    "verbose": verbose
                })
            p.start()
            tasks.append(p)
        # wait for every worker to finish before returning
        for proc in tasks:
            proc.join()
    else:
        check_for_admin_page(url, extensions, show_possibles=show, verbose=verbose)
def __config_headers():
    """
    configure the request headers, this will configure
    user agents and proxies
    """
    # proxy resolution: explicit config wins, then a random pick from a
    # proxy file, otherwise no proxy at all
    proxy = None
    if opt.proxyConfig is not None:
        proxy = opt.proxyConfig
    elif opt.proxyFileRand is not None:
        if opt.runInVerbose:
            logger.debug(set_color(
                "loading random proxy from '{}'...".format(opt.proxyFileRand),
                level=10
            ))
        with open(opt.proxyFileRand) as proxy_file:
            proxy = random.choice(proxy_file.readlines()).strip()
    # user-agent resolution: personal agent, then random, then the default
    if opt.usePersonalAgent is not None:
        agent = opt.usePersonalAgent
    elif opt.useRandomAgent:
        agent = grab_random_agent(verbose=opt.runInVerbose)
    else:
        agent = DEFAULT_USER_AGENT
    return proxy, agent
def get_urls(query, url, verbose=False, warning=True, **kwargs):
    """
    Bypass Google captchas and Google API by using selenium-webdriver to gather
    the Google URL. This will open a robot controlled browser window and attempt
    to get a URL from Google that will be used for scraping afterwards.
    """
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    # virtual display so the automated firefox window never appears on screen
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish it's task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))
    if verbose:
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        # NOTE(review): Python 2 idiom — dict.keys() returns a list that is
        # indexed directly below; this breaks under Python 3
        proxy_type = proxy.keys()
        # the same proxy endpoint is reused for http, ftp and ssl traffic
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None
    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    # 'q' is the name of the search input box on the engine's landing page
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    try:
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    except ElementNotInteractableException:
        # a popup box covered the input; dismiss it and retry the search
        browser.execute_script("document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color(
            "alert present, closing...", level=30
        ))
        # accept the alert so the current URL becomes readable again
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    # Google redirects banned clients to these hosts
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(set_color(
            "it appears that Google is attempting to block your IP address, attempting bypass...",
            level=30
        ))
        try:
            retval = bypass_ip_block(retval)
            do_continue = prompt(
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URL's manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown", opts="yN"
            )
            if not str(do_continue).lower().startswith("n"):  # shutdown and write the URL to a file
                write_to_log_file(retval, EXTRACTED_URL_LOG, "extracted-url-{}.log")
                logger.info(set_color(
                    "it is advised to use the built in blackwidow crawler with the extracted URL "
                    "(IE -b '{}')".format(retval)
                ))
                shutdown()
        except Exception as e:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.exception(set_color(
                "zeus was unable to extract the correct URL from the ban URL '{}', "
                "got exception '{}'...".format(
                    unquote(retval), e
                ), level=50
            ))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser...", level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):
    # Gather up to `link_amount` result URLs for `query` through the
    # google_api client, keep only URLs that carry a GET parameter, and
    # write the survivors to a log file.
    def __config_proxy(proxy_string):
        # build an httplib2 ProxyInfo object from a "scheme://host:port" string
        proxy_type_schema = {
            "http": httplib2.socks.PROXY_TYPE_HTTP,
            "socks4": httplib2.socks.PROXY_TYPE_SOCKS4,
            "socks5": httplib2.socks.PROXY_TYPE_SOCKS5
        }
        proxy_type = get_proxy_type(proxy_string)[0]
        proxy_dict = proxy_string_to_dict(proxy_string)
        proxy_config = httplib2.ProxyInfo(
            proxy_type=proxy_type_schema[proxy_type],
            proxy_host="".join(proxy_dict.keys()),
            proxy_port="".join(proxy_dict.values())
        )
        return proxy_config

    proxy, agent = kwargs.get("proxy", None), kwargs.get("agent", None)
    if proxy is not None:
        if verbose:
            logger.debug(set_color(
                "configuring to use proxy '{}'...".format(proxy), level=10
            ))
        # NOTE(review): the ProxyInfo built here is discarded — presumably it
        # was meant to be handed to the HTTP client; confirm against callers
        __config_proxy(proxy)
    if agent is not None:
        if verbose:
            logger.debug(set_color(
                "settings user-agent to '{}'...".format(agent), level=10
            ))
    logger.warning(set_color(
        "multiple pages will be searched using Google's API client, searches may be blocked after a certain "
        "amount of time...", level=30
    ))
    # google_api.search returns an iterator of result URLs; pull `link_amount`
    # of them one at a time
    results, limit, found, index = set(), link_amount, 0, google_api.search(query, user_agent=agent, safe="on")
    try:
        while limit > 0:
            results.add(next(index))
            limit -= 1
            found += 1
    except Exception as e:
        if "Error 503" in str(e):
            logger.fatal(set_color(
                "Google is blocking the current IP address, dumping already found URL's...",
                level=50
            ))
        # NOTE(review): any exception simply stops the harvest and the URLs
        # collected so far are kept; the assignment below is a no-op
        results = results
        pass
    retval = set()
    for url in results:
        # keep only well-formed URLs that carry a GET(query) parameter
        if URL_REGEX.match(url) and URL_QUERY_REGEX.match(url):
            if verbose:
                logger.debug(set_color(
                    "found '{}'...".format(url), level=10
                ))
            retval.add(url)
    if len(retval) != 0:
        logger.info(set_color(
            "a total of {} links found out of requested {}...".format(
                len(retval), link_amount
            )
        ))
        write_to_log_file(list(retval), URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.error(set_color(
            "unable to extract URL's from results...",
            level=40
        ))
    # NOTE(review): nothing is returned — callers only get the log-file side effect
def parse_search_results(query, url_to_search, verbose=False, **kwargs):
    """
    Parse a webpage from Google for URL's with a GET(query) parameter.

    Returns a list of unique URLs that carry a query string, or None when
    nothing usable was found (the process is shut down in that case).
    """
    splitter = "&"
    retval = set()
    query_url = None
    proxy_string, user_agent = kwargs.get("proxy", None), kwargs.get("agent", None)
    if verbose:
        logger.debug(set_color(
            "checking for user-agent and proxy configuration...", level=10
        ))
    user_agent_info = "adjusting user-agent header to {}..."
    # BUG FIX: `is not` compared object identity with a string; use equality
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format("default user agent '{}'".format(DEFAULT_USER_AGENT))
    proxy_string_info = "setting proxy to {}..."
    if proxy_string is not None:
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" + ''.join(proxy_string.values()))
    else:
        proxy_string_info = "no proxy configuration detected..."
    headers = {
        "Connection": "close",
        "user-agent": user_agent
    }
    logger.info(set_color(
        "attempting to gather query URL..."
    ))
    try:
        query_url = get_urls(query, url_to_search, verbose=verbose, user_agent=user_agent, proxy=proxy_string)
    except Exception as e:
        if "WebDriverException" in str(e):
            logger.exception(set_color(
                "it seems that you exited the browser, please allow the browser "
                "to complete it's run so that Zeus can bypass captchas and API "
                "calls", level=50
            ))
        elif "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(set_color(
                "firefox was not found in the default location on your system, "
                "check your installation and make sure it is in /usr/lib, if you "
                "find it there, restart your system and try again...", level=50
            ))
        else:
            logger.exception(set_color(
                "{} failed to gather the URL from search engine, caught exception '{}' "
                "exception has been logged to current log file...".format(
                    os.path.basename(__file__), str(e).strip()), level=50)
            )
            request_issue_creation()
        shutdown()
    logger.info(set_color(
        "URL successfully gathered, searching for GET parameters..."
    ))
    logger.info(set_color(proxy_string_info))
    # BUG FIX: the headers must be sent with the request; the original mutated
    # `req.headers` *after* the request, which only touches the response object
    req = requests.get(query_url, proxies=proxy_string, headers=headers)
    logger.info(set_color(user_agent_info))
    found_urls = URL_REGEX.findall(req.text)
    # well-known Google properties that are never useful scan targets
    url_skip_schema = ("maps.google", "play.google", "youtube")
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in url_skip_schema):
                if URL_QUERY_REGEX.match(url) and not any(l in url for l in URL_EXCLUDES):
                    if isinstance(url, unicode):  # Python 2 unicode normalization
                        url = str(url).encode("utf-8")
                    if "webcache" in url:
                        logger.info(set_color(
                            "received webcache URL, extracting URL from webcache..."
                        ))
                        webcache_url = url
                        url = extract_webcache_url(webcache_url)
                        if url is None:
                            logger.warning(set_color(
                                "unable to extract url from given webcache URL '{}'...".format(
                                    webcache_url
                                ), level=30
                            ))
                    if verbose:
                        try:
                            logger.debug(set_color(
                                "found '{}'...".format(url.split(splitter)[0]), level=10
                            ))
                        except TypeError:
                            logger.debug(set_color(
                                "found '{}'...".format(str(url).split(splitter)[0]), level=10
                            ))
                        except AttributeError:
                            logger.debug(set_color(
                                "found '{}...".format(str(url)), level=10
                            ))
                    # webcache extraction may have produced None; skip those
                    if url is not None:
                        retval.add(url.split("&")[0])
    logger.info(set_color(
        "found a total of {} URL's with a GET parameter...".format(len(retval))
    ))
    if len(retval) != 0:
        write_to_log_file(retval, URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.critical(set_color(
            "did not find any usable URL's with the given query '{}' "
            "using search engine '{}'...".format(query, url_to_search), level=50
        ))
        shutdown()
    return list(retval) if len(retval) != 0 else None
def main_header_check(url, **kwargs):
    """
    Main header-inspection entry point: fetch the target page, optionally
    fingerprint WAF/IDS/IPS protection and installed plugins, compare the
    response headers against known protective headers, and write the
    findings to a log file.

    Returns the log-file write result on success, False once the retry
    budget is exhausted, or None on handled failures.
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    identify_waf = kwargs.get("identify_waf", True)
    identify_plugins = kwargs.get("identify_plugins", True)
    show_description = kwargs.get("show_description", False)
    attempts = kwargs.get("attempts", 3)
    default_sleep_time = 5
    protection = {"hostname": url}
    # header-name fragment -> (human readable description, attack class)
    definition = {
        "x-xss": ("protection against XSS attacks", "XSS"),
        "strict-transport": ("protection against unencrypted connections (force HTTPS connection)", "HTTPS"),
        "x-frame": ("protection against clickjacking vulnerabilities", "CLICKJACKING"),
        "x-content": ("protection against MIME type attacks", "MIME"),
        "x-csrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "x-xsrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "public-key": ("protection to reduce success rates of MITM attacks", "MITM"),
        "content-security": ("header protection against multiple attack types", "ALL")
    }
    try:
        req, status, html, headers = get_page(url, proxy=proxy, agent=agent, xforward=xforward)
        logger.info(set_color(
            "detecting target charset"
        ))
        charset = get_charset(html, headers)
        if charset is not None:
            logger.info(set_color(
                "target charset appears to be '{}'".format(charset), level=25
            ))
        else:
            logger.warning(set_color(
                "unable to detect target charset", level=30
            ))
        if identify_waf:
            # append the protection-check payload so defensive layers react
            waf_url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)
            _, waf_status, waf_html, waf_headers = get_page(waf_url, xforward=xforward, proxy=proxy, agent=agent)
            logger.info(set_color(
                "checking if target URL is protected by some kind of WAF/IPS/IDS"
            ))
            if verbose:
                logger.debug(set_color(
                    "attempting connection to '{}'".format(waf_url), level=10
                ))
            identified_waf = detect_protection(url, waf_status, waf_html, waf_headers, verbose=verbose)
            if identified_waf is None:
                logger.info(set_color(
                    "no WAF/IDS/IPS has been identified on target URL", level=25
                ))
            else:
                logger.warning(set_color(
                    "the target URL WAF/IDS/IPS has been identified as '{}'".format(identified_waf), level=35
                ))
        if identify_plugins:
            logger.info(set_color(
                "attempting to identify plugins"
            ))
            identified_plugin = detect_plugins(html, headers, verbose=verbose)
            if identified_plugin is not None:
                for plugin in identified_plugin:
                    if show_description:
                        logger.info(set_color(
                            "possible plugin identified as '{}' (description: '{}')".format(
                                plugin[0], plugin[1]
                            ), level=25
                        ))
                    else:
                        logger.info(set_color(
                            "possible plugin identified as '{}'".format(
                                plugin[0]
                            ), level=25
                        ))
            else:
                logger.warning(set_color(
                    "no known plugins identified on target", level=30
                ))
        if verbose:
            logger.debug(set_color(
                "loading XML data", level=10
            ))
        comparable_headers = load_xml_data(HEADER_XML_DATA)
        logger.info(set_color(
            "attempting to get request headers for '{}'".format(url.strip())
        ))
        try:
            found_headers = load_headers(url, req)
        except (ConnectionError, Exception) as e:
            # BUG FIX: `"Read timed out." or "..." in str(e)` was always
            # truthy because of operator precedence, which both hid real
            # errors and masked the fact that `found_headers` was left
            # unbound in the else branch
            if any(err in str(e) for err in ("Read timed out.", "Connection reset by peer")):
                found_headers = None
            else:
                logger.exception(set_color(
                    "Zeus has hit an unexpected error and cannot continue '{}'".format(e), level=50
                ))
                request_issue_creation()
                # keep the name bound so the check below cannot NameError
                found_headers = None
        if found_headers is not None:
            if verbose:
                logger.debug(set_color(
                    "fetched {}".format(found_headers), level=10
                ))
            headers_established = [str(h) for h in compare_headers(found_headers, comparable_headers)]
            # plain iteration replaces Python-2-only iterkeys(); equivalent here
            for key in definition:
                if any(key in h.lower() for h in headers_established):
                    logger.warning(set_color(
                        "provided target has {}".format(definition[key][0]), level=30
                    ))
            for key in found_headers:
                protection[key] = found_headers[key]
            logger.info(set_color(
                "writing found headers to log file", level=25
            ))
            return write_to_log_file(protection, HEADER_RESULT_PATH, HEADERS_FILENAME.format(replace_http(url)))
        else:
            logger.error(set_color(
                "unable to retrieve headers for site '{}'".format(url.strip()), level=40
            ))
    except ConnectionError:
        attempts = attempts - 1
        if attempts == 0:
            return False
        logger.warning(set_color(
            "target actively refused the connection, sleeping for {}s and retrying the request".format(
                default_sleep_time
            ), level=30
        ))
        time.sleep(default_sleep_time)
        # propagate the retry's result instead of discarding it
        return main_header_check(
            url, proxy=proxy, agent=agent, xforward=xforward,
            show_description=show_description, identify_plugins=identify_plugins,
            identify_waf=identify_waf, verbose=verbose, attempts=attempts
        )
    except ReadTimeout:
        logger.error(set_color(
            "meta-data retrieval failed due to target URL timing out, skipping", level=40
        ))
    except KeyboardInterrupt:
        if not pause():
            shutdown()
    except Exception as e:
        logger.exception(set_color(
            "meta-data retrieval failed with unexpected error '{}'".format(
                str(e)
            ), level=50
        ))
def main_xss(start_url, verbose=False, proxy=None, agent=None, tamper=None):
    """
    Main XSS attack entry point: generate payload-loaded URLs for the start
    URL, scan each one, and report which payloads appear usable.
    """
    if tamper:
        logger.info(set_color(
            "tampering payloads with '{}'...".format(tamper)))
    # NOTE: return value unused — presumably primes query-script detection
    # for the start URL; confirm against find_xss_script's definition
    find_xss_script(start_url)
    logger.info(set_color("loading payloads..."))
    payloads = __load_payloads()
    if verbose:
        logger.debug(set_color(
            "a total of {} payloads loaded...".format(len(payloads)), level=10))
    logger.info(set_color(
        "payloads will be written to a temporary file and read from there..."
    ))
    filename = create_urls(start_url, payloads, tamper=tamper)
    logger.info(set_color(
        "loaded URL's have been saved to '{}'...".format(filename)))
    logger.info(set_color(
        "testing for XSS vulnerabilities on host '{}'...".format(start_url)))
    if proxy is not None:
        logger.info(set_color("using proxy '{}'...".format(proxy)))
    success = set()
    with open(filename) as urls:
        for i, url in enumerate(urls.readlines(), start=1):
            url = url.strip()
            result = scan_xss(url, proxy=proxy, agent=agent)
            payload = find_xss_script(url)
            if verbose:
                logger.info(set_color(
                    "trying payload '{}'...".format(payload)))
            if result[0] != "sqli" and result[0] is True:
                success.add(url)
                if verbose:
                    logger.debug(set_color(
                        "payload '{}' appears to be usable...".format(payload), level=10))
            # BUG FIX: `result[0] is "sqli"` compared object identity with a
            # string literal (implementation-dependent); use equality instead
            elif result[0] == "sqli":
                if i <= 1:
                    logger.error(set_color(
                        "loaded URL '{}' threw a DBMS error and appears to be injectable, test for SQL injection, "
                        "backend DBMS appears to be '{}'...".format(
                            url, result[1]), level=40))
                else:
                    if verbose:
                        logger.error(set_color("SQL error discovered...", level=40))
            else:
                if verbose:
                    logger.debug(set_color(
                        "host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'..."
                        .format(start_url, payload), level=10))
    if len(success) != 0:
        logger.info(set_color("possible XSS scripts to be used:"))
        create_tree(start_url, list(success))
    else:
        logger.error(set_color(
            "host '{}' does not appear to be vulnerable to XSS attacks...".format(start_url)))
    save = prompt("would you like to keep the URL's saved for further testing", opts="yN")
    if save.lower().startswith("n"):
        os.remove(filename)
def get_urls(query, url, verbose=False, **kwargs):
    """
    Bypass Google captchas and Google API by using selenium-webdriver to gather
    the Google URL. This will open a robot controlled browser window and attempt
    to get a URL from Google that will be used for scraping afterwards.
    """
    # normalize escape sequences in the dork before it is typed into the browser
    query = query.decode('unicode_escape').encode('utf-8')
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    tor, tor_port = kwargs.get("tor", False), kwargs.get("tor_port", None)
    batch = kwargs.get("batch", False)
    xforward = kwargs.get("xforward", False)
    logger.info(set_color("setting up virtual display to hide the browser"))
    # virtual display so the automated firefox window never appears on screen
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    browser = var.search.SetBrowser(agent=user_agent, proxy=proxy, tor=tor, xforward=xforward).set_browser()
    logger.info(set_color("browser will open shortly", level=25))
    browser.get(url)
    if verbose:
        logger.debug(
            set_color(
                "searching search engine for the 'q' element (search button)",
                level=10))
    # 'q' is the name of the search input box on the engine's landing page
    search = browser.find_element_by_name('q')
    logger.info(
        set_color("searching search engine using query '{}'".format(query)))
    try:
        # enter the text you want to search and hit enter
        search.send_keys(query)
        search.send_keys(Keys.RETURN)
        if not tor:
            time.sleep(3)
        else:
            logger.warning(
                set_color(
                    "sleep time has been increased to 10 seconds due to tor being used",
                    level=30))
            time.sleep(10)
    except ElementNotInteractableException:
        # get rid of the popup box and hit enter after entering the text to search
        browser.execute_script(
            "document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)
        time.sleep(3)
    except UnicodeDecodeError:
        logger.error(
            set_color(
                "your query '{}' appears to have unicode characters in it, selenium is not "
                "properly formatted to handle unicode characters, this dork will be skipped"
                .format(query), level=40))
    if verbose:
        logger.debug(set_color("obtaining URL from selenium"))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color("alert present, closing", level=30))
        # discover the alert and close it before continuing
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    # if you have been IP banned, we'll extract the URL from it
    if IP_BAN_REGEX.search(retval) is not None:
        logger.warning(
            set_color(
                "it appears that Google is attempting to block your IP address, attempting bypass",
                level=30))
        try:
            retval = URLParser(retval).extract_ip_ban_url()
            question_msg = (
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URL's manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown")
            if not batch:
                do_continue = prompt(question_msg, opts="yN")
            else:
                # batch mode auto-answers "yes" (shutdown)
                do_continue = prompt(question_msg, opts="yN", default="y")
            # shutdown and write the URL to a file
            if not str(do_continue).lower().startswith("n"):
                write_to_log_file(retval, EXTRACTED_URL_LOG, EXTRACTED_URL_FILENAME)
                # NOTE(review): .format(retval) is called but the string has no
                # '{}' placeholder, so the URL is never interpolated
                logger.info(
                    set_color(
                        "it is advised to extract the URL's from the produced URL written to the above "
                        "(IE open the log, copy the url into firefox)".format(
                            retval)))
                shutdown()
        except Exception as e:
            # stop all the random rogue processes, this isn't guaranteed to stop the processes
            # that's why we have the clean up script in case this fails
            browser.close()
            ff_display.stop()
            logger.exception(
                set_color(
                    "zeus was unable to extract the correct URL from the ban URL '{}', "
                    "got exception '{}'".format(unquote(retval), e),
                    level=50))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(
            set_color("found current URL from selenium browser", level=10))
    logger.info(set_color("closing the browser and continuing process.."))
    browser.close()
    ff_display.stop()
    return retval
def parse_search_results(query, url_to_search, verbose=False, **kwargs):
    """
    Parse a webpage from Google for URL's with a GET(query) parameter.

    Returns a list of unique harvested URLs, or None when nothing was found
    (in which case the query is blacklisted and the process shuts down).
    """
    possible_leftovers = URLParser(None).possible_leftovers
    splitter = "&"
    retval = set()
    query_url = None
    parse_webcache, pull_all = kwargs.get("parse_webcache", False), kwargs.get(
        "pull_all", False)
    proxy_string, user_agent = kwargs.get("proxy", None), kwargs.get("agent", None)
    forward_for = kwargs.get("forward_for", False)
    tor = kwargs.get("tor", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)
    if verbose:
        logger.debug(set_color("parsing blacklist", level=10))
    parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch)
    if verbose:
        logger.debug(
            set_color("checking for user-agent and proxy configuration", level=10))
    if not parse_webcache and "google" in url_to_search:
        logger.warning(
            set_color(
                "will not parse webcache URL's (to parse webcache pass -W)", level=30))
    if not pull_all:
        logger.warning(
            set_color(
                "only pulling URLs with GET(query) parameters (to pull all URL's pass -E)",
                level=30))
    user_agent_info = "adjusting user-agent header to {}"
    # BUG FIX: `is not` compared object identity with a string; use equality
    if user_agent != DEFAULT_USER_AGENT:
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format(
            "default user agent '{}'".format(DEFAULT_USER_AGENT))
    proxy_string_info = "setting proxy to {}"
    if proxy_string is not None:
        proxy_string = proxy_string_to_dict(proxy_string)
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" + ''.join(proxy_string.values()))
    elif tor:
        # route through the local tor SOCKS listener
        proxy_string = proxy_string_to_dict("socks5://127.0.0.1:9050")
        proxy_string_info = proxy_string_info.format("tor proxy settings")
    else:
        proxy_string_info = "no proxy configuration detected"
    if forward_for:
        # spoof the X-Forwarded-For chain with three random IPs
        ip_to_use = (create_random_ip(), create_random_ip(), create_random_ip())
        if verbose:
            logger.debug(
                set_color(
                    "random IP addresses generated for headers '{}'".format(
                        ip_to_use), level=10))
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: user_agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_to_use[0], ip_to_use[1], ip_to_use[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: user_agent
        }
    logger.info(set_color("attempting to gather query URL"))
    try:
        query_url = get_urls(
            query, url_to_search, verbose=verbose, user_agent=user_agent,
            proxy=proxy_string, tor=tor, batch=batch, xforward=forward_for)
    except Exception as e:
        # map known selenium/geckodriver failure modes to guided fixes
        if "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(
                set_color(
                    "firefox was not found in the default location on your system, "
                    "check your installation and make sure it is in /usr/lib, if you "
                    "find it there, restart your system and try again", level=50))
        elif "connection refused" in str(e).lower():
            logger.fatal(
                set_color(
                    "there are to many sessions of firefox opened and selenium cannot "
                    "create a new one", level=50))
            run_fix(
                "would you like to attempt to auto clean the open sessions",
                "sudo sh {}".format(CLEANUP_TOOL_PATH),
                "kill off the open sessions of firefox and re-run Zeus",
                exit_process=True)
        elif "Program install error!" in str(e):
            logger.error(
                set_color(
                    "seems the program is having some trouble installing would you like "
                    "to try and automatically fix this issue", level=40))
            run_fix(
                "would you like to attempt to fix this issue automatically",
                "sudo sh {}".format(FIX_PROGRAM_INSTALL_PATH),
                "you can manually try and re-install Xvfb to fix the problem",
                exit_process=True)
        elif "Message: Reached error page:" in str(e):
            logger.fatal(
                set_color(
                    "geckodriver has hit an error that usually means it needs to be reinstalled",
                    level=50))
            question = prompt(
                "would you like to attempt a reinstallation of the geckodriver",
                opts="yN")
            if question.lower().startswith("y"):
                logger.warning(
                    set_color(
                        "rewriting all executed information, path information, and removing geckodriver",
                        level=30))
                rewrite_all_paths()
                logger.info(
                    set_color(
                        "all paths rewritten, you will be forced to re-install everything next run of Zeus"
                    ))
            else:
                logger.fatal(
                    set_color(
                        "you will need to remove the geckodriver from /usr/bin and reinstall it",
                        level=50))
                shutdown()
        elif "Unable to find a matching set of capabilities" in str(e):
            logger.fatal(
                set_color(
                    "it appears that firefox, selenium, and geckodriver are not playing nice with one another",
                    level=50))
            run_fix(
                "would you like to attempt to resolve this issue automatically",
                "sudo sh {}".format(REINSTALL_TOOL),
                ("you will need to reinstall firefox to a later version, update selenium, and reinstall the "
                 "geckodriver to continue using Zeus"),
                exit_process=True)
        else:
            logger.exception(
                set_color(
                    "{} failed to gather the URL from search engine, caught exception '{}' "
                    "exception has been logged to current log file".format(
                        os.path.basename(__file__), str(e).strip()), level=50))
            request_issue_creation()
        shutdown()
    logger.info(
        set_color("URL successfully gathered, searching for GET parameters"))
    logger.info(set_color(proxy_string_info))
    # BUG FIX: the headers were passed as `params=` (i.e. appended to the
    # query string); they must be sent as actual HTTP headers
    try:
        req = requests.get(query_url, proxies=proxy_string, headers=headers)
    except ConnectionError:
        logger.warning(
            set_color(
                "target machine refused connection, delaying and trying again",
                level=30))
        time.sleep(3)
        req = requests.get(query_url, proxies=proxy_string, headers=headers)
    logger.info(set_color(user_agent_info))
    found_urls = URL_REGEX.findall(req.text)
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in URL_EXCLUDES):
                if not url == "http://" and not url == "https://":
                    if URL_REGEX.match(url):
                        if isinstance(url, unicode):  # Python 2 normalization
                            url = str(url).encode("utf-8")
                        if pull_all:
                            retval.add(url.split(splitter)[0])
                        else:
                            if URL_QUERY_REGEX.match(url.split(splitter)[0]):
                                retval.add(url.split(splitter)[0])
                        if verbose:
                            try:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        url.split(splitter)[0]), level=10))
                            except TypeError:
                                logger.debug(
                                    set_color("found '{}'".format(
                                        str(url).split(splitter)[0]), level=10))
                            except AttributeError:
                                logger.debug(
                                    set_color("found '{}".format(str(url)), level=10))
                        # NOTE(review): this unconditional add bypasses the
                        # pull_all/GET-parameter filter above; kept for
                        # backward compatibility — confirm intent
                        if url is not None:
                            retval.add(url.split(splitter)[0])
    # second pass: strip leftovers and (optionally) resolve webcache URLs
    true_retval = set()
    for url in list(retval):
        if any(l in url for l in possible_leftovers):
            url = URLParser(url).strip_url_leftovers()
        if parse_webcache:
            if "webcache" in url:
                logger.info(set_color("found a webcache URL, extracting"))
                url = URLParser(url).extract_webcache_url()
                if verbose:
                    logger.debug(set_color("found '{}'".format(url), level=15))
                true_retval.add(url)
            else:
                true_retval.add(url)
        else:
            true_retval.add(url)
    if len(true_retval) != 0:
        file_path = write_to_log_file(true_retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            # close the log file instead of leaking the handle
            with open(file_path) as url_log:
                amount_of_urls = len(url_log.readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
    else:
        logger.fatal(
            set_color(
                "did not find any URLs with given query '{}' writing query to blacklist"
                .format(query), level=50))
        write_to_log_file(query, BLACKLIST_FILE_PATH, BLACKLIST_FILENAME, blacklist=True)
        shutdown()
    logger.info(
        set_color("found a total of {} URLs with given query '{}'".format(
            len(true_retval), query)))
    return list(true_retval) if len(true_retval) != 0 else None
def __find_running_opts():
    """ display the running options if verbose is used """
    # collect every command-line option that was actually set (non-None)
    opts_being_used = []
    for o, v in opt.__dict__.items():
        if v is not None:
            opts_being_used.append((o, v))
    return dict(opts_being_used)


# top-level: echo the active options when -v/--verbose is used
if opt.runInVerbose:
    being_run = __find_running_opts()
    logger.debug(set_color(
        "running with options '{}'...".format(being_run), level=10
    ))

logger.info(set_color(
    "log file being saved to '{}'...".format(get_latest_log_file(CURRENT_LOG_FILE_PATH))
))

# enable raw HTTP request debugging when --show-requests is passed
if opt.showRequestInfo:
    logger.debug(set_color(
        "showing all HTTP requests because --show-requests flag was used...",
        level=10
    ))
    http_client.HTTPConnection.debuglevel = 1


# NOTE(review): this definition is truncated in the visible chunk — its body
# continues beyond this view and is reproduced here exactly as seen
def __choose_attack(choice, attacks):
    while True:
        if int(choice) in range(len(attacks)):
def check_for_admin_page(url, exts, protocol="http://", **kwargs):
    """
    Probe a target for reachable admin pages.

    Every extension in `exts` is appended to the scheme-stripped `url`
    and opened; successful connections and "Access Denied" answers
    (possible panels without external access) are collected, summarized,
    and rendered as connection trees.

    :param url: target URL (any existing scheme prefix is stripped)
    :param exts: iterable of path extensions to try (e.g. "/admin.php")
    :param protocol: scheme prefixed onto every candidate URL
    :keyword verbose: emit per-attempt debug/warning logging
    :keyword show_possibles: also build a tree of the "possible" hits
    """
    verbose = kwargs.get("verbose", False)
    show_possibles = kwargs.get("show_possibles", False)
    possible_connections, connections = set(), set()
    stripped_url = replace_http(str(url).strip())
    for ext in exts:
        ext = ext.strip()
        true_url = "{}{}{}".format(protocol, stripped_url, ext)
        if verbose:
            logger.debug(set_color("trying '{}'...".format(true_url), level=10))
        try:
            urlopen(true_url, timeout=5)
            logger.info(
                set_color(
                    "connected successfully to '{}'...".format(true_url)))
            connections.add(true_url)
        except HTTPError as e:
            data = str(e).split(" ")
            if "Access Denied" in str(e):
                # BUGFIX: record the possible panel unconditionally; the
                # original only did this under `verbose`, so the summary
                # count and the possible-connection tree were wrong in
                # non-verbose runs
                possible_connections.add(true_url)
                if verbose:
                    logger.warning(
                        set_color(
                            "got access denied, possible control panel found without external access on '{}'..."
                            .format(true_url), level=30))
            else:
                if verbose:
                    logger.error(
                        set_color(
                            "failed to connect got error code {}...".format(
                                data[2]), level=40))
        except Exception as e:
            if verbose:
                # BUGFIX: the original condition `"..." or "..." in str(e)`
                # was always truthy, so the unexpected-error branch below
                # could never run
                if any(t in str(e) for t in
                       ("<urlopen error timed out>", "timeout: timed out")):
                    logger.warning(
                        set_color(
                            "connection timed out after five seconds "
                            "assuming won't connect and skipping...", level=30))
                else:
                    logger.exception(
                        set_color(
                            "failed to connect with unexpected error '{}'...".
                            format(str(e)), level=50))
                    request_issue_creation()
    possible_connections, connections = list(possible_connections), list(
        connections)
    data_msg = "found {} possible connections(s) and {} successful connection(s)..."
    logger.info(
        set_color(data_msg.format(len(possible_connections),
                                  len(connections))))
    if len(connections) != 0:
        logger.info(set_color("creating connection tree..."))
        create_tree(url, connections)
    else:
        logger.fatal(
            set_color(
                "did not receive any successful connections to the admin page of "
                "{}...".format(url), level=50))
    if show_possibles:
        if len(possible_connections) != 0:
            logger.info(set_color("creating possible connection tree..."))
            create_tree(url, possible_connections)
        else:
            # BUGFIX: the original left the `{}` placeholder unformatted
            logger.fatal(
                set_color(
                    "did not find any possible connections to {}'s "
                    "admin page".format(url), level=50))
def main_header_check(url, **kwargs):
    """
    Main header check.

    Optionally fingerprints any WAF/IPS/IDS protecting the target, then
    fetches the target's response headers, compares them against the
    known protective-header XML data, logs which protections are present,
    and writes the headers to a log file.

    :param url: target URL to inspect
    :keyword verbose: emit extra debug logging
    :keyword agent: user agent to send
    :keyword proxy: proxy string to route the request through
    :keyword xforward: spoof X-Forwarded-For headers
    :keyword identify: run the WAF/IDS/IPS identification step first
    :return: path of the written log file, or None on failure
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    identify = kwargs.get("identify", True)
    protection = {"hostname": url}
    # header-name prefix -> (human readable description, attack it mitigates)
    definition = {
        "x-xss": ("protection against XSS attacks", "XSS"),
        "strict-transport": ("protection against unencrypted connections (force HTTPS connection)", "HTTPS"),
        "x-frame": ("protection against clickjacking vulnerabilities", "CLICKJACKING"),
        "x-content": ("protection against MIME type attacks", "MIME"),
        "x-csrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "x-xsrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "public-key": ("protection to reduce success rates of MITM attacks", "MITM"),
        "content-security": ("header protection against multiple attack types", "ALL")
    }
    try:
        if identify:
            logger.info(set_color(
                "checking if target URL is protected by some kind of WAF/IPS/IDS..."
            ))
            identified = detect_protection(url, proxy=proxy, agent=agent, verbose=verbose, xforward=xforward)
            if identified is None:
                logger.info(set_color(
                    "no WAF/IDS/IPS has been identified on target URL...", level=25
                ))
            else:
                logger.warning(set_color(
                    "the target URL WAF/IDS/IPS has been identified as '{}'...".format(identified), level=35
                ))
        if verbose:
            logger.debug(set_color(
                "loading XML data...", level=10
            ))
        comparable_headers = load_xml_data(HEADER_XML_DATA)
        logger.info(set_color(
            "attempting to get request headers for '{}'...".format(url.strip())
        ))
        try:
            found_headers = load_headers(url, proxy=proxy, agent=agent, xforward=xforward)
        except (ConnectionError, Exception) as e:
            # BUGFIX: `"Read timed out." or "..." in str(e)` was always
            # truthy, making the unexpected-error branch unreachable
            if any(t in str(e) for t in ("Read timed out.", "Connection reset by peer")):
                found_headers = None
            else:
                logger.exception(set_color(
                    "Zeus has hit an unexpected error and cannot continue '{}'...".format(e), level=50
                ))
                request_issue_creation()
                # BUGFIX: ensure the name is bound so the check below
                # cannot raise NameError after the issue report
                found_headers = None
        if found_headers is not None:
            if verbose:
                logger.debug(set_color(
                    "fetched {}...".format(found_headers), level=10
                ))
            headers_established = [str(h) for h in compare_headers(found_headers, comparable_headers)]
            for key in definition.iterkeys():
                if any(key in h.lower() for h in headers_established):
                    logger.warning(set_color(
                        "provided target has {}...".format(definition[key][0]), level=30
                    ))
            for key in found_headers.iterkeys():
                protection[key] = found_headers[key]
            logger.info(set_color(
                "writing found headers to log file...", level=25
            ))
            return write_to_log_file(protection, HEADER_RESULT_PATH, HEADERS_FILENAME.format(replace_http(url)))
        else:
            logger.error(set_color(
                "unable to retrieve headers for site '{}'...".format(url.strip()), level=40
            ))
    except KeyboardInterrupt:
        # let the user bail out cleanly; `pause` decides whether to resume
        if not pause():
            shutdown()
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):
    """
    search multiple pages for a lot of links, this will not be done via Google

    :param query: dork/query string to search with
    :param link_amount: stop once this many unique links are collected
    :param verbose: emit per-page debug logging
    :keyword proxy: proxy string to route requests through
    :keyword agent: user agent to send
    :keyword xforward: spoof X-Forwarded-For headers
    :keyword batch: skip interactive blacklist prompts
    :keyword show_success: display the calculated success rate
    :return: list of found URLs, or None when nothing was found
    """
    proxy = kwargs.get("proxy", None)
    agent = kwargs.get("agent", None)
    xforward = kwargs.get("xforward", False)
    batch = kwargs.get("batch", False)
    show_success = kwargs.get("show_success", False)
    # anchor tags are scraped and their href attribute is read
    attrib, desc = "a", "href"
    retval = set()
    search_engine = AUTHORIZED_SEARCH_ENGINES["search-results"]
    logger.warning(
        set_color("searching multiple pages will not be done on Google",
                  level=30))
    if not parse_blacklist(query, BLACKLIST_FILE_PATH, batch=batch):
        shutdown()
    if not xforward:
        headers = {"Connection": "close", "user-agent": agent}
    else:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            "Connection": "close",
            "user-agent": agent,
            "X-Forwarded-For": "{}, {}, {}".format(ip_list[0], ip_list[1],
                                                   ip_list[2])
        }
    page_number = 1
    try:
        while len(retval) <= link_amount:
            if verbose:
                logger.debug(
                    set_color("searching page number {}".format(page_number),
                              level=10))
            if page_number % 10 == 0:
                logger.info(
                    set_color("currently on page {} of search results".format(
                        page_number)))
            # BUGFIX: these are HTTP headers, not query-string parameters;
            # the original passed them via `params=` so they never reached
            # the server as headers
            page_request = requests.get(
                search_engine.format(page_number, query, page_number),
                headers=headers,
                proxies=proxy_string_to_dict(proxy))
            if page_request.status_code == 200:
                html_page = page_request.content
                soup = BeautifulSoup(html_page, "html.parser")
                if not NO_RESULTS_REGEX.findall(str(soup)):
                    for link in soup.findAll(attrib):
                        redirect = link.get(desc)
                        if redirect is not None:
                            if not any(ex in redirect for ex in URL_EXCLUDES):
                                if URL_REGEX.match(redirect):
                                    retval.add(redirect)
                    if page_number < MAX_PAGE_NUMBER:
                        page_number += 1
                    else:
                        logger.warning(
                            set_color("hit max page number {}".format(
                                MAX_PAGE_NUMBER), level=30))
                        break
                else:
                    logger.warning(
                        set_color("no more results found for given query '{}'".
                                  format(query), level=30))
                    break
            else:
                # BUGFIX: the original looped forever on a non-200 response
                # (the page number never advanced and nothing broke out)
                logger.warning(
                    set_color("received status code {}, breaking".format(
                        page_request.status_code), level=30))
                break
    except KeyboardInterrupt:
        logger.error(
            set_color("user aborted, dumping already found URL(s)", level=40))
        write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        logger.info(
            set_color("found a total of {} URL(s)".format(len(retval)),
                      level=25))
        shutdown()
    except Exception as e:
        logger.exception(
            set_color("Zeus ran into an unexpected error '{}'".format(e),
                      level=50))
        request_issue_creation()
        shutdown()
    if len(retval) > 0:
        logger.info(
            set_color(
                "a total of {} URL(s) found out of the requested {}".format(
                    len(retval), link_amount), level=25))
        file_path = write_to_log_file(retval, URL_LOG_PATH, URL_FILENAME)
        if show_success:
            # BUGFIX: close the log file after counting its lines
            with open(file_path) as log_file:
                amount_of_urls = len(log_file.readlines())
            success_rate = calculate_success(amount_of_urls)
            logger.info(
                set_color("provided query has a {} success rate".format(
                    success_rate)))
        return list(retval)
    else:
        logger.warning(
            set_color(
                "did not find any links with given query '{}' writing to blacklist"
                .format(query), level=30))
        write_to_log_file(query, BLACKLIST_FILE_PATH, BLACKLIST_FILENAME)
def detect_protection(url, **kwargs):
    """
    Detect a WAF/IDS/IPS on the target by appending the protection-check
    payload to the URL and running every firewall-detection script in
    `lib.firewall` against the response.

    :param url: target URL; the payload is appended before the request
    :keyword verbose: emit per-script debug logging
    :keyword agent: user agent to send
    :keyword proxy: proxy string to route the request through
    :keyword xforward: spoof X-Forwarded-For headers
    :return: identified firewall name(s) joined as a string, or None
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)
    if verbose:
        logger.debug(set_color(
            "attempting connection to '{}'...".format(url), level=10
        ))
    try:
        _, status, html, headers = get_page(url, agent=agent, proxy=proxy, xforward=xforward)
        # make sure there are no DBMS errors in the HTML
        for dbms in DBMS_ERRORS:
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.warning(set_color(
                        "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                        "to SQL injection attacks. it appears the backend DBMS is '{}', site will be "
                        "saved for further processing...".format(dbms), level=30
                    ))
                    write_to_log_file(url, SQLI_SITES_FILEPATH, SQLI_FOUND_FILENAME)
                    return None
        retval = []
        file_list = [f for f in os.listdir(DETECT_FIREWALL_PATH) if not any(ex in f for ex in ["__init__", ".pyc"])]
        for item in file_list:
            item = item[:-3]  # strip ".py" to obtain the module name
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'...".format(item), level=10
                ))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers, status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    # prefer a specific identification over the generic one
                    del retval[retval.index("Generic (Unknown)")]
                except ValueError:
                    # BUGFIX: narrowed from `(Exception, IndexError)`;
                    # `list.index` raises ValueError when the item is absent
                    logger.warning(set_color(
                        "multiple firewalls identified ({}), displaying most likely...".format(
                            ", ".join([item.split("(")[0] for item in retval])
                        ), level=30
                    ))
                    del retval[retval.index(retval[1])]
                if len(retval) >= 2:
                    del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(set_color(
                    "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                    "if you know the details or the context of the firewall please create "
                    "an issue ({}) with the fingerprint, or a pull request with the script...".format(
                        ISSUE_LINK
                    ), level=30
                ))
                fingerprint = "<!---\nHTTP 1.1\nStatus Code: {}\nHTTP Headers: {}\n--->\n{}".format(
                    status, headers, html
                )
                write_to_log_file(fingerprint, UNKNOWN_FIREWALL_FINGERPRINT_PATH, UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None
    except Exception as e:
        # BUGFIX: `"Read timed out." or "..." in str(e)` was always truthy,
        # so the unexpected-error branch below was unreachable
        if any(t in str(e) for t in ("Read timed out.", "Connection reset by peer")):
            logger.warning(set_color(
                "detection request failed, assuming no protection and continuing...", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "Zeus ran into an unexpected error '{}'...".format(e), level=50
            ))
            request_issue_creation()
            return None
def detect_protection(url, **kwargs):
    """
    Detect a WAF/IDS/IPS on the target by appending the protection-check
    payload to the URL, requesting it directly with `requests`, and
    running every firewall-detection script against the response.

    :param url: target URL; the payload is appended before the request
    :keyword verbose: emit connection debug logging
    :keyword agent: user agent to send
    :keyword proxy: proxy string to route the request through
    :keyword xforward: spoof X-Forwarded-For headers
    :return: identified firewall name(s) joined as a string, or None
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    if xforward:
        ip_list = (create_random_ip(), create_random_ip(), create_random_ip())
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR:
            "{}, {}, {}".format(ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }
    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)
    if verbose:
        logger.debug(
            set_color("attempting connection to '{}'...".format(url), level=10))
    try:
        # BUGFIX: these are HTTP headers, not query-string parameters; the
        # original passed them via `params=` so they never reached the
        # server as headers
        protection_check_req = requests.get(
            url, headers=headers, proxies=proxy_string_to_dict(proxy),
            timeout=20)
        html, status, headers = protection_check_req.content, protection_check_req.status_code, protection_check_req.headers
        for dbms in DBMS_ERRORS:  # make sure there are no DBMS errors in the HTML
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.info(
                        set_color(
                            "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                            "to SQL injection attacks. it appears the backend DBMS is '{}'..."
                            .format(dbms), level=25))
                    return None
        retval = []
        if status != 200 and "not found" not in html.lower():
            file_list = [
                f for f in os.listdir(DETECT_FIREWALL_PATH)
                if not any(ex in f for ex in ["__init__", ".pyc"])
            ]
            for item in file_list:
                item = item[:-3]  # strip ".py" to obtain the module name
                detection_name = "lib.firewall.{}"
                detection_name = detection_name.format(item)
                detection_name = importlib.import_module(detection_name)
                if detection_name.detect(html, headers=headers, status=status):
                    retval.append(detection_name.__item__)
            if len(retval) > 1:
                # drop the generic identification when a specific one exists
                if "Generic (Unknown)" in retval:
                    item = retval.index("Generic (Unknown)")
                    del retval[item]
            # BUGFIX: guard against an empty result list; the original
            # indexed `retval[0]` unconditionally and raised IndexError
            # when no firewall script matched
            elif retval and retval[0] == "Generic (Unknown)":
                logger.warning(
                    set_color(
                        "identified WAF/IDS/IPS is unknown to Zeus, if you know the firewall and the context "
                        "of the firewall, please create an issue ({}), fingerprint of the firewall will be "
                        "written to a log file...".format(ISSUE_LINK),
                        level=30))
                full_finger_print = "HTTP/1.1 {}\n{}\n{}".format(
                    status, headers, html)
                write_to_log_file(
                    full_finger_print, UNKNOWN_FIREWALL_FINGERPRINT_PATH,
                    UNKNOWN_FIREWALL_FILENAME.format(replace_http(url)))
        else:
            # a clean 200 response means no protection was triggered
            retval = None
        return ''.join(retval) if isinstance(retval, list) else retval
    except Exception as e:
        # BUGFIX: `"Read timed out." or "..." in str(e)` was always truthy,
        # so the unexpected-error branch below was unreachable
        if any(t in str(e) for t in ("Read timed out.", "Connection reset by peer")):
            logger.warning(
                set_color(
                    "detection request timed out, assuming no protection and continuing...",
                    level=30))
            return None
        else:
            logger.exception(
                set_color(
                    "Zeus ran into an unexpected error '{}'...".format(e),
                    level=50))
            request_issue_creation()
            return None
# parse the command-line arguments and handle any single-use display flags
opt = ZeusParser.cmd_parser()
ZeusParser().single_show_args(opt)

# run the setup on the program
setup(verbose=opt.runInVerbose)

if not opt.hideBanner:
    print(BANNER)

start_up()

if opt.runInVerbose:
    being_run = find_running_opts(opt)
    logger.debug(set_color(
        "running with options '{}'".format(being_run), level=10
    ))

# always tell the user where the current log file lives
logger.info(set_color(
    "log file being saved to '{}'".format(get_latest_log_file(CURRENT_LOG_FILE_PATH))
))


def __run_attacks_main(**kwargs):
    """ main method to run the attacks """
    # NOTE(review): this definition is truncated in this chunk; the body
    # continues past the visible source and is kept verbatim here
    log_to_use = kwargs.get("log", None)
    if log_to_use is None:
        # any dork option implies the search-result URL log, otherwise the
        # spider log is used
        options = (opt.dorkToUse, opt.useRandomDork, opt.dorkFileToUse)
        log_to_use = URL_LOG_PATH if any(o for o in options) else SPIDER_LOG_PATH
def detect_protection(url, **kwargs):
    """
    Detect a WAF/IDS/IPS on the target by appending the protection-check
    payload to the URL, requesting it with `requests`, and running every
    firewall-detection script in `lib.firewall` against the response.

    :param url: target URL; the payload is appended before the request
    :keyword verbose: emit per-script debug logging
    :keyword agent: user agent to send
    :keyword proxy: proxy string to route the request through
    :keyword xforward: spoof X-Forwarded-For headers
    :return: identified firewall name(s) joined as a string, or None
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    if xforward:
        ip_list = (
            create_random_ip(),
            create_random_ip(),
            create_random_ip()
        )
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(ip_list[0], ip_list[1], ip_list[2])
        }
    else:
        headers = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }
    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)
    if verbose:
        logger.debug(set_color(
            "attempting connection to '{}'...".format(url), level=10
        ))
    try:
        # BUGFIX: these are HTTP headers, not query-string parameters; the
        # original passed them via `params=` so they never reached the
        # server as headers
        protection_check_req = requests.get(
            url, headers=headers, proxies=proxy_string_to_dict(proxy), timeout=20
        )
        html, status, headers = protection_check_req.content, protection_check_req.status_code, protection_check_req.headers
        for dbms in DBMS_ERRORS:  # make sure there are no DBMS errors in the HTML
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.info(set_color(
                        "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                        "to SQL injection attacks. it appears the backend DBMS is '{}'...".format(dbms), level=25
                    ))
                    return None
        retval = []
        file_list = [f for f in os.listdir(DETECT_FIREWALL_PATH) if not any(ex in f for ex in ["__init__", ".pyc"])]
        for item in file_list:
            item = item[:-3]  # strip ".py" to obtain the module name
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'...".format(item), level=10
                ))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers, status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    # prefer a specific identification over the generic one
                    del retval[retval.index("Generic (Unknown)")]
                except ValueError:
                    # BUGFIX: narrowed the bare `except:`; `list.index`
                    # raises ValueError when the item is not present
                    logger.warning(set_color(
                        "multiple firewalls identified ({}), displaying most likely...".format(
                            ", ".join(retval)
                        ), level=30
                    ))
                    del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(set_color(
                    "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                    "if you know the details or the context of the firewall please create "
                    "an issue with the fingerprint, or a pull request with the script...", level=30
                ))
                fingerprint = "<!---\nStatus: {}\nHeaders: {}\n--->\n{}".format(
                    status, headers, html
                )
                write_to_log_file(fingerprint, UNKNOWN_FIREWALL_FINGERPRINT_PATH, UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None
    except Exception as e:
        # BUGFIX: `"Read timed out." or "..." in str(e)` was always truthy,
        # so the unexpected-error branch below was unreachable
        if any(t in str(e) for t in ("Read timed out.", "Connection reset by peer")):
            logger.warning(set_color(
                "detection request failed, assuming no protection and continuing...", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "Zeus ran into an unexpected error '{}'...".format(e), level=50
            ))
            request_issue_creation()
            return None