def check_for_robots(url, ext="/robots.txt", data_sep="-" * 30):
    """
    check if the URL has a robots.txt in it and collect `interesting`
    information out of the page
    """
    url = replace_http(url)
    interesting = set()
    full_url = "{}{}{}".format("http://", url, ext)
    conn = requests.get(full_url)
    data = conn.content
    code = conn.status_code
    if code == 404:
        return False
    for line in data.split("\n"):
        if "Allow" in line:
            interesting.add(line.strip())
    if len(interesting) > 0:
        create_tree(full_url, list(interesting))
    else:
        to_display = prompt(
            "nothing interesting found in robots.txt would you like to display the entire page",
            opts="yN")
        if to_display.lower().startswith("y"):
            print("{}\n{}\n{}".format(data_sep, data, data_sep))
    logger.info(set_color("robots.txt page will be saved into a file..."))
    write_to_log_file(data, ROBOTS_PAGE_PATH, "robots-{}.log".format(url))
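# a minimal usage sketch (example.com is a placeholder host); the function
# returns False on a 404 and otherwise gathers every "Allow" line it finds:
if check_for_robots("http://example.com") is False:
    print("no robots.txt served (HTTP 404)")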
def run_xss_scan(url, url_file=None, proxy=None, user_agent=False):
    """ Pointer to run an XSS scan on a given URL """
    header = RANDOM_USER_AGENT if user_agent is not False else None
    if proxy is not None:
        LOGGER.info("Proxy configured, running through: {}".format(proxy))
    if user_agent is True:
        LOGGER.info("Grabbed random user agent: {}".format(header))
    if url_file is not None:  # scan a given file full of URLs
        file_path = url_file
        total = len(open(url_file).readlines())
        done = 0
        LOGGER.info("Found a total of {} URLS to scan..".format(total))
        with open(file_path) as urls:
            for url in urls.readlines():
                if QUERY_REGEX.match(url.strip()):
                    question = prompt(
                        "Would you like to scan '{}' for XSS vulnerabilities[y/N]: ".format(url.strip()))
                    if question.lower().startswith("y"):
                        done += 1
                        if not xss.main(url.strip(), proxy=proxy, headers=header):
                            LOGGER.info("URL '{}' does not appear to be vulnerable to XSS".format(url.strip()))
                        else:
                            LOGGER.info("URL '{}' appears to be vulnerable to XSS".format(url.strip()))
                        LOGGER.info("URLS scanned: {}, URLS left: {}".format(done, total - done))
                else:
                    LOGGER.warn("URL '{}' does not contain a query (GET) parameter, skipping".format(url.strip()))
        LOGGER.info("All URLS in file have been scanned, shutting down..")
    else:  # scan a single URL
        if QUERY_REGEX.match(url):
            LOGGER.info("Searching: {} for XSS vulnerabilities..".format(url))
            if not xss.main(url, proxy=proxy, headers=header):
                LOGGER.error("{} does not appear to be vulnerable to XSS".format(url))
            else:
                LOGGER.info("{} seems to be vulnerable to XSS.".format(url))
        else:
            error_message = "The URL you provided does not contain a query "
            error_message += "(GET) parameter. In order for this scan to run "
            error_message += "successfully you will need to provide a URL with "
            error_message += "a query (GET) parameter, example: http://127.0.0.1/php?id=2"
            LOGGER.fatal(error_message)
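# The exact QUERY_REGEX pattern lives elsewhere in the codebase; the stand-in
# below is a hypothetical sketch of the same intent (the URL must carry a GET
# parameter, e.g. http://127.0.0.1/php?id=2 as the error message suggests):
import re

QUERY_REGEX_SKETCH = re.compile(r"https?://\S+\?\S+=\S*")

assert QUERY_REGEX_SKETCH.match("http://127.0.0.1/php?id=2")
assert QUERY_REGEX_SKETCH.match("http://example.com/index.php") is None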
def run_sqli_scan(url, url_file=None, proxy=None, user_agent=False, tamper=None):
    """ Pointer to run a SQLi scan on a given URL """
    error_message = "URL: '{}' threw an exception ".format(url)
    error_message += "and Pybelt is unable to resolve the URL, "
    error_message += "this could mean that the URL is not allowing connections "
    error_message += "or that the URL is bad. Attempt to connect "
    error_message += "to the URL manually, if a connection occurs "
    error_message += "make an issue."
    if url_file is not None:  # run through a file list
        file_path = url_file
        total = len(open(file_path).readlines())
        done = 0
        LOGGER.info("Found a total of {} urls in file {}..".format(total, file_path))
        with open(file_path) as urls:
            for url in urls.readlines():
                try:
                    if QUERY_REGEX.match(url.strip()):
                        question = prompt(
                            "Would you like to scan '{}' for SQLi vulnerabilities[y/N]: ".format(url.strip()))
                        if question.lower().startswith("y"):
                            LOGGER.info("Starting scan on url: '{}'".format(url.strip()))
                            LOGGER.info(SQLiScanner(url.strip()).sqli_search())
                            done += 1
                            LOGGER.info("URLS scanned: {}, URLS left: {}".format(done, total - done))
                    else:
                        LOGGER.warn("URL '{}' does not contain a query (GET) parameter, skipping..".format(url.strip()))
                except HTTPError:
                    LOGGER.fatal(error_message)
        LOGGER.info("No more URLS found in file, shutting down..")
    else:  # run a single URL
        try:
            if QUERY_REGEX.match(url):
                LOGGER.info("Starting SQLi scan on '{}'..".format(url))
                LOGGER.info(SQLiScanner(url).sqli_search())
            else:
                LOGGER.error("URL does not contain a query (GET) parameter. Example: http://example.com/php?id=2")
        except HTTPError:
            LOGGER.fatal(error_message)
def run_hash_verification(hash_to_verify, hash_ver_file=None):
    """ Pointer to run the hash verification system """
    if hash_ver_file is not None and hash_to_verify is None:
        try:
            total = len(open(hash_ver_file).readlines())
            LOGGER.info("Found a total of {} hashes in file..".format(total))
        except IOError:
            LOGGER.critical("That file does not exist, check path and try again.")
            return  # bail out here, opening the file again below would fail the same way
        with open(hash_ver_file, "r+") as hashes:
            for h in hashes.readlines():
                question = prompt("Attempt to verify '{}'[y/N]: ".format(h.strip()))
                if question.lower().startswith("y"):
                    LOGGER.info("Analyzing hash: '{}'".format(h.strip()))
                    HashChecker(h.strip()).obtain_hash_type()
                    print("\n")
                else:
                    LOGGER.warning("Skipping '{}'..".format(h.strip()))
    else:
        LOGGER.info("Analyzing hash: '{}'".format(hash_to_verify))
        HashChecker(hash_to_verify).obtain_hash_type()
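# usage sketch: hash mode takes the hash directly, file mode is reached by
# passing None for the hash (the file path "hashes.txt" is hypothetical):
run_hash_verification("5f4dcc3b5aa765d61d8327deb882cf99")   # a single MD5
run_hash_verification(None, hash_ver_file="hashes.txt")     # one prompt per line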
def run_dork_checker(dork, dork_file=None, proxy=None):
    """ Pointer to run a dork check on a given Google dork """
    if dork is not None:
        LOGGER.info("Starting dork scan, using query: '{}'..".format(dork))
        try:
            LOGGER.info(DorkScanner(dork, dork_file=dork_file, proxy=proxy).check_urls_for_queries())
        except HTTPError:
            LOGGER.fatal(GoogleBlockException(GOOGLE_TEMP_BLOCK_ERROR_MESSAGE))
    elif dork is None and dork_file is not None:
        if proxy is None:
            proxy_warn = "It is advised to use proxies while running "
            proxy_warn += "a dork list due to the temporary Google "
            proxy_warn += "bans.."
            LOGGER.warning(proxy_warn)
            question = prompt("Would you like to find proxies with the built in finder first[y/N]: ")
            if question.upper().startswith("Y"):
                subprocess.call(["python", "pybelt.py", "-f"])
        try:
            with open(dork_file) as dork_list:
                for dork in dork_list.readlines():
                    LOGGER.info("Starting dork scan on {}..".format(dork.strip()))
                    LOGGER.info(DorkScanner(dork, dork_file=dork_file, proxy=proxy).check_urls_for_queries())
        except HTTPError:
            LOGGER.fatal(GoogleBlockException(GOOGLE_TEMP_BLOCK_ERROR_MESSAGE))
        except IOError:
            LOGGER.fatal("The filename {} does not exist, please verify path and try again".format(dork_file))
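# usage sketch (the dork, proxy, and file name are all hypothetical); a single
# dork runs directly, while dork-file mode is reached by passing dork=None:
run_dork_checker("inurl:index.php?id=", proxy="127.0.0.1:8080")
run_dork_checker(None, dork_file="dorks.txt")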
if args.version is False:
    hide_banner(hide=bool(args.banner), legal=bool(args.legal))
else:
    hide_banner(hide=True)
LOGGER.info("Checking program integrity..")
try:
    integrity_check()
except HTTPError:
    check_fail = "Integrity check failed to connect, "
    check_fail += "you are running a non-verified "
    check_fail += "Pybelt, this may or may not be insecure. "
    check_fail += "Suggestion would be to re-download Pybelt from "
    check_fail += "{}"
    LOGGER.error(check_fail.format(CLONE_LINK))
    answer = prompt("Would you like to continue anyway[y/N] ")
    if not answer.upper().startswith("Y"):
        err_msg = "Please download the latest version from {}"
        LOGGER.critical(err_msg.format(CLONE_LINK))
try:
    if len(sys.argv) == 1:  # no flag was provided, drop into the console
        prompt = pybelt_shell.PybeltConsole()  # launch the shell
        prompt.prompt = "{}@pybelt > ".format(getpass.getuser())
        info_message = "You have failed to provide a flag so you have been "
        info_message += "redirected to the Pybelt Console. For available "
        info_message += "flags type: 'run -hh', to see help type: 'help', "
        info_message += "to exit the console type: 'quit'"
def main_xss(start_url, verbose=False, proxy=None, agent=None, tamper=None):
    """ main attack method to be called """
    if tamper:
        logger.info(set_color("tampering payloads with '{}'...".format(tamper)))
    find_xss_script(start_url)
    logger.info(set_color("loading payloads..."))
    payloads = __load_payloads()
    if verbose:
        logger.debug(set_color("a total of {} payloads loaded...".format(len(payloads)), level=10))
    logger.info(set_color(
        "payloads will be written to a temporary file and read from there..."
    ))
    filename = create_urls(start_url, payloads, tamper=tamper)
    logger.info(set_color("loaded URLs have been saved to '{}'...".format(filename)))
    logger.info(set_color("testing for XSS vulnerabilities on host '{}'...".format(start_url)))
    if proxy is not None:
        logger.info(set_color("using proxy '{}'...".format(proxy)))
    success = set()
    with open(filename) as urls:
        for i, url in enumerate(urls.readlines(), start=1):
            url = url.strip()
            result = scan_xss(url, proxy=proxy, agent=agent)
            payload = find_xss_script(url)
            if verbose:
                logger.info(set_color("trying payload '{}'...".format(payload)))
            if result[0] != "sqli" and result[0] is True:
                success.add(url)
                if verbose:
                    logger.debug(set_color(
                        "payload '{}' appears to be usable...".format(payload), level=10))
            elif result[0] == "sqli":  # `is` on a string literal is unreliable, compare with ==
                if i <= 1:
                    logger.error(set_color(
                        "loaded URL '{}' threw a DBMS error and appears to be injectable, "
                        "test for SQL injection, backend DBMS appears to be '{}'...".format(
                            url, result[1]), level=40))
                else:
                    if verbose:
                        logger.error(set_color("SQL error discovered...", level=40))
            else:
                if verbose:
                    logger.debug(set_color(
                        "host '{}' does not appear to be vulnerable to XSS attacks "
                        "with payload '{}'...".format(start_url, payload), level=10))
    if len(success) != 0:
        logger.info(set_color("possible XSS scripts to be used:"))
        create_tree(start_url, list(success))
    else:
        logger.error(set_color(
            "host '{}' does not appear to be vulnerable to XSS attacks...".format(start_url)))
    save = prompt("would you like to keep the URLs saved for further testing", opts="yN")
    if save.lower().startswith("n"):
        os.remove(filename)
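# usage sketch (the proxy, agent, and tamper values shown are hypothetical;
# valid tamper names depend on the tamper scripts shipped with the project):
main_xss("http://example.com/php?id=1", verbose=True,
         proxy="127.0.0.1:8080", agent="Mozilla/5.0", tamper="urlencode")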
def get_urls(query, url, verbose=False, warning=True, **kwargs):
    """
    Bypass Google captchas and Google API by using selenium-webdriver to
    gather the Google URL. This will open a robot-controlled browser window
    and attempt to get a URL from Google that will be used for scraping afterwards.
    """
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish its task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        proxy_type = proxy.keys()
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None
    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    try:
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    except ElementNotInteractableException:
        browser.execute_script("document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color(
            "alert present, closing...", level=30
        ))
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(set_color(
            "it appears that Google is attempting to block your IP address, attempting bypass...",
            level=30
        ))
        try:
            retval = bypass_ip_block(retval)
            do_continue = prompt(
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URLs manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown", opts="yN"
            )
            if not str(do_continue).lower().startswith("n"):  # shutdown and write the URL to a file
                write_to_log_file(retval, EXTRACTED_URL_LOG, "extracted-url-{}.log")
                logger.info(set_color(
                    "it is advised to use the built in blackwidow crawler with the extracted URL "
                    "(IE -b '{}')".format(retval)
                ))
                shutdown()
        except Exception as e:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.exception(set_color(
                "zeus was unable to extract the correct URL from the ban URL '{}', "
                "got exception '{}'...".format(
                    unquote(retval), e
                ), level=50
            ))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser...", level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval
def request_issue_creation():
    question = prompt(
        "would you like to create an anonymous issue and post it to Zeus's Github",
        opts="yN")
    if question.lower().startswith("n"):
        logger.error(set_color(
            "Zeus has experienced an internal error and cannot continue, shutting down...",
            level=40))
        shutdown()
    fix_log_file()
    logger.info(set_color(
        "Zeus got an unexpected error and will automatically create an issue for this error, please wait..."
    ))

    def __extract_stacktrace(file_data):
        logger.info(set_color("extracting traceback from log file..."))
        retval, buff_mode, _buffer = [], False, ""
        with open(file_data, "r+") as log:
            for line in log:
                if "Traceback" in line:
                    buff_mode = True
                if line and len(line) < 5:
                    buff_mode = False
                    retval.append(_buffer)
                    _buffer = ""
                if buff_mode:
                    if len(line) > 400:
                        line = line[:400] + "...\n"
                    _buffer += line
        return "".join(retval)

    logger.info(set_color("getting authorization..."))
    encoded = __get_encoded_string()
    n = get_decode_num(encoded)
    token = decode(n, encoded)
    current_log_file = get_latest_log_file(CURRENT_LOG_FILE_PATH)
    stacktrace = __extract_stacktrace(current_log_file)
    issue_title = stacktrace.split("\n")[-2]
    issue_data = {
        "title": issue_title,
        "body": "Zeus version:\n`{}`\n\n"
                "Error info:\n```{}```\n\n"
                "Running details:\n`{}`\n\n"
                "Commands used:\n`{}`\n\n"
                "Log file info:\n```{}```".format(VERSION, str(stacktrace),
                                                  str(platform.platform()),
                                                  " ".join(sys.argv),
                                                  open(current_log_file).read()),
    }
    _json_data = json.dumps(issue_data)
    if sys.version_info > (3,):
        _json_data = _json_data.encode("utf-8")
    try:
        req = urllib2.Request(
            url="https://api.github.com/repos/ekultek/zeus-scanner/issues",
            data=_json_data,
            headers={"Authorization": "token {}".format(token)})
        urllib2.urlopen(req, timeout=10).read()
        logger.info(set_color(
            "issue has been created successfully with the following name '{}'..."
            .format(issue_title)))
    except Exception as e:
        logger.exception(set_color(
            "failed to auto create the issue, got exception '{}', "
            "you may manually create an issue...".format(e), level=50))
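# a quick illustration of the buffering rule in the nested __extract_stacktrace
# helper: it starts buffering at a "Traceback" line and flushes on the next
# line shorter than 5 characters (the log content below is made up):
sample_log = (
    "INFO starting scan\n"
    "Traceback (most recent call last):\n"
    '  File "zeus.py", line 10, in <module>\n'
    "    raise ValueError\n"
    "ValueError\n"
    "\n"  # fewer than 5 chars, ends the buffered traceback
    "INFO shutting down\n"
)
# written to a file and fed through __extract_stacktrace, this would return
# only the four traceback lines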
def __run_attacks(url, **kwargs):
    """ run the attacks if any are requested """
    nmap = kwargs.get("nmap", False)
    sqlmap = kwargs.get("sqlmap", False)
    intel = kwargs.get("intel", False)
    xss = kwargs.get("xss", False)
    admin = kwargs.get("admin", False)
    verbose = kwargs.get("verbose", False)
    batch = kwargs.get("batch", False)
    auto_start = kwargs.get("auto_start", False)
    __enabled_attacks = {
        "sqlmap": opt.runSqliScan,
        "port": opt.runPortScan,
        "xss": opt.runXssScan,
        "admin": opt.adminPanelFinder,
        "intel": opt.intelCheck
    }
    enabled = set()
    for key in __enabled_attacks.keys():
        if __enabled_attacks[key] is True:
            enabled.add(key)
    if len(enabled) > 1:
        logger.error(set_color(
            "it appears that you have enabled multiple attack types, "
            "as of now only 1 attack is supported at a time, choose "
            "your attack and try again. You can use the -f flag if "
            "you do not want to complete an entire search again...", level=40
        ))
        shutdown()
    if not batch:
        question = prompt(
            "would you like to process found URL: '{}'".format(url), opts=["y", "N"]
        )
    else:
        question = "y"
    if question.lower().startswith("y"):
        if sqlmap:
            return sqlmap_scan.sqlmap_scan_main(
                url.strip(), verbose=verbose,
                opts=__create_arguments(sqlmap=True), auto_start=auto_start)
        elif nmap:
            url_ip_address = replace_http(url.strip())
            return nmap_scan.perform_port_scan(
                url_ip_address, verbose=verbose, opts=__create_arguments(nmap=True)
            )
        elif intel:
            url = get_true_url(url)
            return intel_me.main_intel_amt(
                url, agent=agent_to_use, proxy=proxy_to_use,
                do_ip=opt.runAgainstIpAddress
            )
        elif admin:
            main(
                url, show=opt.showAllConnections, verbose=verbose,
                do_threading=opt.threadPanels
            )
        elif xss:
            main_xss(
                url, verbose=verbose, proxy=proxy_to_use,
                agent=agent_to_use, tamper=opt.tamperXssPayloads
            )
    else:
        logger.warning(set_color("skipping '{}'...".format(url), level=30))
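# a minimal call sketch; the kwargs mirror the option flags the function reads
# (all values shown are hypothetical):
__run_attacks(
    "http://example.com/php?id=2",
    sqlmap=opt.runSqliScan, nmap=opt.runPortScan, intel=opt.intelCheck,
    xss=opt.runXssScan, admin=opt.adminPanelFinder,
    verbose=opt.runInVerbose, batch=False, auto_start=False
)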
elif opt.spiderWebSite:
    problem_identifiers = ["http://", "https://"]
    if not URL_REGEX.match(opt.spiderWebSite):
        err_msg = "URL did not match a true URL{}..."
        if not any(m in opt.spiderWebSite for m in problem_identifiers):
            err_msg = err_msg.format(" issue seems to be that http:// "
                                     "or https:// is not present in the URL")
        else:
            err_msg = err_msg.format("")
        raise InvalidInputProvided(err_msg)
    else:
        if URL_QUERY_REGEX.match(opt.spiderWebSite):
            is_sure = prompt(
                "it is recommended not to use a URL that has a GET (query) parameter in it, "
                "would you like to continue", "yN"
            )
            if not is_sure.lower().startswith("y"):
                shutdown()
        blackwidow.blackwidow_main(opt.spiderWebSite, agent=agent_to_use,
                                   proxy=proxy_to_use, verbose=opt.runInVerbose)
        __run_attacks_main()

elif opt.fileToEnumerate is not None:
    __run_attacks_main()

else: