Example #1
    def __run_attacks_main():
        which_log_to_use = {
            "dork": URL_LOG_PATH,
            "spider": SPIDER_LOG_PATH
        }

        # if any dork-related option is set, use the dork URL log, otherwise the spider log
        options = (opt.useRandomDork, opt.dorkToUse, opt.dorkFileToUse, opt.fileToEnumerate)
        to_use = which_log_to_use["dork"] if any(options) else which_log_to_use["spider"]
        try:
            urls_to_use = get_latest_log_file(to_use)
        except TypeError:
            urls_to_use = None

        if urls_to_use is None:
            logger.error(set_color(
                "unable to run attacks appears that no file was created for the retrieved data...", level=40
            ))
            shutdown()
        options = [
            opt.runSqliScan, opt.runPortScan,
            opt.intelCheck, opt.adminPanelFinder,
            opt.runXssScan
        ]
        if any(options):
            with open(urls_to_use) as urls:
                for url in urls.readlines():
                    __run_attacks(
                        url.strip(),
                        sqlmap=opt.runSqliScan, nmap=opt.runPortScan,
                        intel=opt.intelCheck, xss=opt.runXssScan,
                        admin=opt.adminPanelFinder, verbose=opt.runInVerbose,
                        batch=opt.runInBatch, auto_start=opt.autoStartSqlmap
                    )
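
A minimal, self-contained sketch of the log-selection pattern above: a dict maps a mode name to a log path, and any() over the dork-related options picks the key. The path constants and option values here are stand-ins, not Zeus's real settings.

URL_LOG_PATH = "log/url-log"        # stand-in for the real constant
SPIDER_LOG_PATH = "log/spider-log"  # stand-in for the real constant

def pick_log(dork_options):
    which_log_to_use = {
        "dork": URL_LOG_PATH,
        "spider": SPIDER_LOG_PATH
    }
    # any() already returns a bool, so no `is True` comparison is needed
    return which_log_to_use["dork" if any(dork_options) else "spider"]

print(pick_log((False, "inurl:php?id=", None, None)))  # log/url-log
print(pick_log((False, None, None, None)))             # log/spider-log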
Example #2
def create_urls(url, payload_list, tamper=None):
    """
    create the tampered URLs, write them to a temporary file, and read them back from there
    """
    tf = tempfile.NamedTemporaryFile(delete=False)
    tf_name = tf.name
    with tf as tmp:
        for i, payload in enumerate(payload_list):
            if tamper:
                try:
                    # only emit the tamper warning for the first payload
                    payload = __tamper_payload(payload, tamper_type=tamper, warning=(i < 1))
                except InvalidTamperProvided:
                    logger.error(
                        set_color(
                            "you provided an invalid tamper script, acceptable tamper scripts are: {}..."
                            .format(" | ".join(list_tamper_scripts())), level=40))
                    shutdown()
            loaded_url = "{}{}\n".format(url.strip(), payload.strip())
            tmp.write(loaded_url)
    return tf_name
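
The round trip through tempfile.NamedTemporaryFile(delete=False) is the interesting part here: the file survives the with block, so the caller can reopen it by name. A runnable sketch with a trivial stand-in for __tamper_payload (the real tamper scripts are not shown in this excerpt):

import tempfile

def fake_tamper(payload):
    # stand-in tamper: replace spaces with inline SQL comments
    return payload.replace(" ", "/**/")

def create_urls_sketch(url, payload_list, tamper=False):
    tf = tempfile.NamedTemporaryFile(mode="w", delete=False)  # mode="w" for text on Python 3
    with tf as tmp:
        for payload in payload_list:
            if tamper:
                payload = fake_tamper(payload)
            tmp.write("{}{}\n".format(url.strip(), payload.strip()))
    return tf.name  # the file persists because delete=False

path = create_urls_sketch("http://example.com/index.php?id=1", ["' OR 1=1--"], tamper=True)
print(open(path).read())  # http://example.com/index.php?id=1'/**/OR/**/1=1--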
Example #3
def get_urls(query, url, verbose=False, warning=True, **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.
    """
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish it's task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))

    if verbose:
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        proxy_type = list(proxy.keys())  # list() so indexing also works on Python 3
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None

    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    try:
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    except ElementNotInteractableException:
        browser.execute_script("document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color(
            "alert present, closing...", level=30
        ))
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(set_color(
            "it appears that Google is attempting to block your IP address, attempting bypass...", level=30
        ))
        try:
            retval = bypass_ip_block(retval)
            do_continue = prompt(
                "zeus was able to successfully extract the URL from Google's ban URL. "
                "it is advised to shut down zeus and attempt to extract the URLs manually; "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown", opts="yN"
            )
            if not str(do_continue).lower().startswith("n"):  # shutdown and write the URL to a file
                write_to_log_file(retval, EXTRACTED_URL_LOG, "extracted-url-{}.log")
                logger.info(set_color(
                    "it is advised to use the built in blackwidow crawler with the extracted URL "
                    "(IE -b '{}')".format(retval)
                ))
                shutdown()
        except Exception as e:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.exception(set_color(
                "zeus was unable to extract the correct URL from the ban URL '{}', "
                "got exception '{}'...".format(
                    unquote(retval), e
                ), level=50
            ))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser...", level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval
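
The browser bootstrap above boils down to four pieces: a virtual display, an optional manual proxy, a profile with an overridden user-agent, and the Firefox driver. A condensed sketch of just that setup, assuming selenium 3.x and pyvirtualdisplay (the FirefoxProfile and proxy= APIs changed in selenium 4); the proxy address and user-agent string are example values, not from the source:

from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType

display = Display(visible=0, size=(800, 600))  # hide the automated browser window
display.start()

proxy_to_use = Proxy({
    "proxyType": ProxyType.MANUAL,
    "httpProxy": "127.0.0.1:8080",  # example address
    "sslProxy": "127.0.0.1:8080",
    "noProxy": ""
})

profile = webdriver.FirefoxProfile()
profile.set_preference("general.useragent.override", "Mozilla/5.0 (example UA)")

browser = webdriver.Firefox(profile, proxy=proxy_to_use)
try:
    browser.get("https://www.google.com")
    print(browser.current_url)
finally:
    browser.close()
    display.stop()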
Example #4
def parse_search_results(
        query, url_to_search, verbose=False, **kwargs):
    """
      Parse a webpage from Google for URLs with a GET (query) parameter
    """
    splitter = "&amp;"
    retval = set()
    query_url = None

    proxy_string, user_agent = kwargs.get("proxy", None), kwargs.get("agent", None)

    if verbose:
        logger.debug(set_color(
            "checking for user-agent and proxy configuration...", level=10
        ))

    user_agent_info = "adjusting user-agent header to {}..."
    if user_agent != DEFAULT_USER_AGENT:  # compare by value, not identity
        user_agent_info = user_agent_info.format(user_agent.strip())
    else:
        user_agent_info = user_agent_info.format("default user agent '{}'".format(DEFAULT_USER_AGENT))

    proxy_string_info = "setting proxy to {}..."
    if proxy_string is not None:
        proxy_string_info = proxy_string_info.format(
            ''.join(proxy_string.keys()) + "://" + ''.join(proxy_string.values()))
    else:
        proxy_string_info = "no proxy configuration detected..."

    headers = {
        "Connection": "close",
        "user-agent": user_agent
    }
    logger.info(set_color(
        "attempting to gather query URL..."
    ))
    try:
        query_url = get_urls(query, url_to_search, verbose=verbose, user_agent=user_agent, proxy=proxy_string)
    except Exception as e:
        if "WebDriverException" in str(e):
            logger.exception(set_color(
                "it seems that you exited the browser, please allow the browser "
                "to complete it's run so that Zeus can bypass captchas and API "
                "calls", level=50
            ))
        elif "'/usr/lib/firefoxdriver/webdriver.xpi'" in str(e):
            logger.fatal(set_color(
                "firefox was not found in the default location on your system, "
                "check your installation and make sure it is in /usr/lib, if you "
                "find it there, restart your system and try again...", level=50
            ))
        else:
            logger.exception(set_color(
                "{} failed to gather the URL from search engine, caught exception '{}' "
                "exception has been logged to current log file...".format(
                    os.path.basename(__file__), str(e).strip()), level=50)
            )
            request_issue_creation()
        shutdown()
    logger.info(set_color(
        "URL successfully gathered, searching for GET parameters..."
    ))

    logger.info(set_color(proxy_string_info))
    logger.info(set_color(user_agent_info))
    # headers must be sent with the request; updating them on the response object has no effect
    req = requests.get(query_url, proxies=proxy_string, headers=headers)
    found_urls = URL_REGEX.findall(req.text)
    url_skip_schema = ("maps.google", "play.google", "youtube")
    for urls in list(found_urls):
        for url in list(urls):
            url = unquote(url)
            if not any(u in url for u in url_skip_schema):
                if URL_QUERY_REGEX.match(url) and not any(l in url for l in URL_EXCLUDES):
                    if isinstance(url, unicode):
                        url = str(url).encode("utf-8")
                    if "webcache" in url:
                        logger.info(set_color(
                            "received webcache URL, extracting URL from webcache..."
                        ))
                        webcache_url = url
                        url = extract_webcache_url(webcache_url)
                        if url is None:
                            logger.warning(set_color(
                                "unable to extract url from given webcache URL '{}'...".format(
                                    webcache_url
                                ), level=30
                            ))
                    if verbose:
                        try:
                            logger.debug(set_color(
                                "found '{}'...".format(url.split(splitter)[0]), level=10
                            ))
                        except TypeError:
                            logger.debug(set_color(
                                "found '{}'...".format(str(url).split(splitter)[0]), level=10
                            ))
                        except AttributeError:
                            logger.debug(set_color(
                                "found '{}...".format(str(url)), level=10
                            ))
                    if url is not None:
                        retval.add(url.split(splitter)[0])
    logger.info(set_color(
        "found a total of {} URL's with a GET parameter...".format(len(retval))
    ))
    if len(retval) != 0:
        write_to_log_file(retval, URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.critical(set_color(
            "did not find any usable URL's with the given query '{}' "
            "using search engine '{}'...".format(query, url_to_search), level=50
        ))
        shutdown()
    return list(retval) if len(retval) != 0 else None
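
The filtering loop is easier to see in isolation. A self-contained sketch of the same skip-list and query-parameter checks, with simplified stand-ins for URL_REGEX and URL_QUERY_REGEX:

import re

URL_REGEX = re.compile(r"https?://[^\s\"'<>]+")      # simplified stand-in
URL_QUERY_REGEX = re.compile(r"https?://\S+\?\S+=")  # requires a GET parameter

html = """
<a href="http://example.com/item.php?id=22&amp;page=2">hit</a>
<a href="http://maps.google.com/?q=here">skipped host</a>
<a href="http://example.com/about">no query string</a>
"""

url_skip_schema = ("maps.google", "play.google", "youtube")
retval = set()
for url in URL_REGEX.findall(html):
    if any(u in url for u in url_skip_schema):
        continue  # known non-target hosts
    if URL_QUERY_REGEX.match(url):
        retval.add(url.split("&amp;")[0])  # drop trailing encoded parameters

print(retval)  # {'http://example.com/item.php?id=22'}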
Example #5
def request_issue_creation():
    question = prompt(
        "would you like to create an anonymous issue and post it to Zeus's Github",
        opts="yN")
    if question.lower().startswith("n"):
        logger.error(
            set_color(
                "Zeus has experienced an internal error and cannot continue, shutting down...",
                level=40))
        shutdown()

    fix_log_file()
    logger.info(
        set_color(
            "Zeus got an unexpected error and will automatically create an issue for this error, please wait..."
        ))

    def __extract_stacktrace(file_data):
        logger.info(set_color("extracting traceback from log file..."))
        retval, buff_mode, _buffer = [], False, ""
        with open(file_data, "r+") as log:
            for line in log:
                if "Traceback" in line:
                    buff_mode = True
                if line and len(line) < 5:
                    buff_mode = False
                    retval.append(_buffer)
                    _buffer = ""
                if buff_mode:
                    if len(line) > 400:
                        line = line[:400] + "...\n"
                    _buffer += line
        return "".join(retval)

    logger.info(set_color("getting authorization..."))

    encoded = __get_encoded_string()
    n = get_decode_num(encoded)
    token = decode(n, encoded)

    current_log_file = get_latest_log_file(CURRENT_LOG_FILE_PATH)
    stacktrace = __extract_stacktrace(current_log_file)
    issue_title = stacktrace.split("\n")[-2]

    issue_data = {
        "title": issue_title,
        "body": "Zeus version:\n`{}`\n\n"
                "Error info:\n```{}```\n\n"
                "Running details:\n`{}`\n\n"
                "Commands used:\n`{}`\n\n"
                "Log file info:\n```{}```".format(VERSION, str(stacktrace),
                                                  str(platform.platform()),
                                                  " ".join(sys.argv),
                                                  open(current_log_file).read()),
    }

    _json_data = json.dumps(issue_data)
    if sys.version_info > (3, ):
        _json_data = _json_data.encode("utf-8")

    try:
        req = urllib2.Request(
            url="https://api.github.com/repos/ekultek/zeus-scanner/issues",
            data=_json_data,
            headers={"Authorization": "token {}".format(token)})
        urllib2.urlopen(req, timeout=10).read()
        logger.info(
            set_color(
                "issue has been created successfully with the following name '{}'..."
                .format(issue_title)))
    except Exception as e:
        logger.exception(
            set_color("failed to auto create the issue, got exception '{}', "
                      "you may manually create an issue...".format(e),
                      level=50))
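
The traceback extraction in __extract_stacktrace is a small state machine: "Traceback" flips buffering on, and a near-empty line flips it off and flushes the buffer. The same logic run against an in-memory sample instead of Zeus's log file:

sample_log = """INFO normal log line
Traceback (most recent call last):
  File "zeus.py", line 10, in <module>
    main()
ValueError: boom

INFO more normal logging
"""

retval, buff_mode, _buffer = [], False, ""
for line in sample_log.splitlines(True):
    if "Traceback" in line:
        buff_mode = True          # start buffering at the traceback header
    if line and len(line) < 5:
        buff_mode = False         # a near-empty line ends the traceback block
        retval.append(_buffer)
        _buffer = ""
    if buff_mode:
        _buffer += line
print("".join(retval))  # prints only the traceback block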
Example #6
    def __run_attacks(url, **kwargs):
        """
        run the attacks if any are requested
        """
        nmap = kwargs.get("nmap", False)
        sqlmap = kwargs.get("sqlmap", False)
        intel = kwargs.get("intel", False)
        xss = kwargs.get("xss", False)
        admin = kwargs.get("admin", False)
        verbose = kwargs.get("verbose", False)
        batch = kwargs.get("batch", False)
        auto_start = kwargs.get("auto_start", False)

        __enabled_attacks = {
            "sqlmap": opt.runSqliScan,
            "port": opt.runPortScan,
            "xss": opt.runXssScan,
            "admin": opt.adminPanelFinder,
            "intel": opt.intelCheck
        }

        # collect every enabled attack type; only one may run per invocation
        enabled = {key for key, value in __enabled_attacks.items() if value is True}
        if len(enabled) > 1:
            logger.error(set_color(
                "it appears that you have enabled multiple attack types, "
                "as of now only 1 attack is supported at a time, choose "
                "your attack and try again. You can use the -f flag if "
                "you do not want to complete an entire search again...", level=40
            ))
            shutdown()

        if not batch:
            question = prompt(
                "would you like to process found URL: '{}'".format(url), opts=["y", "N"]
            )
        else:
            question = "y"

        if question.lower().startswith("y"):
            if sqlmap:
                return sqlmap_scan.sqlmap_scan_main(
                    url.strip(), verbose=verbose,
                    opts=__create_arguments(sqlmap=True), auto_start=auto_start)
            elif nmap:
                url_ip_address = replace_http(url.strip())
                return nmap_scan.perform_port_scan(
                    url_ip_address, verbose=verbose,
                    opts=__create_arguments(nmap=True)
                )
            elif intel:
                url = get_true_url(url)
                return intel_me.main_intel_amt(
                    url, agent=agent_to_use,
                    proxy=proxy_to_use, do_ip=opt.runAgainstIpAddress
                )
            elif admin:
                main(
                    url, show=opt.showAllConnections,
                    verbose=verbose, do_threading=opt.threadPanels
                )
            elif xss:
                main_xss(
                    url, verbose=verbose, proxy=proxy_to_use,
                    agent=agent_to_use, tamper=opt.tamperXssPayloads
                )
            else:
                pass
        else:
            logger.warning(set_color(
                "skipping '{}'...".format(url), level=30
            ))
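
The mutual-exclusion guard at the top of __run_attacks generalizes to a small helper. A sketch with plain booleans standing in for the opt.* attributes:

def single_attack_or_none(**attacks):
    # collect every enabled attack; Zeus only supports one at a time
    enabled = {name for name, on in attacks.items() if on}
    if len(enabled) > 1:
        raise SystemExit("multiple attack types enabled, choose one of: {}".format(
            ", ".join(sorted(enabled))))
    return enabled.pop() if enabled else None

print(single_attack_or_none(sqlmap=True, nmap=False, xss=False))   # sqlmap
print(single_attack_or_none(sqlmap=False, nmap=False, xss=False))  # None
# single_attack_or_none(sqlmap=True, xss=True) raises SystemExit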
Example #7
        logger.info(set_color(
            "there are a total of {} arguments understood by sqlmap API, "
            "they include:".format(len(SQLMAP_API_OPTIONS))
        ))
        print("\n")
        for arg in SQLMAP_API_OPTIONS:
            print(
                "[*] {}".format(arg)
            )
        print("\n")
        logger.info(set_color(
            "for more information about sqlmap arguments, see here '{}'...".format(
                SQLMAP_MAN_PAGE_URL
            )
        ))
        shutdown()

    if opt.showNmapArgs:
        logger.info(set_color(
            "there are a total of {} arguments understood by nmap, they include:".format(
                len(NMAP_API_OPTS)
            )
        ))
        print("\n")
        for arg in NMAP_API_OPTS:
            print(
                "[*] {}".format(arg)
            )
        print("\n")
        logger.info(set_color(
            "for more information on what the arguments do, please see here '{}'...".format(
                NMAP_MAN_PAGE_URL  # assumed constant, mirroring SQLMAP_MAN_PAGE_URL above
            )
        ))
        shutdown()