Code Example #1
    def load_scripts(self):
        """
        Load every script under the configured scripts directory (plugins or tampers)
        and return a list of all successfully imported modules.

        :return: retval, a list of all imported modules
        """
        retval = []
        # file_list holds every file under the scripts directory, excluding anything matching skip_schema
        file_list = [
            f for f in os.listdir(self.files)
            if not any(s in f for s in self.skip_schema)
        ]

        for script in sorted(file_list):
            # strip the ".py" extension
            script = script[:-3]
            if self.verbose:
                debug("loading {} script '{}'".format(self.script_type,
                                                      script))
            try:
                script = importlib.import_module(self.path.format(script))
                retval.append(script)
            except Exception:
                warning(
                    "failed to load tamper '{}', pretending it doesn't exist".
                    format(script))

        # list of all imported modules
        return retval
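
For context, here is a minimal standalone sketch of the same directory-scan-and-import pattern used by load_scripts above. The skip_schema values and the import template are assumptions; the real class reads them from its constructor.

import importlib
import os


def load_scripts(directory, import_template, skip_schema=(".pyc", "__init__", ".pyo")):
    """Minimal standalone version of the loader above (assumed defaults)."""
    modules = []
    for name in sorted(
            f for f in os.listdir(directory)
            if f.endswith(".py") and not any(s in f for s in skip_schema)):
        name = name[:-3]  # strip the ".py" extension
        try:
            modules.append(importlib.import_module(import_template.format(name)))
        except Exception:
            print("failed to load '{}'".format(name))
    return modules


# e.g. load_scripts("content/tampers", "content.tampers.{}")
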
Code Example #2
def produce_results(found_tampers):
    """
    produce the results of the tamper scripts, if any, in a nicer output format

    :param found_tampers: all tampers that were found to work against the target
    :return: None
    """

    spacer = "-" * 30
    if len(found_tampers) > 0:
        success("apparent working tampers for target:")

        print(spacer)
        for i, tamper in enumerate(found_tampers, start=1):
            description, example, load = tamper
            try:
                load = str(load).split(" ")[1].split("'")[1]
            except IndexError:
                pass
            print(
                "(#{}) description: tamper payload by {}\nexample: '{}'\nload path: {}"
                .format(i, description, example, load))
            if i != len(found_tampers):
                print("\n")
        print(spacer)
    else:
        warning("no valid bypasses discovered with provided payloads")
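
The load = str(load).split(" ")[1].split("'")[1] line above parses a module's repr to recover its dotted import path. A small demonstration of why that works, using the standard-library json module as a stand-in for a tamper script:

import importlib

# repr() of a module looks like:  <module 'json' from '/usr/lib/python3.x/json/__init__.py'>
mod = importlib.import_module("json")
name = str(mod).split(" ")[1].split("'")[1]
print(name)  # -> json

# the IndexError guard above covers objects whose repr does not have that
# "<module 'name' ...>" shape, in which case the raw value is printed instead
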
Code Example #3
def create_fingerprint(url,
                       content_obj,
                       status,
                       headers,
                       req_data=None,
                       speak=False):
    """
    create the unknown firewall fingerprint file

    :param url: url
    :param content_obj: BeautifulSoup Object
    :param status: response.status_code
    :param headers: response.headers
    :param req_data: request method plus URL, e.g. -> "GET https://example.org"
    :param speak: default False; if speak=True, print the "fingerprint saved to '{}'" message
    :return: the full path of the saved fingerprint file, e.g. -> ~/.whatwaf/fingerprints/FILENAME
    """

    # UNKNOWN_PROTECTION_FINGERPRINT_PATH = "{}/fingerprints".format(HOME)
    # why does --fingerprint save into UNKNOWN_PROTECTION_FINGERPRINT_PATH rather than a dedicated path?
    if not os.path.exists(UNKNOWN_PROTECTION_FINGERPRINT_PATH):
        os.makedirs(UNKNOWN_PROTECTION_FINGERPRINT_PATH)

    __replace_http = lambda x: x.split("/")
    # strips the URL down to its host but hard-codes http:// -- what if the original URL was https?
    # (see the sketch after this example)
    __replace_specifics = lambda u: "http://{}".format(u.split("/")[2])

    try:
        # e.g. original url -> http://www.example.com/index?id=5
        # becomes -> http://www.example.com
        url = __replace_specifics(url)
    except Exception:
        warning(
            "full URL will be displayed to the public if an issue is created")
        url = url

    # assemble the fingerprint as <!--\n{}\nStatus code: {}\n{}\n-->\n{}
    fingerprint = "<!--\n{}\nStatus code: {}\n{}\n-->\n{}".format(
        "GET {} HTTP/1.1".format(url)
        if req_data is None else "{} HTTP/1.1".format(req_data), str(status),
        '\n'.join("{}: {}".format(h, k) for h, k in headers.items()),
        str(content_obj))

    # extract the hostname, e.g. -> www.example.org
    filename = __replace_http(url)[2]
    # probably unnecessary, not every domain uses a www prefix
    # if "www" not in filename:
    #     filename = "www.{}".format(filename)

    full_file_path = "{}/{}".format(UNKNOWN_PROTECTION_FINGERPRINT_PATH,
                                    filename)

    if not os.path.exists(full_file_path):
        with open(full_file_path, "a+") as log:
            log.write(fingerprint)
        if speak:
            success("fingerprint saved to '{}'".format(full_file_path))
    # return the full path of the saved fingerprint file
    return full_file_path
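
As the comment above notes, __replace_specifics hard-codes http:// even when the target was reached over HTTPS. A sketch of a scheme-preserving alternative based on urlparse (not what the project does, just an illustration):

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2


def strip_to_base(url):
    parsed = urlparse(url)
    # keep whatever scheme the caller used instead of forcing http://
    return "{}://{}".format(parsed.scheme or "http", parsed.netloc)


print(strip_to_base("https://www.example.com/index?id=5"))  # https://www.example.com
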
Code Example #4
    def threader(self):
        # pull the next (url, payload vector) pair off the queue; blocks until one is available
        while True:
            url_thread, waf_vector = self.threading_queue.get()
            try:
                if self.verbose:
                    debug("trying: '{}'".format(url_thread))

                response_retval = get_page(
                    url_thread,
                    user_agent=self.agent,
                    proxy=self.proxy,
                    provided_headers=self.provided_headers,
                    throttle=self.throttle,
                    timeout=self.timeout,
                    request_method=self.request_type,
                    post_data=self.post_data)
                self.response_retval_list.append(response_retval)

                _, response_status_code, _, _ = response_retval
                if self.verbose:
                    info('response status code: {}'.format(
                        response_status_code))

            except Exception as e:
                if "ECONNRESET" in str(e):
                    warning(
                        "possible network level firewall detected (hardware), received an aborted connection"
                    )
                    self.response_retval_list.append(None)
                else:
                    error(
                        "failed to obtain target meta-data with payload {}, error: '{}'"
                        .format(waf_vector.strip(), str(e)))
                    self.response_retval_list.append(None)

                    # temporarily disabled
                    # if self.save_fingerprint:
                    #     create_fingerprint(
                    #         self.url,
                    #         self.response_retval_list[0][2],
                    #         self.response_retval_list[0][1],
                    #         self.response_retval_list[0][3],
                    #         req_data=self.response_retval_list[0][0],
                    #         speak=True
                    #     )

            self.threading_queue.task_done()
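
threader() is a queue consumer; the surrounding class (not shown here) presumably fills self.threading_queue with (url, vector) pairs and starts several daemon threads running this method. A minimal sketch of that wiring, with the thread count and payloads chosen arbitrarily:

import threading

try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

work = queue.Queue()


def worker():
    while True:
        url, vector = work.get()
        try:
            print("would request {} (payload {!r})".format(url, vector))
        finally:
            work.task_done()


for _ in range(4):  # thread count is an arbitrary choice here
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()

for vector in ("' OR 1=1--", "<script>alert(1)</script>"):
    work.put(("http://example.com/?q={}".format(vector), vector))

work.join()  # blocks until task_done() has been called for every item
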
Code Example #5
def check_version(speak=True):
    """
    check the version number for updates
    """
    version_url = "https://raw.githubusercontent.com/Ekultek/WhatWaf/master/lib/settings.py"
    req = requests.get(version_url)
    content = req.text
    current_version = re.search(
        "VERSION.=.(.......)?",
        content).group().split("=")[-1].strip().strip('"')
    my_version = VERSION
    if current_version != my_version:
        if speak:
            warning("new version: {} is available".format(current_version))
        else:
            return False
    else:
        if not speak:
            return True
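
The regex above pulls the version string out of the remote settings.py. Assuming that file contains a line such as VERSION = "2.0.4" (the exact formatting is an assumption), the extraction works like this:

import re

content = 'VERSION = "2.0.4"'  # assumed shape of the line in lib/settings.py
match = re.search("VERSION.=.(.......)?", content)
print(match.group())                                     # VERSION = "2.0.4"
print(match.group().split("=")[-1].strip().strip('"'))   # 2.0.4
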
Code Example #6
def normalization_url(url, ssl=False):
    """
    check if a protocol is given in the URL if it isn't we'll auto assign it
    """

    # PROTOCOL_DETECTION.search(url) checks whether the URL already has an http:// or https:// scheme
    if PROTOCOL_DETECTION.search(url) is None:
        if ssl:
            warning("no protocol discovered, assigning HTTPS (SSL)")
            return "https://{}".format(url.strip())
        else:
            warning("no protocol discovered, assigning HTTP")
            return "http://{}".format(url.strip())
    else:
        if ssl:
            info("forcing HTTPS (SSL) connection")
            items = PROTOCOL_DETECTION.split(url)
            item = items[-1].split("://")
            item[0] = "https://"
            return ''.join(item)
        else:
            return url.strip()
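
A self-contained demo of the same branching, with PROTOCOL_DETECTION approximated by a simple regex (the real pattern lives in the project's settings and may differ):

import re

PROTOCOL_DETECTION = re.compile(r"^(http|https)", re.I)  # assumed approximation


def demo(url, ssl=False):
    if PROTOCOL_DETECTION.search(url) is None:
        return ("https://" if ssl else "http://") + url.strip()
    if ssl:
        items = PROTOCOL_DETECTION.split(url)
        item = items[-1].split("://")
        item[0] = "https://"
        return "".join(item)
    return url.strip()


print(demo("example.com/?id=1"))                   # http://example.com/?id=1
print(demo("example.com/?id=1", ssl=True))         # https://example.com/?id=1
print(demo("http://example.com/?id=1", ssl=True))  # https://example.com/?id=1
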
Code Example #7
def display_cached(urls, payloads):
    """
    display the database information in a neat format

    This simply dumps everything from the cached_payloads and cached_urls tables and pretty-prints it.
    """
    if urls is not None:
        if len(urls) != 0:
            info("cached URLs:")
            # enumerate pairs each element of an iterable (e.g. a list or tuple) with a running index (starting at 0)
            for i, cached in enumerate(urls):
                # i is the index, starting at 0
                _, netlock, prots, tamps, server = cached
                print(colored("{}".format("-" * 20), 'white'))
                # print a table-like block to visualize the cached URL
                print(
                    "{sep} URL: {url}\n{sep} Identified Protections: {protect}\n"
                    "{sep} Working tampers: {tamp}\n{sep} Web server: {server}"
                    .format(sep=colored("|", 'white'),
                            url=netlock,
                            protect=prots,
                            tamp=tamps,
                            server=server))
            print(colored("{}".format("-" * 20), 'white'))
        else:
            warning("there are no cached URLs in the database")

    if payloads is not None:
        if len(payloads) != 0:
            info("cached payloads:")
            print(colored("{}".format("-" * 20), 'white'))
            for i, payload in enumerate(payloads, start=1):
                print("{} {} {}".format(colored("#" + str(i), 'white'),
                                        colored("-->", 'white'), payload[-1]))
            print(colored("{}".format("-" * 20), 'white'))
        else:
            warning("no payloads have been cached into the database")
Code Example #8
def display_found_tampers(found_tampers):
    """
    produce the results of the tamper scripts, if any
    this function pretty-prints found_tampers whenever it contains anything

    :param found_tampers: a set whose elements look like ->
                           working_tampers.add((
                               tamper.__type__,
                               tamper.tamper(tamper.__example_payload__),
                               tamper
                           ))
    :return: None
    """

    spacer = colored("-" * 30, 'white')

    if len(found_tampers) > 0:
        success("apparent working tampers for target:")

        print(spacer)
        for i, tamper in enumerate(found_tampers, start=1):
            description, example, load_tamper = tamper
            # # very likely to raise an error (see the sketch after this example for a safer alternative)
            # try:
            #     load_tamper = str(load_tamper).split(" ")[1].split("'")[1]
            # except IndexError:
            #     pass

            print(
                "(#{}) description: tamper payload by {}\nexample: '{}'\nload tamper: {}"
                .format(i, description, example, load_tamper))

            if i != len(found_tampers):
                print("\n")
        print(spacer)
    else:
        warning("no valid bypasses discovered with provided payloads")
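
Rather than parsing the module repr (the commented-out block above), an imported tamper module can report its own dotted path via __name__, which is what makes skipping that parsing safer. For example:

import importlib

mod = importlib.import_module("json")  # stand-in for a tamper module
print(mod.__name__)                            # json
print(getattr(mod, "__file__", "<built-in>"))  # path the module was loaded from
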
Code Example #9
def get_working_tampers(url, norm_response, payloads, **kwargs):
    """
    gather working tamper scripts

    working_tampers = set()
    working_tampers.add((tamper.__type__, tamper.tamper(tamper.__example_payload__), tamper)); each element is a tuple

    :param url: url
    :param norm_response: the response to the normal (un-payloaded) request
    :param payloads: payloads
    :param kwargs: assorted optional arguments
    :return: working_tampers, see the description above
    """

    proxy = kwargs.get("proxy", None)
    agent = kwargs.get("agent", None)
    verbose = kwargs.get("verbose", False)
    # stop after at most 5 working tampers; otherwise every tamper/payload combination is tried
    tamper_int = kwargs.get("tamper_int", 5)
    provided_headers = kwargs.get("provided_headers", None)
    throttle = kwargs.get("throttle", 0)
    request_timeout = kwargs.get("timeout", 15)
    if request_timeout is None:
        warning(
            "issue occurred and the timeout resolved to None, defaulting to 15")
        request_timeout = 15

    failed_schema = (
        re.compile("404", re.I), re.compile("captcha", re.I),
        re.compile("illegal", re.I), re.compile("blocked", re.I),
        re.compile("ip.logged", re.I), re.compile("ip.address.logged", re.I),
        re.compile("not.acceptable", re.I), re.compile("access.denied", re.I),
        re.compile("forbidden", re.I), re.compile("400", re.I)
    )

    info("loading payload tampering scripts")
    # returns a list of the modules imported from the tampers directory
    tampers = ScriptQueue(TAMPERS_DIRECTORY,
                          TAMPERS_IMPORT_TEMPLATE,
                          verbose=verbose).load_scripts()
    success("payload tampering scripts loaded successfully")

    if tamper_int > len(tampers):
        warning(
            "the amount of tampers provided is higher than the amount of tampers available, "
            "ALL tampers will be tried (slow!)")

        tamper_int = len(tampers)

    # working_tampers.add((tamper.__type__, tamper.tamper(tamper.__example_payload__), tamper)); each element is a tuple
    working_tampers = set()
    _, normal_status, _, _ = norm_response

    info("running tampering bypass checks")

    for tamper in tampers:
        if verbose:
            try:
                # str(tamper).split(" ")[1] can raise a "list index out of range" error here
                # debug("currently tampering with script '{}'".format(str(tamper).split(" ")[1].split(".")[-1]))
                debug("currently tampering with script '{}'".format(
                    str(tamper)))
            except Exception:
                pass

        for vector in payloads:
            vector = tamper.tamper(vector)

            if verbose:
                payload('using payload: {}'.format(vector.strip()))

            payloaded_url = "{}{}".format(url, vector)

            # request the URL with the payload appended
            # note: these requests should probably be added to the total request count as well
            _, status, html, _ = get_page(payloaded_url,
                                          user_agent=agent,
                                          proxy=proxy,
                                          provided_headers=provided_headers,
                                          throttle=throttle,
                                          timeout=request_timeout)

            if not find_failures(str(html), failed_schema):
                if verbose:
                    if status != 0:
                        debug("response code: {}".format(status))
                    else:
                        debug("unknown response detected")

                if status != 404:
                    if status == 200:
                        try:
                            working_tampers.add(
                                (tamper.__type__,
                                 tamper.tamper(tamper.__example_payload__),
                                 tamper))
                        except Exception:
                            pass
            else:
                if verbose:
                    warning("failure found in response content")

            if len(working_tampers) == tamper_int:
                break

        if len(working_tampers) == tamper_int:
            break

    return working_tampers
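
find_failures() is called above but not shown; judging by how it is used it presumably just checks whether any of the compiled failure patterns appear in the page body. A minimal sketch under that assumption:

import re


def find_failures(html, schema):
    # True as soon as any "failure" pattern matches the page content
    return any(pattern.search(html) is not None for pattern in schema)


failed_schema = (re.compile("404", re.I), re.compile("access.denied", re.I))
print(find_failures("<h1>Access Denied</h1>", failed_schema))  # True
print(find_failures("<h1>Welcome</h1>", failed_schema))        # False
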
Code Example #10
def detection_main(
        url,
        payloads,
        cursor,
        request_type="GET",
        post_data=None,
        user_agent=get_random_agent(),
        provided_headers=None,
        proxy=None,
        verbose=False,
        skip_bypass_check=False,
        verification_number=None,
        # temporarily disabled, not of much use
        # fingerprint_waf=False,
        formatted=False,
        tamper_int=5,
        use_yaml=False,
        use_json=False,
        use_csv=False,
        traffic_file=None,
        throttle=0,
        request_timeout=15,
        # determine_server should be enabled by default
        # determine_server=False,
        threaded=None,
        force_file_creation=False,
        save_file_copy_path=None):
    """
    main detection function

    :param url: url
    :param payloads: payloads
    :param cursor: database cursor
    :param request_type: GET or POST
    :param post_data: POST data to send
    :param user_agent: User-Agent
    :param provided_headers: custom headers (dict)
    :param proxy: proxy
    :param verbose: verbose mode, default False
    :param skip_bypass_check: skip payload bypass check
    :param verification_number: passed to check_if_matched as the verification threshold (default 5)
    :param formatted: write the results in a structured format
    :param tamper_int: maximum number of working tampers to gather
    :param use_yaml: write output as YAML
    :param use_json: write output as JSON
    :param use_csv: write output as CSV
    :param traffic_file: file to save the raw HTTP traffic to
    :param throttle: sleep time between requests
    :param request_timeout: per-request timeout
    :param threaded: run the detection requests through the threaded queue
    :param force_file_creation: create the output file even if nothing was identified
    :param save_file_copy_path: path to save a copy of the output file to
    :return: request_count, the total number of requests sent
    """

    # normalize the URL just to be safe
    url = normalization_url(url)
    if url[-1] != "/":
        url += "/"

    current_url_netloc = urlparse.urlparse(url).netloc

    # if no query (parameter) is detected in the URL it may interfere with the results; what about POST requests?
    if URL_QUERY_REGEX.search(
            str(url)) is None and request_type.lower() == "get":
        warning(
            "URL does not appear to have a query (parameter), this may interfere with the detection results"
        )

    # place the attack payload wherever a '*' appears in the URL?
    if '*' in url:
        choice = prompt(
            "custom placement marker found in URL `*` would you like to use it to place the attacks",
            "yN")
        if choice.lower().startswith("y"):
            use_placement = True
        else:
            use_placement = False
    else:
        use_placement = False

    if use_yaml:
        file_path = YAML_FILE_PATH
    elif use_json:
        file_path = JSON_FILE_PATH
    elif use_csv:
        file_path = CSV_FILE_PATH
    else:
        file_path = None

    try:
        file_start = url.split("/")[2].split(".")[1]

        if use_json:
            ext = ".json"
        elif use_yaml:
            ext = ".yaml"
        elif use_csv:
            ext = ".csv"
        else:
            ext = '.txt'

        filename = "{}{}".format(file_start, ext)
    except:
        if use_json:
            file_type = "json"
        elif use_csv:
            file_type = 'csv'
        elif use_yaml:
            file_type = 'yaml'
        else:
            file_type = 'txt'

        filename = random_string(length=10, file_type=file_type)

    info("request type: {}".format(request_type))

    # check for missing or empty POST data
    if request_type.lower() == 'post':
        if post_data is None or len(post_data) == 0:
            warning("no POST string supplied generating random")
            post_data = generate_random_post_string()
            info("random POST string to be sent: '{}'".format(post_data))
        else:
            info("POST string to be sent: '{}'".format(post_data))

    # raise if the URL is not valid
    if validate_url(url) is None:
        raise InvalidURLProvided

    info("gathering HTTP responses")

    if threaded:
        # a thread count was specified
        responses_list = DetectionQueue(
            url,
            payloads,
            proxy=proxy,
            agent=user_agent,
            verbose=verbose,
            provided_headers=provided_headers,
            traffic_file=traffic_file,
            throttle=throttle,
            timeout=request_timeout,
            request_type=request_type,
            post_data=post_data,
            threaded=threaded,
            placement=use_placement).threaded_get_response()
    else:
        # responses_list is a collection of tuples like
        # ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}")
        responses_list = DetectionQueue(
            url,
            payloads,
            request_type=request_type,
            post_data=post_data,
            provided_headers=provided_headers,
            agent=user_agent,
            proxy=proxy,
            verbose=verbose,
            # save_fingerprint=fingerprint_waf,
            # --traffic FILENAME
            # traffic_file=traffic_file,
            throttle=throttle,
            timeout=request_timeout,
            placement=use_placement).get_response()

    # --traffic was given, save the raw traffic to that file
    if traffic_file is not None:
        with open(traffic_file, "a+") as traffic:
            for i, item in enumerate(responses_list, start=1):
                param, status_code, content, headers = item
                traffic.write(
                    "HTTP Request #{}\n{}\nRequest Status Code: {}\n<!--\n{} HTTP/1.1\n{}\n-->{}\n\n\n"
                    .format(
                        i, "-" * 30, status_code, param, "\n".join([
                            "{}: {}".format(h, v) for h, v in headers.items()
                        ]), content))

    info("gathering normal response to compare against")

    # the requests above used payloaded URLs plus the random homepage probes; this requests the original URL, but what about a '*' left in it?
    normal_response = get_page(url,
                               proxy=proxy,
                               user_agent=user_agent,
                               provided_headers=provided_headers,
                               throttle=throttle,
                               timeout=request_timeout,
                               request_method=request_type,
                               post_data=post_data)

    # --determine-webserver
    # simply checks the Server field of the response headers, e.g. Apache2
    # enabled by default
    # if determine_server:
    found_webserver = None
    # responses_list is a collection of tuples like
    # ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}")
    headers = {}
    for resp in responses_list:
        if resp is not None:
            # only the headers of the last non-empty response survive this loop
            headers = resp[-1]

    for k in headers.keys():
        if k.lower() == "server":
            found_webserver = headers[k]
            break
    if found_webserver is None:
        warning("unable to determine web server")
    else:
        success("web server determined as: {}".format(found_webserver))

    # load all plugins and get back the list of imported plugin modules
    info("loading firewall detection scripts")
    loaded_plugins = ScriptQueue(PLUGINS_DIRECTORY,
                                 PLUGINS_IMPORT_TEMPLATE,
                                 verbose=verbose).load_scripts()
    success("firewall detection scripts loaded successfully")

    info("running firewall detection checks")

    # plus one for get_page() call
    request_count = len(responses_list) + 1
    amount_of_products = 0
    detected_protections = set()
    # temp = []
    for item in responses_list:
        item = item if item is not None else normal_response
        _, status, html, headers = item

        for plugin in loaded_plugins:
            try:
                if plugin.detect(str(html), status=status,
                                 headers=headers) is True:
                    # leaving this aside for now
                    # temp.append(plugin.__product__)
                    # a plugin's __product__ should never be Unknown Firewall
                    # if plugin.__product__ == UNKNOWN_FIREWALL_NAME and len(temp) == 1 and status != 0:
                    #     warning("unknown firewall detected saving fingerprint to log file")
                    #     path = create_fingerprint(url, html, status, headers)
                    #     return request_firewall_issue_creation(path)
                    # else:
                    #     detected_protections.add(plugin.__product__)
                    detected_protections.add(plugin.__product__)
            except Exception:
                pass

    if len(detected_protections) > 0:
        if UNKNOWN_FIREWALL_NAME not in detected_protections:
            amount_of_products += 1

        if len(detected_protections) > 1:
            for i, _ in enumerate(list(detected_protections)):
                amount_of_products += 1

    if amount_of_products == 1:
        # take the __product__ of the detected protection, usually there is only one
        detected_protections = list(detected_protections)[0]

        success(
            "detected website protection identified as '{}', searching for bypasses"
            .format(detected_protections))

        # --skip was not given
        if not skip_bypass_check:
            # get_working_tampers() returns the working_tampers set
            # working_tampers = set()
            # working_tampers.add((tamper.__type__, tamper.tamper(tamper.__example_payload__), tamper)); each element is a tuple
            found_working_tampers = get_working_tampers(
                url,
                normal_response,
                payloads,
                proxy=proxy,
                agent=user_agent,
                verbose=verbose,
                tamper_int=tamper_int,
                provided_headers=provided_headers,
                throttle=throttle,
                timeout=request_timeout)

            # without --format, just pretty-print the results
            if not formatted:
                # display_found_tampers pretty-prints found_working_tampers
                display_found_tampers(found_working_tampers)
            else:
                # dictify_output returns json_retval, which is made up of a dict
                # of the form {
                #     "url": url,
                #     "identified firewall": detect_firewalls,
                #     "is protected": True,
                #     "apparent working tampers": <the tampers that worked>
                # }
                dict_data_output = dictify_output(url, detected_protections,
                                                  found_working_tampers)

                # write to file
                # note: file_path may be None, since CSV, JSON or YAML is not necessarily specified
                if file_path:
                    written_file_path = write_to_file(
                        filename,
                        file_path,
                        dict_data_output,
                        write_csv=use_csv,
                        write_yaml=use_yaml,
                        write_json=use_json,
                        save_copy_to=save_file_copy_path)
                    if written_file_path is not None:
                        info("data has been written to file: '{}'".format(
                            written_file_path))
            """
            cached_urls table fields ->
            id
            uri
            working_tampers DEFAULT 'N/A'
            identified_protections DEFAULT 'N/A'
            identified_webserver DEFAULT 'N/A'
            """
            inserted_into_database_results = insert_url(
                # found_webserver comes from the Server field of the response headers
                # found_working_tampers and detected_protections are joined with "," when there is more than one
                cursor,
                current_url_netloc,
                found_working_tampers,
                detected_protections,
                webserver=found_webserver)
        else:
            # --skip was given, so the tampers field will not be populated
            warning("skipping bypass checks")

            # --format
            if formatted:
                # formatted output
                dict_data_output = dictify_output(url, detected_protections,
                                                  [])

                # write to file
                # note: file_path may be None, since CSV, JSON or YAML is not necessarily specified
                written_file_path = write_to_file(
                    filename,
                    file_path,
                    dict_data_output,
                    write_csv=use_csv,
                    write_yaml=use_yaml,
                    write_json=use_json,
                    save_copy_to=save_file_copy_path)

                # i.e. any of json, csv or yaml was specified
                if written_file_path is not None:
                    info("data has been written to file: '{}'".format(
                        written_file_path))

            if isinstance(detected_protections, str):
                # wrap the single string in a list
                detected_protections = [detected_protections]

            # --skip was chosen, so the tamper stage is skipped
            inserted_into_database_results = insert_url(
                cursor,
                current_url_netloc, [],
                detected_protections,
                webserver=found_webserver)

    elif amount_of_products == 0:
        # nothing found
        warning("no protection identified on target, verifying")

        if verification_number is None:
            verification_number = 5

        verification_normal_response = get_page(
            url,
            proxy=proxy,
            user_agent=user_agent,
            provided_headers=provided_headers,
            throttle=throttle,
            timeout=request_timeout,
            request_method=request_type,
            post_data=post_data)

        # grab the fourth payload from the default payload list
        payloaded_url = "{}{}".format(url, WAF_REQUEST_DETECTION_PAYLOADS[3])

        verification_payloaded_response = get_page(
            payloaded_url,
            proxy=proxy,
            user_agent=user_agent,
            provided_headers=provided_headers,
            throttle=throttle,
            timeout=request_timeout,
            request_method=request_type,
            post_data=post_data)

        # check_if_matched returns a set of findings, or None; when the normal and payloaded
        # responses look the same it will certainly return None
        results = check_if_matched(verification_normal_response,
                                   verification_payloaded_response,
                                   verified=verification_number)

        if results is not None:
            data_sep = colored("-" * 30, 'white')
            info(
                "target seems to be behind some kind of protection for the following reasons:"
            )

            print(data_sep)
            for i, content in enumerate(results, start=1):
                print("[{}] {}".format(i, content))
            print(data_sep)

            # temporarily disabled
            # this block means: if no WAF fingerprint was detected but the comparison still found differences,
            # the fingerprint database is not strong enough, so an issue would be filed on the author's GitHub
            # _, status, html, headers = verification_payloaded_response
            # if status != 0:
            #     path = create_fingerprint(url, html, status, headers)
            #     request_firewall_issue_creation(path)
            # else:
            #     warning(
            #         "status code returned as `0` meaning that there is no content in the webpage, "
            #         "issue will not be created"
            #     )

            inserted_into_database_results = insert_url(
                current_url_netloc, [], [], cursor, webserver=found_webserver)
        else:
            # no differences were found
            success("no protection identified on target")

            if formatted:
                if not force_file_creation:
                    warning(
                        "no data will be written to files since no protection could be identified, "
                        "to force file creation pass the `--force-file` argument"
                    )
                else:
                    # if the argument `--force-file` is passed we will create the file
                    # anyways, this should give users who are relying on the JSON files
                    # for thirdparty information a chance to get the data out of the directory
                    # then they can easily parse it without problems.
                    warning(
                        "forcing file creation without successful identification"
                    )
                    dict_data_output = dictify_output(url, None, [])
                    written_file_path = write_to_file(
                        filename,
                        file_path,
                        dict_data_output,
                        write_csv=use_csv,
                        write_yaml=use_yaml,
                        write_json=use_json,
                        save_copy_to=save_file_copy_path)

                    if written_file_path is not None:
                        info("data has been written to file: '{}'".format(
                            written_file_path))

            inserted_into_database_results = insert_url(
                current_url_netloc, [], [], cursor, webserver=found_webserver)

    else:
        # more than one WAF protection was identified
        success("multiple protections identified on target{}:".format(
            " (unknown firewall will not be displayed)"
            if UNKNOWN_FIREWALL_NAME in detected_protections else ""))

        detected_protections = list(detected_protections)

        for i, protection in enumerate(detected_protections, start=1):
            if not protection == UNKNOWN_FIREWALL_NAME:
                success("#{} '{}'".format(i, protection))

        if not skip_bypass_check:
            info("searching for bypasses")

            found_working_tampers = get_working_tampers(
                url,
                normal_response,
                payloads,
                proxy=proxy,
                agent=user_agent,
                verbose=verbose,
                tamper_int=tamper_int,
                throttle=throttle,
                timeout=request_timeout,
                provided_headers=provided_headers)

            if not formatted:
                # pretty-print the tampers
                produce_results(found_working_tampers)
            else:
                # dictify_output returns json_retval, which is made up of a dict
                dict_data_output = dictify_output(url, detected_protections,
                                                  found_working_tampers)

                written_file_path = write_to_file(
                    filename,
                    file_path,
                    dict_data_output,
                    write_csv=use_csv,
                    write_yaml=use_yaml,
                    write_json=use_json,
                    save_copy_to=save_file_copy_path)
                if written_file_path is not None:
                    info("data has been written to file: '{}'".format(
                        written_file_path))
            # write to the database
            inserted_into_database_results = insert_url(
                current_url_netloc,
                found_working_tampers,
                detected_protections,
                cursor,
                webserver=found_webserver)
        else:
            # skip the tamper checks
            warning("skipping bypass tests")
            if formatted:
                dict_data_output = dictify_output(url, detected_protections,
                                                  [])
                written_file_path = write_to_file(
                    filename,
                    file_path,
                    dict_data_output,
                    write_csv=use_csv,
                    write_yaml=use_yaml,
                    write_json=use_json,
                    save_copy_to=save_file_copy_path)
                if written_file_path is not None:
                    info("data has been written to file: '{}'".format(
                        written_file_path))
            inserted_into_database_results = insert_url(
                current_url_netloc, [],
                detected_protections,
                cursor,
                webserver=found_webserver)

    if inserted_into_database_results:
        info("URL has been cached for future use")

    # return the total number of requests sent
    return request_count
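
One detail worth spelling out is the filename derivation near the top of detection_main: file_start = url.split("/")[2].split(".")[1] takes the second dot-separated label of the host, which only gives a meaningful prefix for www.-style hosts. The helper below is purely illustrative:

def file_start(url):
    # same expression as in detection_main above
    return url.split("/")[2].split(".")[1]


print(file_start("http://www.example.com/"))  # example
print(file_start("http://example.com/"))      # com  (no "www." prefix)
try:
    file_start("http://localhost/")
except IndexError:
    print("IndexError -> detection_main falls back to random_string()")
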
Code Example #11
    def get_response(self):
        for i, waf_vector in enumerate(self.payloads):
            # if the given URL has no '*' placement marker, just append the payload to it
            if not self.placement:
                primary_url = self.url + "{}".format(waf_vector)
            else:
                # assumes there is exactly one '*' (a single payload point) in the URL and replaces it with the payload
                # this could be strengthened to handle multiple '*' markers (see the sketch after this example)
                url = self.url.split("*")
                primary_url = "{}{}{}".format(url[0], waf_vector,
                                              url[len(url) - 1])
            # secondary_url probes a random admin/homepage path, but since only one is picked it is of limited use
            secondary_url = strip_url(self.url)
            secondary_url = "{}//{}".format(secondary_url[0], secondary_url[1])
            secondary_url = "{}/{}".format(secondary_url,
                                           random.choice(RAND_HOMEPAGES))

            # with --verbose, print the payload being sent
            if self.verbose:
                payload("using payload: {}".format(waf_vector.strip()))

            try:
                if self.verbose:
                    debug("trying: '{}'".format(primary_url))
                response_retval = get_page(
                    # get_page returns a tuple like ->
                    # ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}")
                    primary_url,
                    user_agent=self.agent,
                    proxy=self.proxy,
                    provided_headers=self.provided_headers,
                    throttle=self.throttle,
                    timeout=self.timeout,
                    request_method=self.request_type,
                    post_data=self.post_data)
                self.response_retval_list.append(response_retval)

                _, response_status_code, _, _ = response_retval
                if self.verbose:
                    info("response status code: {}".format(
                        response_status_code))

                if self.verbose:
                    debug("trying: {}".format(secondary_url))

                # request the site root with a path picked at random from RAND_HOMEPAGES, so a 200 response is not guaranteed
                # response_retval_list is a list whose elements are these response tuples
                response_retval = get_page(
                    # return -> ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}")
                    secondary_url,
                    user_agent=self.agent,
                    proxy=self.proxy,
                    provided_headers=self.provided_headers,
                    # throttle -> sleep time between requests (default=0)
                    throttle=self.throttle,
                    timeout=self.timeout,
                    request_method=self.request_type,
                    post_data=self.post_data)
                self.response_retval_list.append(response_retval)

                _, response_status_code, _, _ = response_retval
                if self.verbose:
                    info('response status code: {}'.format(
                        response_status_code))

            except Exception as e:
                # in practice this should come back with a status code (forbidden, not found, ...) rather than raise
                if "ECONNRESET" in str(e):
                    warning(
                        "possible network level firewall detected (hardware), received an aborted connection"
                    )
                    self.response_retval_list.append(None)
                else:
                    error(
                        "failed to obtain target meta-data with payload {}, error: '{}'"
                        .format(waf_vector.strip(), str(e)))
                    self.response_retval_list.append(None)

            # temporarily disabled
            # with --fingerprint specified, why is only one fingerprint saved?
            # only the first payload request and the homepage probe would be saved here
            # if self.save_fingerprint:
            #     create_fingerprint(
            #         self.url,
            #         # get_page return looks like ->
            #         # ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}")
            #         # Soup object
            #         response_retval[0][2],
            #         # response code
            #         response_retval[0][1],
            #         # User Agent
            #         response_retval[0][3],
            #         # e.g. -> GET https://www.example.com
            #         req_data=response_retval[0][0],
            #         speak=True
            #     )

        # the list holds tuples like ('GET https://example.org', '200 OK', <soup object>, "{'User-Agent': '...'}"),
        # one per request made
        # two requests are made per payload (the payloaded URL plus the homepage probe)
        return self.response_retval_list
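
As the comment in get_response notes, the '*' placement only handles a single marker (it joins the first and last split segments). A sketch of how every marker could be substituted instead, which is not what the current code does:

def place_payload(url, payload):
    if "*" not in url:
        return url + payload
    # replace every '*' marker with the payload
    return payload.join(url.split("*"))


print(place_payload("http://x.com/?a=*&b=*", "' OR 1=1--"))
# http://x.com/?a=' OR 1=1--&b=' OR 1=1--
print(place_payload("http://x.com/?a=1", "' OR 1=1--"))
# http://x.com/?a=1' OR 1=1--
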
Code Example #12
def configure_request_headers(user_agent=None,
                              proxy=None,
                              random_agent=False,
                              tor=None,
                              tor_port=9050):
    """
    configure the HTTP request headers with a user defined
    proxy, Tor, or a random User-Agent from the user-agent
    file

    :param user_agent: custom User-Agent supplied by the user
    :param proxy: proxy to use, default None
    :param random_agent: whether to use a random User-Agent (strongly recommended)
    :param tor: whether to route requests through Tor, default None
    :param tor_port: custom Tor port
    :return: proxy, user_agent
    """

    supported_proxies = ("socks5", "socks4", "http", "https")

    invalid_msg = "invalid switches detected, switch {} cannot be used in conjunction with switch {}"
    proxy_msg = "running behind proxy '{}'"

    # --proxy and --tor cannot both be set
    if proxy is not None and tor:
        error(invalid_msg.format("--tor", "--proxy"))
        exit(1)

    # --ra and --pa cannot be used together
    if user_agent is not None and random_agent:
        error(invalid_msg.format("--ra", "--pa"))
        exit(1)

    # Tor defaults to socks5://127.0.0.1:9050
    if tor:
        proxy = "socks5://127.0.0.1:{}".format(tor_port)

    if user_agent is None:
        user_agent = get_random_agent()

    if random_agent:
        # pick a random User-Agent from whatwaf/content/data/user_agent.txt
        user_agent = get_random_agent()

    # a proxy was provided
    if proxy is not None:
        if any(item in proxy for item in supported_proxies):
            info(proxy_msg.format(proxy))
        else:
            error(
                "you did not provide a supported proxy protocol, "
                "supported protocols are '{}'. check your proxy and try again".
                format(", ".join(supported_proxies)))
            exit(1)
    else:
        # no proxy given
        warning(
            "it is highly advised to use a proxy when using WhatWaf. do so by passing the proxy flag "
            "(eg `--proxy http://127.0.0.1:9050`) or by passing the Tor flag (eg `--tor`)"
        )

    # if a User-Agent was set
    if user_agent is not None:
        info("using User-Agent '{}'".format(user_agent))

    return proxy, user_agent
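
For reference, the (proxy, user_agent) pair returned here would typically be consumed by the request layer roughly like this (the call site itself is not part of this excerpt, and a socks5 proxy additionally needs pysocks installed, as noted later in main()):

import requests

proxy, user_agent = None, "Mozilla/5.0 (X11; Linux x86_64)"
# e.g. proxy = "socks5://127.0.0.1:9050" when --tor is used

response = requests.get(
    "http://example.com/",
    headers={"User-Agent": user_agent},
    proxies={"http": proxy, "https": proxy} if proxy is not None else None,
    timeout=15,
)
print(response.status_code)
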
Code Example #13
def main():
    spacer = colored("-" * 30, 'white')

    print(BANNER)
    opts = CmdLineParser().cmd_parser()

    if not len(sys.argv[1:]):
        error("You failed to provide an option, redirecting to help menu")
        # pause two seconds before showing the help banner
        time.sleep(2)
        print("")
        CmdLineParser().cmd_parser(get_help=True)
    else:
        # if you feel that you have too many folders or data in the whatwaf home folder
        # we'll give you an option to clean it free of charge
        if opts.cleanHomeFolder:
            # shutil handles file operations such as permissions, moves and copies
            import shutil
            try:
                warning(
                    "cleaning the home folder: {home}, if you have installed with setup.sh, "
                    "this will erase the executable script along with everything inside "
                    "of the {home} directory (fingerprints, scripts, copies of whatwaf, etc) "
                    "if you are sure you want to do this press ENTER now. If you changed "
                    "your mind press CNTRL-C now".format(home=HOME))
                # wait for the user to confirm with ENTER (or abort with CTRL-C)
                raw_input("")
                info("attempting to clean up home folder")
                # HOME is the .whatwaf folder, e.g. /root/.whatwaf
                # remove that hidden .whatwaf directory
                shutil.rmtree(HOME)
                info("home folder removed")
            except KeyboardInterrupt:
                fatal("cleaning aborted")
            except OSError:
                fatal("no home folder detected, already cleaned?")
            exit(0)

        # initialize the sqlite3 database, creating ~/.whatwaf/whatwaf.sqlite
        # creates the cached_payloads and cached_urls tables if they do not exist, then returns a cursor for working with the database
        cursor = initialize()

        # --export FILE-TYPE was given
        # only the data in the cached_payloads table is exported here
        if opts.exportEncodedToFile is not None:
            # fetch_data(cursor, table_name=None)
            # returns a list of rows, each row being a tuple of column values
            payloads = fetch_data(cursor, table_name='cached_payloads')
            if len(payloads) != 0:
                # export_payloads() writes the payload column to a file and returns that file's name
                exported_payloads_path = export_payloads(
                    payloads, opts.exportEncodedToFile)

                success(
                    "payloads exported to: {}".format(exported_payloads_path))
            else:
                # no data in the database
                warning(
                    "there appears to be no payloads stored in the database, to create payloads use the "
                    "following options:")
                proc = subprocess.check_output(
                    ["python", "whatwaf.py", "--help"])
                parsed_help = CmdLineParser.parse_help_menu(
                    str(proc), "encoding options:", "output options:")
                print(parsed_help)
                exit(1)

        # -vC / --view-cache shows the contents of both the cached_payloads and cached_urls tables
        if opts.viewAllCache:
            cached_payloads = fetch_data(cursor, table_name='cached_payloads')
            cached_urls = fetch_data(cursor, table_name='cached_urls')

            # simply dumps both tables and pretty-prints them
            display_cached(cached_urls, cached_payloads)
            exit(0)

        # -pC / --payload-cache shows only the cached_payloads table
        if opts.viewCachedPayloads:
            payloads = fetch_data(cursor, table_name='cached_payloads')
            if len(payloads) != 0:
                display_cached(None, payloads)
            else:
                warning(
                    "there appears to be no payloads stored in the database, to create payloads use the"
                    " following options:")
                proc = subprocess.check_output(
                    ["python", "whatwaf.py", "--help"])
                parsed_help = CmdLineParser.parse_help_menu(
                    proc, "encoding options:", "output options:")
                print(parsed_help)
            exit(0)

        # -uC / --view-url-cache shows only the cached_urls table
        if opts.viewUrlCache:
            cached_urls = fetch_data(cursor, table_name='cached_urls')
            display_cached(cached_urls, None)
            exit(0)

        # -e / --encode was given
        # -e PAYLOAD [TAMPER-SCRIPT-LOAD-PATH ...], --encode PAYLOAD [TAMPER-SCRIPT-LOAD-PATH ...]
        # it is not explained here how the payload and its load path should be given, leaving that for now
        if opts.encodePayload is not None:
            payload = opts.encodePayload[0]
            # opts.encodePayload[1:] -> the tamper load paths, e.g. tampers.lowlevelunicodecharencode
            load_path = opts.encodePayload[1:]
            # several load paths may be given
            payload_list = []
            for load in load_path:
                try:
                    # encode(payload, script): the script argument is the tamper load path
                    # encode() returns the bypass string produced from the payload
                    payload = encode(payload, load)
                    payload_list.append(payload)
                except (AttributeError, ImportError):
                    warning(
                        "invalid load path given: '{}', skipping it and continuing"
                        .format(load))

            success("encoded successfully:")
            print(spacer)
            result = False
            for i, payload in enumerate(payload_list):
                # after encoding succeeds, cache the payload in the database
                result = insert_payload(payload, cursor)
                print("{}{}".format(colored("#" + str(i) + " ", 'white'),
                                     payload))
            print(spacer)

            if result:
                info("payload has been cached for future use")
                exit(0)
            else:
                fatal("payload throwing error, see below")
                print(colored(result, 'red'))
                exit(1)

        # -el / --encode-list takes a payload file with one payload per line
        # -el PATH TAMPER-SCRIPT-LOAD-PATH, --encode-list PATH TAMPER-SCRIPT-LOAD-PATH
        if opts.encodePayloadList is not None:
            try:
                file_path, load_path = opts.encodePayloadList
                info(
                    "encoding payloads from given file '{}' using given tamper '{}'"
                    .format(colored(file_path, 'white'),
                            colored(load_path, 'white')))

                with open(file_path) as payloads:
                    # encode(payload, tamper_path)
                    encoded = [
                        encode(p.strip(), load_path)
                        for p in payloads.readlines()
                    ]

                    # --save FILENAME was given
                    if opts.saveEncodedPayloads is not None:
                        with open(opts.saveEncodedPayloads, "a+") as save:
                            for item in encoded:
                                save.write(item + "\n")
                        success(
                            "saved encoded payloads to file '{}' successfully".
                            format(opts.saveEncodedPayloads))
                    else:
                        success("payloads encoded successfully:")
                        print(spacer)
                        for i, item in enumerate(encoded, start=1):
                            # cache in the database
                            insert_payload(item, cursor)
                            print("{} {}".format(
                                colored("#" + str(i), 'white'), item))
                        print(spacer)
                info("payloads have been cached for future use")
            except IOError:
                fatal(
                    "provided file '{}' appears to not exist, check the path and try again"
                    .format(file_path))
            except (AttributeError, ImportError):
                fatal(
                    "invalid load path given, check the load path and try again"
                )
            exit(0)

        # temporarily disabled
        # --update was given
        # if opts.updateWhatWaf:
        #     info("update in progress")
        #     cmd = shlex.split("git pull origin master")
        #     subprocess.call(cmd)
        #     exit(0)

        # --tampers was given
        # this option lists every available tamper
        if opts.listEncodingTechniques:
            info("gathering available tamper script load paths")
            # returns the set of all tamper names
            # is_tampers=True returns the names of everything under the tampers directory
            # is_wafs=True returns the names of everything under the plugins directory

            print(spacer)
            tamper_list = get_encoding_list(TAMPERS_DIRECTORY,
                                            is_tampers=True,
                                            is_wafs=False)
            for tamper in sorted(tamper_list):
                print(tamper)
            print(spacer)
            exit(0)

        # --wafs was given
        # list everything under the plugins directory
        if opts.viewPossibleWafs:
            import importlib

            info("gathering a list of possible detectable wafs")

            print(spacer)
            wafs_list = get_encoding_list(PLUGINS_DIRECTORY,
                                          is_tampers=False,
                                          is_wafs=True)
            for i, waf in enumerate(wafs_list, start=1):
                try:
                    imported = importlib.import_module(waf)
                    print("{}".format(imported.__product__))
                except ImportError:
                    pass
            print(spacer)
            exit(0)

        # check for updates before running a large scan; disabled for now
        # gotta find a better way to check for updates, so I'm hotfixing it
        # info("checking for updates")
        # check_version()

        # -Y --yaml sendToYAML
        # -C --csv sendToCSV
        # -J --json sendToJSON
        format_opts = [opts.sendToYAML, opts.sendToCSV, opts.sendToJSON]
        # -F / --format was given
        if opts.formatOutput:
            amount_used = 0
            for item in format_opts:
                if item is True:
                    amount_used += 1
            if amount_used > 1:
                warning(
                    "multiple file formats have been detected, there is a high probability that this will cause "
                    "issues while saving file information. please use only one format at a time"
                )
            elif amount_used == 0:
                warning(
                    "output will not be saved to a file as no file format was provided. to save output to file "
                    "pass one of the file format flags (eg `-J` for JSON format)"
                )
        elif any(format_opts) and not opts.formatOutput:
            warning(
                "you've chosen to send the output to a file, but have not formatted the output, no file will be saved "
                "do so by passing the format flag (eg `-F -J` for JSON format)"
            )

        # --skip (skipBypassChecks) and --tamper-int (amountOfTampersToDisplay) were both given
        if opts.skipBypassChecks and opts.amountOfTampersToDisplay is not None:
            warning(
                "you've chosen to skip bypass checks and chosen an amount of tamper to display, tampers will be skipped"
            )

        # there is an extra dependency that you need in order
        # for requests to run behind socks proxies, we'll just
        # do a little check to make sure you have it installed
        # --tor     opts.runBehindTor
        # --proxy   opts.runBehindProxy
        # if --tor is set, or --proxy is set and contains "socks",
        # the socks module needs to be imported
        if opts.runBehindTor or opts.runBehindProxy is not None and "socks" in opts.runBehindProxy:
            try:
                import socks
            except ImportError:
                # if you don't we will go ahead and exit the system with an error message
                error(
                    "to run behind socks proxies (like Tor) you need to install pysocks `pip install pysocks`, "
                    "otherwise use a different proxy protocol")
                sys.exit(1)

        # configure_request_headers(user_agent=None, proxy=None, random_agent=False, tor=None, tor_port=9050)
        # returns proxy, user_agent
        # configure_request_headers only warns (it does not fail) when --proxy is not given
        # it validates the request header / User-Agent settings and returns the proxy and user_agent to use
        proxy, user_agent = configure_request_headers(
            # opts.usePersonalAgent -pa
            user_agent=opts.usePersonalAgent,
            # opts.runBehindProxy --proxy
            proxy=opts.runBehindProxy,
            # opts.useRandomAgent -ra, pick a random User-Agent from whatwaf/content/data/user_agents.txt
            random_agent=opts.useRandomAgent,
            # opts.runBehindTor --tor; Tor defaults to socks5://127.0.0.1:9050,
            # so to use the Tor proxy just pass --tor and keep the default
            tor=opts.runBehindTor,
            # opt.configTorPort -tP --tor-port default=9050
            tor_port=opts.configTorPort)

        # leaving this aside for now
        # with --tor, the response from https://check.torproject.org would be used to confirm whether Tor is active
        # if opts.runBehindTor:
        #     import re
        #
        #     info("checking Tor connection")
        #     check_url = "https://check.torproject.org/"
        #     check_regex = re.compile("This browser is configured to use Tor.", re.I)
        #
        #     # no check here; this should error out if --proxy was not given
        #     # get_page() uses the requests module and parses the response with BeautifulSoup
        #     _, _, content, _ = get_page(check_url, proxy=proxy, user_agent=user_agent)
        #
        #     if check_regex.search(str(content)) is not None:
        #         success("it appears that Tor is working properly")
        #     else:
        #         warning("it appears Tor is not configured properly")

        # -p / --payload was given
        # it says payloads will be used, but not how, or which payloads
        if opts.providedPayloads is not None:
            # multiple payloads may be given, separated by commas; they are collected into payload_list
            payload_list = [
                p.strip() if p[0] == " " else p
                for p in str(opts.providedPayloads).split(",")
            ]
            info("using provided payloads")
        elif opts.payloadList is not None:
            # a payload list file was given
            # --pl PAYLOAD-LIST-PATH
            try:
                open(opts.payloadList).close()
            except:
                fatal(
                    "provided file '{}' does not exists, check the path and try again"
                    .format(opts.payloadList))
                exit(1)
            payload_list = [
                p.strip("\n") for p in open(opts.payloadList).readlines()
            ]
            info("using provided payload file '{}'".format(opts.payloadList))
        else:
            # neither was given, so fall back to the default whatwaf/content/data/default_payloads.lst file
            payload_list = WAF_REQUEST_DETECTION_PAYLOADS
            info("using default payloads")

        # verbose was specified
        if opts.runInVerbose:
            for payload in payload_list:
                info("using payload: {}".format(payload))

        # --fingerprint was given, fingerprints will be saved
        if opts.saveFingerprints:
            warning(
                "fingerprinting is enabled, all fingerprints (WAF related or not) will be saved for further analysis; "
                "if the fingerprint already exists it will be skipped")

        # --traffic FILENAME was specified
        if opts.trafficFile is not None:
            info("saving HTTP traffic to '{}'".format(opts.trafficFile))
        # --throttle INT was specified (default 0)
        if opts.sleepTimeThrottle != 0:
            info("sleep throttle has been set to {}s".format(
                opts.sleepTimeThrottle))

        try:
            if opts.postRequest:
                request_type = "POST"
            else:
                request_type = "GET"

            request_count = 0

            # -u --url
            if opts.runSingleWebsite:
                # opts.forceSSL --force-ssl
                # normalization_url(url, ssl=False) -> automatically prepends the http:// or https:// scheme
                url_to_use = normalization_url(opts.runSingleWebsite,
                                               ssl=opts.forceSSL)
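                # Editor's sketch of normalization_url's assumed behaviour (inferred from the
                # comment above, not verified against its implementation):
                #
                #   normalization_url("example.com/index.php?id=1")           -> "http://example.com/index.php?id=1"
                #   normalization_url("example.com/index.php?id=1", ssl=True) -> "https://example.com/index.php?id=1"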

                # with -u specified, -c --url-cache (default=False) would normally gate this,
                # but the cache check below always runs
                # if opts.checkCachedUrls:

                # check_url_cached(url, cursor) returns the url's row from the cached_urls
                # table if it is present there, otherwise it returns None
                checked_results = check_url_cached(url_to_use, cursor)

                # a cached row exists in the database
                if checked_results is not None:
                    print(
                        RESULTS_TEMPLATE.format(
                            spacer,
                            # uri
                            str(checked_results[1]),
                            # Identified Protections
                            str(checked_results[2]),
                            # Identified Tampers
                            str(checked_results[3]),
                            # Identified Web Server
                            str(checked_results[4]),
                            spacer))
                    exit(0)
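
                # Editor's note (assumption inferred from the indices used above): a cached
                # row presumably looks like
                #   (id, uri, identified_protections, identified_tampers, identified_web_server)
                # which is why checked_results[1] is the URI, [2] the protections, and so on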

                # with -u specified, -T --test (default=True) controls this; the connection test runs by default, so the flag rarely needs to be passed
                # if opts.testTargetConnection:
                info("testing connection to target URL before starting attack")
                # opts.extraHeaders -H --headers; note that headers must be a dict
                # the argument should look like this:
                # --headers {"Content-Length": "23", "User-Agent": "python-requests/2.10.0"}
                results = test_target_connection(
                    url=url_to_use,
                    post_data=opts.postRequestData,
                    proxy=proxy,
                    user_agent=user_agent,
                    headers=opts.extraHeaders)
                if results == "nogo":
                    fatal(
                        "connection to target URL failed multiple times, check connection and try again"
                    )
                    exit(1)
                elif results == "acceptable":
                    warning(
                        "there appears to be some latency on the connection, this may interfere with results"
                    )
                else:
                    success("connection succeeded, continuing")

                info("running single web application '{}'".format(url_to_use))

                # with -u specified the requests are sent; detection_main is the main request function
                # detection_main(url, payload_list, cursor, **kwargs)
                # detection_main returns response_count, the total number of requests sent
                amount_of_requests = detection_main(
                    url_to_use,
                    payload_list,
                    cursor,
                    request_type=request_type,
                    post_data=opts.postRequestData,
                    user_agent=user_agent,
                    # --headers must be followed by a dict argument
                    provided_headers=opts.extraHeaders,
                    proxy=proxy,
                    verbose=opts.runInVerbose,
                    skip_bypass_check=opts.skipBypassChecks,
                    # verifyNumber --verify-num INT
                    verification_number=opts.verifyNumber,
                    # --fingerprint
                    # fingerprint_waf=opts.saveFingerprints,
                    formatted=opts.formatOutput,
                    # --tamper-int INT
                    tamper_int=opts.amountOfTampersToDisplay,
                    use_json=opts.sendToJSON,
                    use_yaml=opts.sendToYAML,
                    use_csv=opts.sendToCSV,
                    # --traffic FILENAME
                    traffic_file=opts.trafficFile,
                    throttle=opts.sleepTimeThrottle,
                    request_timeout=opts.requestTimeout,
                    # -W --determine-webserver default=False
                    # this should probably be enabled by default
                    # determine_server=opts.determineWebServer,
                    # number of threads
                    threaded=opts.threaded,
                    #  --force-file default=False
                    force_file_creation=opts.forceFileCreation,
                    # -o --output
                    save_file_copy_path=opts.outputDirectory)

                request_count = amount_of_requests if amount_of_requests is not None else request_count

            elif any(
                    o is not None
                    for o in [opts.runMultipleWebsites, opts.burpRequestFile]):
                # -u was not specified; -l --list or --burp FILE-PATH was used instead
                info("reading from '{}'".format(opts.runMultipleWebsites
                                                or opts.burpRequestFile))

                try:
                    open(opts.runMultipleWebsites or opts.burpRequestFile)
                except IOError:
                    fatal("file: '{}' did not open, does it exist?".format(
                        opts.runMultipleWebsites))
                    exit(-1)

                if opts.runMultipleWebsites is not None:
                    # list of URLs to be tested
                    site_runners = []

                    with open(opts.runMultipleWebsites) as urls:
                        for url in urls:
                            possible_url = normalization_url(url.strip(),
                                                             ssl=opts.forceSSL)

                            if opts.checkCachedUrls:
                                url_is_cached = check_url_cached(
                                    possible_url, cursor)

                                if url_is_cached is not None:
                                    # a cached row exists in the database
                                    print(
                                        RESULTS_TEMPLATE.format(
                                            "-" * 20, str(url_is_cached[1]),
                                            str(url_is_cached[2]),
                                            str(url_is_cached[3]),
                                            str(url_is_cached[4]), "-" * 20))

                                else:
                                    site_runners.append(possible_url)
                            else:
                                site_runners.append(possible_url)
                elif opts.burpRequestFile is not None:
                    # parse_burp_request returns retval -> a list containing the parsed URLs
                    site_runners = parse_burp_request(opts.burpRequestFile)
                else:
                    site_runners = []
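
                # Editor's sketch (assumption about parse_burp_request, not verified): a saved
                # Burp request file contains one or more raw HTTP requests, and each URL is
                # presumably rebuilt from the request line and Host header, e.g.
                #
                #   GET /index.php?id=1 HTTP/1.1
                #   Host: example.com
                #
                #   -> "http://example.com/index.php?id=1"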

                if len(site_runners) == 0:
                    fatal("no targets parsed from file, exiting")
                    exit(1)
                else:
                    info("parsed a total of {} target(s) from file".format(
                        len(site_runners)))

                for i, url in enumerate(site_runners, start=1):
                    if opts.testTargetConnection:
                        info(
                            "testing connection to target URL before starting attack"
                        )
                        results = test_target_connection(
                            url,
                            proxy=proxy,
                            user_agent=user_agent,
                            headers=opts.extraHeaders)
                        if results == "nogo":
                            fatal(
                                "connection to target URL failed multiple times, check connection and try again"
                            )
                            exit(1)
                        elif results == "acceptable":
                            warning(
                                "there appears to be some latency on the connection, this may interfere with results"
                            )
                        else:
                            success("connection succeeded, continuing")

                    info("currently running on site #{} ('{}')".format(i, url))
                    requests = detection_main(
                        url,
                        payload_list,
                        cursor,
                        user_agent=user_agent,
                        proxy=proxy,
                        verbose=opts.runInVerbose,
                        skip_bypass_check=opts.skipBypassChecks,
                        verification_number=opts.verifyNumber,
                        formatted=opts.formatOutput,
                        tamper_int=opts.amountOfTampersToDisplay,
                        use_json=opts.sendToJSON,
                        use_yaml=opts.sendToYAML,
                        use_csv=opts.sendToCSV,
                        # fingerprint_waf=opts.saveFingerprints,
                        provided_headers=opts.extraHeaders,
                        traffic_file=opts.trafficFile,
                        throttle=opts.sleepTimeThrottle,
                        request_timeout=opts.requestTimeout,
                        post_data=opts.postRequestData,
                        request_type=request_type,
                        # check_server=opts.determineWebServer,
                        threaded=opts.threaded,
                        force_file_creation=opts.forceFileCreation,
                        save_file_copy_path=opts.outputDirectory)
                    request_count = request_count + requests if requests is not None else request_count
                    print("\n\b")
                    time.sleep(0.5)

            if request_count != 0:
                info("total requests sent: {}".format(request_count))
            else:
                warning(
                    "request counter failed to count correctly, deactivating")

        except KeyboardInterrupt:
            fatal("user aborted scanning")
        except InvalidURLProvided:
            fatal(
                "the provided URL is unable to be validated, check the URL and try again (you may need to unquote the "
                "HTML entities)")
        except Exception as e:
            # traceback is used to capture the stack trace
            import traceback

            sep = colored("-" * 30, 'white')

            fatal(
                "WhatWaf has caught an unhandled exception with the error message: '{}'."
                .format(str(e)))

            exception_data = "Traceback (most recent call):\n{}{}".format(
                "".join(traceback.format_tb(sys.exc_info()[2])), str(e))

            error("\n{}\n{}\n{}".format(sep, exception_data, sep))

            request_issue_creation(exception_data)
コード例 #14
0
def write_to_file(filename, path, data, **kwargs):
    """
    write the data to a file

    :param filename:
    :param path:
    :param data:
    :param kwargs:
    :return:
    """

    write_yaml = kwargs.get("write_yaml", False)
    write_json = kwargs.get("write_json", False)
    write_csv = kwargs.get("write_csv", False)
    save_copy = kwargs.get("save_copy_to", None)

    full_path = "{}/{}".format(path, filename)

    if not os.path.exists(path):
        os.makedirs(path)
    if write_json and not write_yaml and not write_csv:
        with open(full_path, "a+") as _json:
            _json_data = json.loads(data)
            json.dump(_json_data, _json, sort_keys=True, indent=4)
    elif write_yaml and not write_json and not write_csv:
        try:
            # there is an extra dependency that needs to be installed for you to save to YAML
            # we'll check if you have it or not
            import yaml

            with open(full_path, "a+") as _yaml:
                _yaml_data = yaml.safe_load(data)
                yaml.dump(_yaml_data, _yaml, default_flow_style=False)
        except ImportError:
            # if you don't we'll just skip the saving and warn you
            warning(
                "you do not have the needed dependency to save YAML data, to install the dependency run "
                "`pip install pyyaml`, skipping file writing")
            return None
    elif write_csv and not write_json and not write_yaml:
        import csv

        _json_data = json.loads(data)
        csv_data = None
        try:
            csv_data = [
                ["url", "is_protected", "protection", "working_tampers"],
                [
                    _json_data["url"], _json_data["is protected"],
                    _json_data["identified firewall"] if
                    _json_data["identified firewall"] is not None else "None",
                    _json_data["apparent working tampers"]
                    if _json_data["apparent working tampers"] is not None else
                    "None"
                ]
            ]
        except KeyError:
            # if the expected keys are missing there is nothing sensible to write
            pass
        if csv_data is not None:
            with open(full_path, "a+") as _csv:
                writer = csv.writer(_csv)
                writer.writerows(csv_data)
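
        # Editor's sketch (values invented for the example): the two rows written above
        # would land in the CSV roughly as
        #
        #   url,is_protected,protection,working_tampers
        #   http://example.com,True,Cloudflare,None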
    if save_copy is not None:
        import shutil
        try:
            shutil.copy(full_path, save_copy)
            info("copy of file saved to {}".format(save_copy))
        except Exception:
            error("failed to save copy of file, do you have permissions?")

    return full_path
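

# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch, assuming the JSON string below matches the shape write_to_file
# expects; the filename, path, and values are invented for the example.
if __name__ == "__main__":
    import json
    import os

    sample_results = json.dumps({
        "url": "http://example.com",
        "is protected": True,
        "identified firewall": None,
        "apparent working tampers": None
    })
    saved = write_to_file(
        "example_results.json",
        os.path.expanduser("~/.whatwaf/results"),
        sample_results,
        write_json=True)
    print("results written to: {}".format(saved))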