Example #1
def _check_password_reset(session: Session, element_name: Optional[str] = None):
    user = session.args.user
    if user is None:
        user = utils.prompt("What is a valid user? ")

    try:
        with Spinner():
            res = password_reset.check_resp_user_enum(session, user, element_name)

        if res:
            reporter.display_results(res, "\t")
    except WebDriverException as e:
        output.error("Selenium error encountered: " + e.msg)
    except PasswordResetElementNotFound as e:
        if element_name is not None:
            # we failed to find the element, and we had one specified - this isn't going to work
            output.error(
                "Unable to find a matching element to perform the User Enumeration via Password Reset: "
                + str(e)
            )
        else:
            # we failed, because we don't have the element - so we prompt for it.
            print(
                "Unable to find a known element to enter the user name. Please identify the proper element."
            )
            print(
                "If this element seems to be common, please request that it be added: https://github.com/adamcaudill/yawast/issues"
            )
            name = utils.prompt("What is the user/email entry element name? ")

            _check_password_reset(session, name)
    except Exception as e:
        output.error(
            "Failed to execute Password Reset Page User Enumeration: " + str(e)
        )
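
Note that _check_password_reset calls itself with the prompted element name, so the check retries once the user supplies the missing element. Every example on this page also wraps its long-running checks in a "with Spinner():" block so the user gets feedback while a check runs. As a rough sketch of the idea (an assumption for illustration, not yawast's actual implementation), such a context manager can be built from a background thread:

import itertools
import sys
import threading
import time


class Spinner:
    def __enter__(self):
        # run the animation on a daemon thread so it can never block exit
        self._done = threading.Event()
        self._thread = threading.Thread(target=self._spin, daemon=True)
        self._thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._done.set()
        self._thread.join()
        sys.stdout.write("\r \r")  # erase the last spinner character

    def _spin(self):
        for char in itertools.cycle("|/-\\"):
            if self._done.is_set():
                break
            sys.stdout.write("\r" + char)
            sys.stdout.flush()
            time.sleep(0.1)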
Example #2
File: main.py Project: rurbin3/yawast
def _shutdown():
    global _start_time, _monitor, _has_shutdown

    if _has_shutdown:
        return

    _has_shutdown = True
    output.debug("Shutting down...")

    elapsed = datetime.now() - _start_time
    mem_res = "{0:cM}".format(Size(_monitor.peak_mem_res))
    reporter.register_info("peak_memory", _monitor.peak_mem_res)

    output.empty()

    if _monitor.peak_mem_res > 0:
        output.norm(
            f"Completed (Elapsed: {str(elapsed)} - Peak Memory: {mem_res})")
    else:
        # if we don't have memory info - likely not running in a terminal, don't print junk
        output.norm(f"Completed (Elapsed: {str(elapsed)})")

    if reporter.get_output_file() != "":
        with Spinner() as spinner:
            reporter.save_output(spinner)
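
The "{0:cM}" format spec above is not a built-in Python spec; it only works because the Size value implements __format__. A minimal sketch of such a class (an assumption for illustration; yawast bundles its own Size helper, and the meaning of "cM" is a guess, read here as "compact, megabytes"):

class Size:
    def __init__(self, num_bytes: float):
        self.num_bytes = num_bytes

    def __format__(self, spec: str) -> str:
        # "{0:cM}".format(Size(n)) routes the "cM" spec to this method
        if spec == "cM":
            return "%dMB" % (self.num_bytes / (1024 * 1024))
        return str(self.num_bytes)


print("{0:cM}".format(Size(512 * 1024 * 1024)))  # prints "512MB"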
Example #3
File: network.py Project: sgnls/yawast
def _check_open_ports(domain: str, url: str, file: Optional[str] = None):
    try:
        output.empty()
        output.norm("Open Ports:")

        ips = basic.get_ips(domain)

        for ip in ips:
            with Spinner():
                res = port_scan.check_open_ports(url, ip, file)

            if len(res) > 0:
                reporter.display_results(res, "\t")
    except Exception as error:
        output.error(f"Error checking for open ports: {str(error)}")
Example #4
def _shutdown():
    global _start_time, _monitor, _has_shutdown

    if _has_shutdown:
        return

    _has_shutdown = True
    output.debug("Shutting down...")

    elapsed = datetime.now() - _start_time
    mem_res = "{0:cM}".format(Size(_monitor.peak_mem_res))

    output.empty()

    output.norm(
        f"Completed (Elapsed: {str(elapsed)} - Peak Memory: {mem_res})")

    if reporter.get_output_file() != "":
        with Spinner():
            reporter.save_output()
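
Note the module-level _has_shutdown guard: it makes _shutdown() idempotent, so it is safe to call from more than one exit path (for example a normal exit and an interrupt handler, though the callers are not shown here), and only the first call reports elapsed time and saves the output file.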
Example #5
def scan(session: Session):
    reporter.register_data("url", session.url)
    reporter.register_data("domain", session.domain)

    output.empty()
    output.norm("HEAD:")
    head = network.http_head(session.url)

    raw = network.http_build_raw_response(head)
    for line in raw.splitlines():
        output.norm(f"\t{line}")

    output.empty()

    res = http_basic.get_header_issues(head, raw, session.url)
    if res:
        output.norm("Header Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    res = http_basic.get_cookie_issues(head, session.url)
    if res:
        output.norm("Cookie Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    # check for WAF signatures
    res = waf.get_waf(head.headers, raw, session.url)
    if res:
        output.norm("WAF Detection:")

        reporter.display_results(res, "\t")
        output.empty()

    output.norm("Performing vulnerability scan (this will take a while)...")

    links: List[str] = []
    with Spinner():
        try:
            links, res = spider.spider(session.url)
        except Exception as error:
            output.debug_exception()
            output.error(f"Error running scan: {str(error)}")

    output.norm(f"Identified {len(links) + 1} pages.")
    output.empty()

    if res:
        output.norm("Issues Detected:")

        reporter.display_results(res, "\t")
        output.empty()

    # get files, and add those to the link list
    links += _file_search(session, links)

    if (
        session.args.pass_reset_page is not None
        and len(session.args.pass_reset_page) > 0
    ):
        _check_password_reset(session)

    with Spinner():
        res = http_basic.check_local_ip_disclosure(session)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = apache_httpd.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = apache_tomcat.check_all(session.url, links)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = nginx.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = iis.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_propfind(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_trace(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_options(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = php.find_phpinfo(links)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res, jira_path = jira.check_for_jira(session)
    if res:
        reporter.display_results(res, "\t")

    if jira_path is not None:
        with Spinner():
            res = jira.check_jira_user_registration(jira_path)
        if res:
            reporter.display_results(res, "\t")

    with Spinner():
        wp_path, res = wordpress.identify(session.url)
    if res:
        reporter.display_results(res, "\t")

    if wp_path is not None:
        with Spinner():
            res = wordpress.check_json_user_enum(wp_path)
            res += wordpress.check_path_disclosure(wp_path)
        if res:
            reporter.display_results(res, "\t")
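
The second half of scan() repeats one pattern over and over: run a check under a spinner, then print any findings. A hypothetical helper (not part of yawast) could fold that up; checks that return extra values, such as jira.check_for_jira or wordpress.identify, would still need their own handling:

def _run_check(check, *args):
    # run a single check under a spinner, then report any findings
    with Spinner():
        res = check(*args)
    if res:
        reporter.display_results(res, "\t")
    return res


# e.g. _run_check(http_basic.check_trace, session.url)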
Example #6
def _file_search(session: Session, orig_links: List[str]) -> List[str]:
    new_files: List[str] = []
    file_good, file_res, path_good, path_res = network.check_404_response(session.url)

    # these are here for data typing
    results: Union[List[Result], None]
    links: Union[List[str], None]

    if not file_good:
        reporter.display(
            "Web server does not respond properly to file 404 errors.",
            Issue(
                Vulnerabilities.SERVER_INVALID_404_FILE,
                session.url,
                Evidence.from_response(file_res),
            ),
        )
    if not path_good:
        reporter.display(
            "Web server does not respond properly to path 404 errors.",
            Issue(
                Vulnerabilities.SERVER_INVALID_404_PATH,
                session.url,
                Evidence.from_response(path_res),
            ),
        )

    if not (file_good or path_good):
        output.norm(
            "Site does not respond properly to non-existent file/path requests; skipping some checks."
        )

    if file_good:
        links, results = special_files.check_special_files(session.url)
        if results:
            reporter.display_results(results, "\t")

        new_files += links

        if session.args.files:
            output.empty()
            output.norm("Searching for common files (this will take a few minutes)...")

            with Spinner():
                try:
                    links, results = file_search.find_files(session.url)
                except Exception as error:
                    output.debug_exception()
                    output.error(f"Error running scan: {str(error)}")
                    results = None
                    links = None

            if results is not None and results:
                reporter.display_results(results, "\t")

            if links is not None and links:
                new_files += links

                for l in links:
                    if l not in orig_links:
                        output.norm(f"\tNew file found: {l}")

                output.empty()

        # check for common backup files
        all_links = orig_links + new_files
        with Spinner():
            backups, res = file_search.find_backups(all_links)
        if res:
            reporter.display_results(res, "\t")
        if backups:
            new_files += backups

    if path_good:
        links, results = special_files.check_special_paths(session.url)

        if results:
            reporter.display_results(results, "\t")

        new_files += links

        if session.args.dir:
            output.empty()
            output.norm(
                "Searching for common directories (this will take a few minutes)..."
            )

            with Spinner():
                try:
                    links, results = file_search.find_directories(
                        session.url,
                        session.args.dirlistredir,
                        session.args.dirrecursive,
                    )
                except Exception as error:
                    output.debug_exception()
                    output.error(f"Error running scan: {str(error)}")
                    results = None
                    links = None

            if results is not None and results:
                reporter.display_results(results, "\t")

            if links is not None and links:
                new_files += links

                for l in links:
                    if l not in orig_links:
                        output.norm(f"\tNew directory found: {l}")

                output.empty()

    # check for .DS_Store files
    if file_good:
        res = file_search.find_ds_store(new_files)

        if res:
            reporter.display_results(res, "\t")

    return new_files
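
_file_search calibrates before it searches: if the server answers requests for nonexistent files or paths with something other than a 404, brute-force discovery would drown in false positives, so the corresponding branches are skipped and an issue is reported instead. A rough sketch of such a probe (an assumption for illustration; network.check_404_response is the real implementation, and it also returns the raw responses used as evidence):

import secrets

import requests


def _file_404_behaves(url: str) -> bool:
    # request a random, almost-certainly-nonexistent file and check whether
    # the server really answers 404 rather than a wildcard 200
    probe = url.rstrip("/") + "/" + secrets.token_hex(12) + ".html"
    return requests.get(probe, timeout=30).status_code == 404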
Example #7
File: http.py Project: rurbin3/yawast
def scan(session: Session):
    reporter.register_data("url", session.url)
    reporter.register_data("domain", session.domain)

    output.empty()
    output.norm("HEAD:")
    head = network.http_head(session.url)

    raw = network.http_build_raw_response(head)
    for line in raw.splitlines():
        output.norm(f"\t{line}")

    output.empty()

    res = http_basic.get_header_issues(head, raw, session.url)
    if res:
        output.norm("Header Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    res = http_basic.get_cookie_issues(head, session.url)
    if res:
        output.norm("Cookie Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    # check for WAF signatures
    res = waf.get_waf(head.headers, raw, session.url)
    if res:
        output.norm("WAF Detection:")

        reporter.display_results(res, "\t")
        output.empty()

    # check the HSTS preload status
    results = http_basic.check_hsts_preload(session.url)
    if len(results) > 0:
        reporter.register_data("hsts_preload_status", results)

        output.norm("HSTS Preload Status:")
        for result in results:
            chrome = result["chrome"] is not None
            firefox = result["firefox"] is not None
            tor = result["tor"] is not None

            output.norm(
                f"\t({result['domain']}) Chrome: {chrome}\tFirefox: {firefox}\t\tTor: {tor}"
            )
        output.empty()

    methods, res = http_basic.check_http_methods(session.url)
    if len(methods) == 0:
        output.norm("Server responds to invalid HTTP methods - check skipped.")
    else:
        reporter.register_data("http_methods_supported", methods)

        output.norm("Supported HTTP methods:")

        for method in methods:
            output.norm(f"\t{method}")

    output.empty()

    if res:
        reporter.display_results(res, "\t")
        output.empty()

    output.norm("Performing vulnerability scan (this will take a while)...")

    links: List[str] = []
    with Spinner():
        try:
            links, res = spider.spider(session.url)
        except Exception as error:
            output.debug_exception()
            output.error(f"Error running scan: {str(error)}")

    output.norm(f"Identified {len(links) + 1} pages.")
    output.empty()

    if res:
        output.norm("Issues Detected:")

        reporter.display_results(res, "\t")
        output.empty()

    # get files, and add those to the link list
    links += _file_search(session, links)

    if (
        session.args.pass_reset_page is not None
        and len(session.args.pass_reset_page) > 0
    ):
        _check_password_reset(session)

    with Spinner():
        res = http_basic.check_local_ip_disclosure(session)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = apache_httpd.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = apache_tomcat.check_all(session.url, links)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = nginx.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = iis.check_all(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_propfind(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_trace(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = http_basic.check_options(session.url)
    if res:
        reporter.display_results(res, "\t")

    with Spinner():
        res = php.find_phpinfo(links)
    if res:
        reporter.display_results(res, "\t")

    if session.args.php_page is not None and len(session.args.php_page) > 0:
        with Spinner():
            res = php.check_cve_2019_11043(session, links)
        if res:
            reporter.display_results(res, "\t")

    with Spinner():
        res, jira_path = jira.check_for_jira(session)
    if res:
        reporter.display_results(res, "\t")

    if jira_path is not None:
        with Spinner():
            res = jira.check_jira_user_registration(jira_path)
        if res:
            reporter.display_results(res, "\t")

    with Spinner():
        wp_path, res = wordpress.identify(session.url)
    if res:
        reporter.display_results(res, "\t")

    if wp_path is not None:
        with Spinner():
            res = wordpress.check_json_user_enum(wp_path)
            res += wordpress.check_path_disclosure(wp_path)
        if res:
            reporter.display_results(res, "\t")
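
Compared with Example #5, this revision also checks HSTS preload status, enumerates supported HTTP methods, and tests for CVE-2019-11043 when a PHP page is given. As an illustrative sketch of method enumeration (an assumption, not yawast's http_basic.check_http_methods):

from typing import List

import requests

CANDIDATES = ["GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS", "TRACE", "PATCH"]


def discover_methods(url: str) -> List[str]:
    supported = []
    for method in CANDIDATES:
        resp = requests.request(method, url, timeout=30)
        # 405/501 signal an unsupported method; anything else counts as a hit
        if resp.status_code not in (405, 501):
            supported.append(method)
    return supported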
Example #8
def scan(session: Session):
    reporter.register_data("url", session.url)
    reporter.register_data("domain", session.domain)

    # check to see if this is an IP, if so, bail out
    if utils.is_ip(session.domain):
        return

    output.empty()
    output.norm("DNS Information:")

    # get the root domain, by looking up via the PSL
    psl = PublicSuffixList()
    root_domain = psl.privatesuffix(session.domain)
    reporter.register_data("root_domain", root_domain)

    # IP Addresses for the domain we are scanning
    ips = basic.get_ips(session.domain)
    reporter.register_data("ip", ips)
    for ip in ips:
        output.norm("\t%s (%s)" % (ip, basic.get_host(str(ip))))

        addr = ipaddress.ip_address(str(ip))

        if not addr.is_private:
            ni = network_info.network_info(str(ip))
            output.norm("\t\t%s" % ni)

            if addr.version == 4:
                output.norm("\t\thttps://www.shodan.io/host/%s" % ip)
                output.norm("\t\thttps://censys.io/ipv4/%s" % ip)
            else:
                output.norm("\t\thttps://www.shodan.io/host/%s" %
                            str(ip).lower())

        output.empty()

    # TXT records for the domain we are scanning
    try:
        txt = basic.get_text(session.domain)
        reporter.register_data("dns_txt", {session.domain: txt})
        for rec in txt:
            output.norm("\tTXT: %s" % rec)
    except Exception as err:
        output.error(f"Error getting TXT records: {str(err)}")

    # TXT records for the root domain
    try:
        if root_domain != session.domain:
            txt = basic.get_text(root_domain)
            reporter.register_data("dns_txt", {root_domain: txt})
            for rec in txt:
                output.norm("\tTXT (%s): %s" % (root_domain, rec))
    except Exception as err:
        output.error(f"Error getting TXT (root) records: {str(err)}")

    output.empty()

    # MX records for the domain we are scanning
    try:
        mx = basic.get_mx(session.domain)
        reporter.register_data("dns_mx", {session.domain: mx})
        for rec in mx:
            server_ip, ni = _get_ip_info(rec[0])

            info = "%s (%s) - %s (%s)" % (rec[0], rec[1], server_ip, ni)
            output.norm("\tMX: %s" % info)
    except Exception as err:
        output.error(f"Error getting MX records: {str(err)}")

    try:
        # MX records for the root domain
        if root_domain != session.domain:
            mx = basic.get_mx(root_domain)
            reporter.register_data("dns_mx", {root_domain: mx})
            for rec in mx:
                server_ip, ni = _get_ip_info(rec[0])

                info = "%s (%s) - %s (%s)" % (rec[0], rec[1], server_ip, ni)
                output.norm("\tMX (%s): %s" % (root_domain, info))
    except Exception as err:
        output.error(f"Error getting MX (root) records: {str(err)}")

    output.empty()

    # NS records for the root domain
    try:
        ns = basic.get_ns(root_domain)
        reporter.register_data("dns_ns", {root_domain: ns})
        for rec in ns:
            server_ip, ni = _get_ip_info(rec)

            info = "%s - %s (%s)" % (rec, server_ip, ni)
            output.norm("\tNS: %s" % info)
    except Exception as err:
        output.error(f"Error getting NS records: {str(err)}")

    output.empty()

    if session.args.srv:
        try:
            output.norm(
                "Searching for SRV records, this will take a minute...")
            output.empty()

            with Spinner():
                srv_records = srv.find_srv_records(root_domain)
                reporter.register_data("dns_srv", srv_records)

            for rec in srv_records:
                server_ip, ni = _get_ip_info(rec[1])

                info = "%s: %s:%s - %s (%s)" % (rec[0], rec[1], rec[2],
                                                server_ip, ni)
                output.norm("\tSRV: %s" % info)

                output.empty()
        except Exception as err:
            output.error(f"Error getting SRV records: {str(err)}")

    if session.args.subdomains:
        try:
            output.norm(
                "Searching for sub-domains, this will take a few minutes...")
            output.empty()

            with Spinner():
                sds = subdomains.find_subdomains(root_domain)
                reporter.register_data("dns_subdomains", sds)

            for rec in sds:
                info = ""

                if rec[0] == "CNAME":
                    server_ip, ni = _get_ip_info(rec[2])

                    info = "(CNAME) %s -> %s - %s (%s)" % (
                        rec[1],
                        rec[2],
                        server_ip,
                        ni,
                    )
                elif rec[0] == "A":
                    ni = network_info.network_info(rec[2])
                    info = "(A) %s: %s (%s)" % (rec[1], rec[2], ni)
                elif rec[0] == "AAAA":
                    ni = network_info.network_info(rec[2])
                    info = "(AAAA) %s: %s (%s)" % (rec[1], rec[2], ni)

                output.norm("\tSubdomain: %s" % info)
        except Exception as err:
            output.error(f"Error getting subdomain records: {str(err)}")

        output.empty()

    try:
        caa_count = 0
        carec = caa.get_caa(session.domain)
        reporter.register_data("dns_caa", carec)
        for rec in carec:
            curr = rec[0]

            if rec[1] == "CNAME":
                output.norm("\tCAA (%s): CNAME Found: -> %s" % (curr, rec[2]))
            elif rec[1] == "CAA":
                if len(rec[2]) > 0:
                    for line in rec[2]:
                        output.norm('\tCAA (%s): "%s"' % (curr, line))
                        caa_count += 1
                else:
                    output.norm("\tCAA (%s): No Records Found" % curr)

        # notify the user if there's an issue
        if caa_count == 0:
            reporter.display(
                "\tCAA: Domain does not have protection from CAA",
                issue.Issue(Vulnerabilities.DNS_CAA_MISSING, session.url,
                            {"caa_records": carec}),
            )
    except Exception as err:
        output.error(f"Error getting CAA records: {str(err)}")

    output.empty()

    try:
        dk = dnssec.get_dnskey(session.domain)
        reporter.register_data("dns_dnskey", dk)
        if len(dk) > 0:
            for rec in dk:
                output.norm(
                    "\tDNSKEY: Algorithm: '%s' - Flags: '%s' - Key Length: %s"
                    % (rec[2], rec[0], len(rec[3]) * 8))
        else:
            reporter.display(
                "\tDNSKEY: Domain does not use DNSSEC",
                issue.Issue(Vulnerabilities.DNS_DNSSEC_NOT_ENABLED,
                            session.url, {}),
            )
    except Exception as err:
        output.error(f"Error getting DNSKEY records: {str(err)}")

    output.empty()
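
The DNS checks close with CAA and DNSKEY lookups, flagging domains with no CAA protection and domains without DNSSEC. For context, a minimal CAA lookup with dnspython (an assumption for illustration; yawast's caa.get_caa returns the richer (domain, type, value) tuples consumed above):

from typing import List

import dns.resolver


def get_caa_records(domain: str) -> List[str]:
    try:
        answers = dns.resolver.resolve(domain, "CAA")
        return [rec.to_text() for rec in answers]
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        return []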
Example #9
File: http.py Project: sasqwatch/yawast
def scan(args: Namespace, url: str, domain: str):
    reporter.register_data("url", url)
    reporter.register_data("domain", domain)

    output.empty()
    output.norm("HEAD:")
    head = network.http_head(url)

    raw = network.http_build_raw_response(head)
    for line in raw.splitlines():
        output.norm(f"\t{line}")

    output.empty()

    res = http_basic.get_header_issues(head, raw, url)
    if len(res) > 0:
        output.norm("Header Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    res = http_basic.get_cookie_issues(head, raw, url)
    if len(res) > 0:
        output.norm("Cookie Issues:")

        reporter.display_results(res, "\t")
        output.empty()

    # check for WAF signatures
    res = waf.get_waf(head.headers, raw, url)
    if len(res) > 0:
        output.norm("WAF Detection:")

        reporter.display_results(res, "\t")
        output.empty()

    output.norm("Performing vulnerability scan (this will take a while)...")

    links: List[str] = []
    with Spinner():
        try:
            links, res = spider.spider(url)
        except Exception as error:
            output.debug_exception()
            output.error(f"Error running scan: {str(error)}")

    output.norm(f"Identified {len(links) + 1} pages.")
    output.empty()

    if len(res) > 0:
        output.norm("Issues Detected:")

        reporter.display_results(res, "\t")
        output.empty()

    # get files, and add those to the link list
    links += _file_search(args, url, links)

    res = apache_httpd.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = apache_tomcat.check_all(url, links)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = nginx.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = iis.check_all(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = http_basic.check_propfind(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = http_basic.check_trace(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    res = http_basic.check_options(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    wp_path, res = wordpress.identify(url)
    if len(res) > 0:
        reporter.display_results(res, "\t")

    if wp_path is not None:
        res = wordpress.check_json_user_enum(wp_path)
        if len(res) > 0:
            reporter.display_results(res, "\t")
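
This oldest revision still takes the raw argparse values instead of a Session object, and runs the server-specific checks without a spinner. A hypothetical invocation (the Namespace fields are assumptions based on the flags the newer _file_search reads):

from argparse import Namespace

scan(Namespace(files=False, dir=False), "https://example.com", "example.com")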