Example #1
 @classmethod
 def confirm_traffic_routs_through_tor(cls):
     rh = RequestHandler()
     try:
         page = rh.send("GET", url="https://check.torproject.org")
         if "Congratulations. This browser is configured to use Tor." in page.text:
             return
         elif "Sorry. You are not using Tor" in page.text:
             raise RaccoonException("Traffic does not seem to be routed through Tor.\nExiting")
     except RequestHandlerException:
         raise RaccoonException("Tor service seems to be down - not able to connect to 127.0.0.1:9050.\nExiting")
Example #2
 def __init__(self, host):
     self.host = host
     self.cnames = host.dns_results.get('CNAME')
     self.request_handler = RequestHandler()
     self.web_server_validator = WebServerValidator()
     self.waf_present = False
     self.waf_cname_map = {
         "incapdns": "Incapsula",
         "edgekey": "Akamai",
         "akamai": "Akamai",
         "edgesuite": "Akamai",
         "distil": "Distil Networks",
         "cloudfront": "CloudFront",
         "netdna-cdn": "MaxCDN"
     }
     self.waf_app_method_map = {
         "CloudFront": WAFApplicationMethods.detect_cloudfront,
         "Cloudflare": WAFApplicationMethods.detect_cloudflare,
         "Incapsula": WAFApplicationMethods.detect_incapsula,
         "MaxCDN": WAFApplicationMethods.detect_maxcdn,
         "Edgecast": WAFApplicationMethods.detect_edgecast,
         "Distil Networks": WAFApplicationMethods.detect_distil,
         "Sucuri": WAFApplicationMethods.detect_sucuri,
         "Reblaze": WAFApplicationMethods.detect_reblaze
     }
     log_file = HelpUtilities.get_output_path("{}/WAF.txt".format(self.host.target))
     self.logger = Logger(log_file)
Example #3
 @classmethod
 def validate_target_is_up(cls, host):
     cmd = "ping -c 1 {}".format(host)
     try:
         check_call(cmd.split(), stdout=PIPE, stderr=PIPE)
         return
     except CalledProcessError:
         # Maybe ICMP is blocked. Try web server
         try:
             if "http" not in host:
                 host = "http://" + host
             rh = RequestHandler()
             rh.send("GET", url=host, timeout=10)
             return
         except (ConnectionError, RequestHandlerException):
             raise RaccoonException("Target {} seems to be down.\n"
                                    "Run with --skip-health-check to ignore hosts considered as down.".format(host))
Example #4
class WebServerValidator(metaclass=Singleton):

    def __init__(self):
        self.request_handler = RequestHandler()

    def validate_target_webserver(self, host):
        try:
            self.request_handler.send(
                "GET",
                timeout=20,
                url="{}://{}:{}".format(
                    host.protocol,
                    host.target,
                    host.port
                )
            )
            return True
        except RequestHandlerException:
            raise WebServerValidatorException
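
The metaclass=Singleton declaration above guarantees a single shared validator instance; the comments in later examples ("Will get the single, already initiated instance") rely on the same behavior for RequestHandler. A minimal sketch of such a metaclass, assuming the project follows the standard pattern:

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # First instantiation creates the instance; later calls reuse it
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]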
Example #5
 @classmethod
 def query_dns_dumpster(cls, host):
     # Start DNS Dumpster session for the token
     request_handler = RequestHandler()
     dnsdumpster_session = request_handler.get_new_session()
     url = "https://dnsdumpster.com"
     if host.naked:
         target = host.naked
     else:
         target = host.target
     payload = {
         "targetip": target,
         "csrfmiddlewaretoken": None
     }
     try:
         dnsdumpster_session.get(url, timeout=10)
         jar = dnsdumpster_session.cookies
         for c in jar:
             if not c.__dict__.get("name") == "csrftoken":
                 continue
             payload["csrfmiddlewaretoken"] = c.__dict__.get("value")
             break
         return dnsdumpster_session.post(url, data=payload, headers={"Referer": "https://dnsdumpster.com/"})
     except ConnectionError:
         raise RaccoonException
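
The cookie loop above pulls the CSRF token out of the jar by hand. For illustration, requests' cookie jar exposes the same lookup directly; a sketch with a hypothetical target, not project code:

import requests

session = requests.Session()
session.get("https://dnsdumpster.com", timeout=10)
payload = {
    "targetip": "example.com",  # hypothetical target
    # RequestsCookieJar.get replaces the manual iteration over the jar
    "csrfmiddlewaretoken": session.cookies.get("csrftoken"),
}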
Example #6
    def __init__(self,
                 host,
                 ignored_response_codes,
                 num_threads,
                 wordlist,
                 follow_redirects=False):

        self.target = host.target
        self.ignored_error_codes = ignored_response_codes
        self.proto = host.protocol
        self.port = host.port
        self.num_threads = num_threads
        self.wordlist = wordlist
        self.follow_redirects = follow_redirects
        self.request_handler = RequestHandler()  # Will get the single, already initiated instance
        self.logger = None
Example #7
 def __init__(self,
              host,
              sans,
              domain_list,
              ignored_response_codes,
              num_threads,
              follow_redirects,
              no_sub_enum):
     self.host = host
     self.target = host.target
     self.sans = sans
     self.domain_list = domain_list
     self.ignored_error_codes = ignored_response_codes
     self.num_threads = num_threads
     self.follow_redirects = follow_redirects
     self.no_sub_enum = no_sub_enum
     self.request_handler = RequestHandler()
     self.sub_domains = set()
     log_file = HelpUtilities.get_output_path("{}/subdomains.txt".format(self.target))
     self.logger = Logger(log_file)
Example #8
class WebApplicationScanner:
    def __init__(self, host):
        self.host = host
        self.request_handler = RequestHandler()
        self.web_server_validator = WebServerValidator()
        self.headers = None
        self.robots = None
        self.forms = None
        self.fuzzable_urls = set()
        log_file = HelpUtilities.get_output_path("{}/web_scan.txt".format(
            self.host.target))
        self.target_dir = "/".join(log_file.split("/")[:-1])
        self.logger = Logger(log_file)

    def _detect_cms(self, tries=0):
        """
        Detect CMS using whatcms.org.
        Has a re-try mechanism because false negatives may occur
        :param tries: Count of tries for CMS discovery
        """
        # WhatCMS is behind Cloudflare, which detects and blocks proxied/Tor traffic, hence a plain (non-proxied) request.
        page = requests.get(
            url="https://whatcms.org/?s={}".format(self.host.target))
        soup = BeautifulSoup(page.text, "lxml")
        found = soup.select(".panel.panel-success")
        if found:
            try:
                cms = [a for a in soup.select("a")
                       if "/c/" in a.get("href")][0]
                self.logger.info(
                    "{} CMS detected: target is using {}{}{}".format(
                        COLORED_COMBOS.GOOD, COLOR.GREEN, cms.get("title"),
                        COLOR.RESET))
            except IndexError:
                if tries >= 4:
                    return
                else:
                    self._detect_cms(tries=tries + 1)
        else:
            if tries >= 4:
                return
            else:
                self._detect_cms(tries=tries + 1)

    def _cookie_info(self, jar):
        for cookie in jar:
            key = cookie.__dict__.get("name")
            value = cookie.__dict__.get("value")
            domain = cookie.__dict__.get("domain")
            secure = cookie.__dict__.get("secure")
            http_only = cookie.has_nonstandard_attr("HttpOnly")
            try:
                if domain in self.host.target or self.host.target in domain:
                    if not secure or not http_only:
                        current = "%s Cookie: {%s: %s} -" % (
                            COLORED_COMBOS.GOOD, key, value)
                        if not secure and not http_only:
                            current += " both secure and HttpOnly flags are not set"
                        elif not secure:
                            current += " secure flag not set"
                        else:
                            current += " HttpOnly flag not set"
                        self.logger.info(current)

            except TypeError:
                continue

    def _server_info(self):
        if self.headers.get("server"):
            self.logger.info("{} Web server detected: {}{}{}".format(
                COLORED_COMBOS.GOOD, COLOR.GREEN, self.headers.get("server"),
                COLOR.RESET))

    def _x_powered_by(self):
        if self.headers.get("X-Powered-By"):
            self.logger.info("{} X-Powered-By header detected: {}{}{}".format(
                COLORED_COMBOS.GOOD, COLOR.GREEN,
                self.headers.get("X-Powered-By"), COLOR.RESET))

    def _anti_clickjacking(self):
        if not self.headers.get("X-Frame-Options"):
            self.logger.info(
                "{} X-Frame-Options header not detected - target might be vulnerable to clickjacking"
                .format(COLORED_COMBOS.GOOD))

    def _xss_protection(self):
        xss_header = self.headers.get("X-XSS-PROTECTION")
        if xss_header and "1" in xss_header:
            self.logger.info("{} Found X-XSS-PROTECTION header".format(
                COLORED_COMBOS.BAD))

    def _cors_wildcard(self):
        if self.headers.get("Access-Control-Allow-Origin") == "*":
            self.logger.info("{} CORS wildcard detected".format(
                COLORED_COMBOS.GOOD))

    def _robots(self):
        res = self.request_handler.send("GET",
                                        url="{}://{}:{}/robots.txt".format(
                                            self.host.protocol,
                                            self.host.target, self.host.port))
        if res.status_code != 404 and res.text and "<!DOCTYPE html>" not in res.text:
            self.logger.info("{} Found robots.txt".format(COLORED_COMBOS.GOOD))
            with open("{}/robots.txt".format(self.target_dir), "w") as file:
                file.write(res.text)

    def _sitemap(self):
        res = self.request_handler.send("GET",
                                        url="{}://{}:{}/sitemap.xml".format(
                                            self.host.protocol,
                                            self.host.target, self.host.port))
        if res.status_code != 404 and res.text and "<!DOCTYPE html>" not in res.text:
            self.logger.info("{} Found sitemap.xml".format(
                COLORED_COMBOS.GOOD))
            with open("{}/sitemap.xml".format(self.target_dir), "w") as file:
                file.write(res.text)

    def _find_fuzzable_urls(self, soup):
        urls = soup.select("a")
        if urls:
            for url in urls:
                href = url.get("href")
                if href and "?" in href and "=" in href:
                    self.fuzzable_urls.add(href)
            if self.fuzzable_urls:
                self.logger.info("{} {} fuzzable URLs discovered".format(
                    COLORED_COMBOS.NOTIFY, len(self.fuzzable_urls)))

                base_target = "{}://{}:{}".format(self.host.protocol,
                                                  self.host.target,
                                                  self.host.port)
                for url in self.fuzzable_urls:
                    if url.startswith("/"):
                        self.logger.debug("\t{}{}".format(base_target, url))
                    else:
                        self.logger.debug("\t{}".format(url))

    def _find_forms(self, soup):
        self.forms = soup.select("form")
        if self.forms:
            self.logger.info("{} {} HTML forms discovered".format(
                COLORED_COMBOS.NOTIFY, len(self.forms)))
            for form in self.forms:
                form_id = form.get("id")
                form_class = form.get("class")
                form_method = form.get("method")
                form_action = form.get("action")
                self.logger.debug(
                    "Form details: ID: {}, Class: {}, Method: {}, action: {}".
                    format(form_id, form_class, form_method, form_action))

    def _find_emails(self, soup):
        pass

    def get_web_application_info(self):
        session = self.request_handler.get_new_session()
        try:
            with session:
                # Test if target is serving HTTP requests
                response = session.get(timeout=20,
                                       url="{}://{}:{}".format(
                                           self.host.protocol,
                                           self.host.target, self.host.port))
                self.headers = response.headers
                self._detect_cms()
                self._robots()
                self._sitemap()
                self._server_info()
                self._x_powered_by()
                self._cors_wildcard()
                self._xss_protection()
                self._anti_clickjacking()
                self._cookie_info(session.cookies)

                soup = BeautifulSoup(response.text, "lxml")
                self._find_fuzzable_urls(soup)
                self._find_forms(soup)

        except (ConnectionError, TooManyRedirects) as e:
            raise WebAppScannerException("Couldn't get response from server.\n"
                                         "Caused due to exception: {}".format(
                                             str(e)))

    async def run_scan(self):
        self.logger.info("{} Trying to collect {} web application data".format(
            COLORED_COMBOS.INFO, self.host))
        try:
            self.web_server_validator.validate_target_webserver(self.host)
            self.get_web_application_info()
        except WebServerValidatorException:
            self.logger.info(
                "{} Target does not seem to have an active web server on port: {}. "
                "No web application data will be gathered.".format(
                    COLORED_COMBOS.NOTIFY, self.host.port))
            return
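
The _find_fuzzable_urls heuristic above treats any anchor whose href carries a query string (both "?" and "=") as fuzzable. A self-contained illustration on hypothetical HTML (requires lxml):

from bs4 import BeautifulSoup

html = '<a href="/search?q=test">search</a><a href="/about">about</a>'
soup = BeautifulSoup(html, "lxml")
fuzzable = {a.get("href") for a in soup.select("a")
            if a.get("href") and "?" in a.get("href") and "=" in a.get("href")}
print(fuzzable)  # {'/search?q=test'}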
Example #9
 def __init__(self):
     self.request_handler = RequestHandler()
Example #10
class WAF:

    def __init__(self, host):
        self.host = host
        self.cnames = host.dns_results.get('CNAME')
        self.request_handler = RequestHandler()
        self.web_server_validator = WebServerValidator()
        self.waf_present = False
        self.waf_cname_map = {
            "incapdns": "Incapsula",
            "edgekey": "Akamai",
            "akamai": "Akamai",
            "edgesuite": "Akamai",
            "distil": "Distil Networks",
            "cloudfront": "CloudFront",
            "netdna-cdn": "MaxCDN"
        }
        self.waf_app_method_map = {
            "CloudFront": WAFApplicationMethods.detect_cloudfront,
            "Cloudflare": WAFApplicationMethods.detect_cloudflare,
            "Incapsula": WAFApplicationMethods.detect_incapsula,
            "MaxCDN": WAFApplicationMethods.detect_maxcdn,
            "Edgecast": WAFApplicationMethods.detect_edgecast,
            "Distil Networks": WAFApplicationMethods.detect_distil,
            "Sucuri": WAFApplicationMethods.detect_sucuri,
            "Reblaze": WAFApplicationMethods.detect_reblaze
        }
        log_file = HelpUtilities.get_output_path("{}/WAF.txt".format(self.host.target))
        self.logger = Logger(log_file)

    def _waf_detected(self, name):
        self.logger.info(
            "{} Detected WAF presence in web application: {}{}{}".format(
                COLORED_COMBOS.BAD, COLOR.RED, name, COLOR.RESET))
        self.waf_present = True

    def _detect_by_cname(self):
        for waf in self.waf_cname_map:
            if any(waf in str(cname) for cname in self.cnames):
                self.logger.info("{} Detected WAF presence in CNAME: {}{}{}".format(
                    COLORED_COMBOS.BAD, COLOR.RED, self.waf_cname_map.get(waf), COLOR.RESET)
                )
                self.waf_present = True

    def _detect_by_application(self):
        try:
            session = self.request_handler.get_new_session()
            response = session.get(
                timeout=20,
                allow_redirects=True,
                url="{}://{}:{}".format(
                    self.host.protocol,
                    self.host.target,
                    self.host.port
                )
            )
            for waf, method in self.waf_app_method_map.items():
                result = method(response)
                if result:
                    self._waf_detected(waf)

        except (ConnectionError, TooManyRedirects) as e:
            raise WAFException("Couldn't get response from server.\n"
                               "Caused due to exception: {}".format(str(e)))

    async def detect(self):
        self.logger.info("{} Trying to detect WAF presence in {}".format(COLORED_COMBOS.INFO, self.host))
        if self.cnames:
            self._detect_by_cname()
        try:
            self.web_server_validator.validate_target_webserver(self.host)
            self._detect_by_application()

            if not self.waf_present:
                self.logger.info("{} Did not detect WAF presence in target".format(COLORED_COMBOS.GOOD))
        except WebServerValidatorException:
            self.logger.info(
                "{} Target does not seem to have an active web server on port {}. "
                "No WAF could be detected on an application level.".format(COLORED_COMBOS.NOTIFY, self.host.port))
Example #11
class SubDomainEnumerator:

    def __init__(self,
                 host,
                 sans,
                 domain_list,
                 ignored_response_codes,
                 num_threads,
                 follow_redirects,
                 no_sub_enum):
        self.host = host
        self.target = host.target
        self.sans = sans
        self.domain_list = domain_list
        self.ignored_error_codes = ignored_response_codes
        self.num_threads = num_threads
        self.follow_redirects = follow_redirects
        self.no_sub_enum = no_sub_enum
        self.request_handler = RequestHandler()
        self.sub_domains = set()
        log_file = HelpUtilities.get_output_path("{}/subdomains.txt".format(self.target))
        self.logger = Logger(log_file)

    async def run(self):
        self.logger.info("\n{} Enumerating Subdomains".format(COLORED_COMBOS.INFO))
        if self.sans:
            self.find_subdomains_in_sans()
        self.google_dork()
        if not self.no_sub_enum:
            await self.bruteforce()
        self.logger.info("\n{} Done enumerating Subdomains".format(COLORED_COMBOS.INFO))

    def find_subdomains_in_sans(self):
        """Looks for different TLDs as well as different sub-domains in SAN list"""
        self.logger.info("{} Trying to find Subdomains in SANs list".format(COLORED_COMBOS.INFO))
        if self.host.naked:
            domain = self.host.naked
            tld_less = domain.split(".")[0]
        else:
            domain = self.host.target.split(".")
            tld_less = domain[1]
            domain = ".".join(domain[1:])

        for san in self.sans:
            if (tld_less in san or domain in san) and self.target != san:
                self.logger.info("{} Subdomain detected: {}".format(COLORED_COMBOS.GOOD, san))

    def google_dork(self):
        self.logger.info("{} Trying to discover subdomains in Google".format(COLORED_COMBOS.INFO))
        page = self.request_handler.send(
            "GET",
            url="https://www.google.com/search?q=site:{}&num=100".format(self.target)
        )
        soup = BeautifulSoup(page.text, "lxml")
        results = set(re.findall(r"\w+\.{}".format(self.target), soup.text))
        for subdomain in results:
            if "www." not in subdomain:
                self.logger.info("{} Detected subdomain through Google dorking: {}".format(
                    COLORED_COMBOS.GOOD, subdomain))

    async def bruteforce(self):
        path = "{}/subdomain_fuzz.txt".format(self.host.target)

        # If a naked domain exists, use it
        if self.host.naked:
            self.host.target = self.host.naked

        self.logger.info("{} Bruteforcing subdomains".format(COLORED_COMBOS.INFO))
        sub_domain_fuzzer = URLFuzzer(
            host=self.host,
            wordlist=self.domain_list,
            num_threads=self.num_threads,
            ignored_response_codes=self.ignored_error_codes,
            follow_redirects=self.follow_redirects
            )
        await sub_domain_fuzzer.fuzz_all(sub_domain=True, log_file_path=path)
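
google_dork above extracts candidate subdomains with a regex built from the target. Its behavior in isolation, on hypothetical text:

import re

text = "links: mail.example.com www.example.com blog.example.com"
print(set(re.findall(r"\w+\.{}".format("example.com"), text)))
# {'mail.example.com', 'www.example.com', 'blog.example.com'} (order may vary)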
Example #12
class URLFuzzer:

    def __init__(self,
                 host,
                 ignored_response_codes,
                 num_threads,
                 wordlist,
                 follow_redirects=False):

        self.target = host.target
        self.ignored_error_codes = ignored_response_codes
        self.proto = host.protocol
        self.port = host.port
        self.num_threads = num_threads
        self.wordlist = wordlist
        self.follow_redirects = follow_redirects
        self.request_handler = RequestHandler()  # Will get the single, already initiated instance
        self.logger = None

    def _log_response(self, code, url, headers):
        if 300 > code >= 200:
            color = COLOR.GREEN
        elif 400 > code >= 300:
            color = COLOR.BLUE
            url += " redirects to {}".format(headers.get("Location"))
        elif 510 > code >= 400:
            color = COLOR.RED
        else:
            color = COLOR.RESET
        self.logger.info("\t{}[{}]{} {}".format(
            color, code, COLOR.RESET, url))

    def _build_request_url(self, uri, sub_domain):
        if not sub_domain:
            if self.port != 80 and self.port != 443:
                url = "{}://{}:{}/{}".format(self.proto, self.target, self.port, uri)
            else:
                url = "{}://{}/{}".format(self.proto, self.target, uri)
        else:
            if self.port != 80 and self.port != 443:
                url = "{}://{}.{}:{}".format(self.proto, uri, self.target, self.port)
            else:
                url = "{}://{}.{}".format(self.proto, uri, self.target)
        return url

    def _fetch(self, uri, sub_domain=False):
        """
        Send a HEAD request to the URL and log the response code if it's not in ignored_error_codes
        :param uri: URI to fuzz
        :param sub_domain: If True, build destination URL with {URL}.{HOST} else {HOST}/{URL}
        """
        url = self._build_request_url(uri, sub_domain=sub_domain)

        try:
            res = self.request_handler.send("HEAD", url=url, allow_redirects=self.follow_redirects)
            if res.status_code not in self.ignored_error_codes:
                self._log_response(res.status_code, url, res.headers)
        except (AttributeError, RequestHandlerException):
            # res is None or another error occurred
            pass

    def get_log_file_path(self, path):
        if path:
            log_file = path
        else:
            log_file = "{}/url_fuzz.txt".format(self.target)

        return Logger(HelpUtilities.get_output_path(log_file))

    def _rule_out_false_positives(self, sub_domain):
        fake_uris = (uuid.uuid4() for i in range(3))
        for uri in fake_uris:
            url = self._build_request_url(uri, sub_domain)
            try:
                res = self.request_handler.send("GET", url=url, allow_redirects=self.follow_redirects)
                if res.status_code == 200:
                    if sub_domain:
                        err_msg = "Wildcard subdomain support detected (all subdomains return 200)." \
                                  " Will not bruteforce subdomains"
                    else:
                        err_msg = "Web server seems to redirect requests for all resources " \
                                  "to eventually return 200. Will not bruteforce URLs"
                    raise FuzzerException(err_msg)

            except RequestHandlerException as e:
                if sub_domain:  # If should-not-work.example.com doesn't resolve, no wildcard subdomain is present
                    return
                else:
                    raise FuzzerException("Could not get a response from {}."
                                          " Maybe target is down ?".format(self.target))

    async def fuzz_all(self, sub_domain=False, log_file_path=None):
        """
        Create a pool of threads, read the wordlist and invoke _fetch on each entry.
        Should be run in an event loop.
        :param sub_domain: Indicate if this is subdomain enumeration or URL busting
        :param log_file_path: Log subdomain enum results to this path.
        """

        self.logger = self.get_log_file_path(log_file_path)
        try:
            with open(self.wordlist, "r") as file:
                fuzzlist = file.readlines()
                fuzzlist = [x.replace("\n", "") for x in fuzzlist]
        except FileNotFoundError:
            raise FuzzerException("Cannot read URL list from {}. Will not perform Fuzzing".format(self.wordlist))

        try:
            # Rule out wildcard subdomain support/all resources redirect to a 200 page
            self._rule_out_false_positives(sub_domain)

            if not sub_domain:
                self.logger.info("{} Fuzzing URLs".format(COLORED_COMBOS.INFO))
            self.logger.info("{} Reading from list: {}".format(COLORED_COMBOS.INFO, self.wordlist))
            pool = ThreadPool(self.num_threads)
            pool.map(partial(self._fetch, sub_domain=sub_domain), fuzzlist)

            if not sub_domain:
                self.logger.info("{} Done fuzzing URLs".format(COLORED_COMBOS.INFO))
        except FuzzerException as e:
            self.logger.info("{} {}".format(COLORED_COMBOS.BAD, e))
Example #13
class SubDomainEnumerator:

    def __init__(self,
                 host,
                 sans,
                 domain_list,
                 ignored_response_codes,
                 num_threads,
                 follow_redirects,
                 no_sub_enum):
        self.host = host
        self.target = host.target
        self.sans = sans
        self.domain_list = domain_list
        self.ignored_error_codes = ignored_response_codes
        self.num_threads = num_threads
        self.follow_redirects = follow_redirects
        self.no_sub_enum = no_sub_enum
        self.request_handler = RequestHandler()
        self.sub_domains = set()
        log_file = HelpUtilities.get_output_path("{}/subdomains.txt".format(self.target))
        self.logger = Logger(log_file)

    async def run(self):
        self.logger.info("{} Enumerating Subdomains".format(COLORED_COMBOS.INFO))
        if self.sans:
            self._extract_from_sans()
        self._google_dork()
        self._extract_from_dns_dumpster()
        if not self.no_sub_enum:
            await self.bruteforce()
        self.logger.info("{} Done enumerating Subdomains".format(COLORED_COMBOS.INFO))

    def _extract_from_sans(self):
        """Looks for different TLDs as well as different sub-domains in SAN list"""
        self.logger.info("{} Trying to find Subdomains in SANs list".format(COLORED_COMBOS.NOTIFY))
        if self.host.naked:
            domain = self.host.naked
            tld_less = domain.split(".")[0]
        else:
            domain = self.host.target.split(".")
            tld_less = domain[1]
            domain = ".".join(domain[1:])

        for san in self.sans:
            if (tld_less in san or domain in san) and self.target != san and not san.startswith("*"):
                self.logger.info("{} Subdomain detected: {}".format(COLORED_COMBOS.GOOD, san))

    def _google_dork(self):
        self.logger.info("{} Trying to discover subdomains in Google".format(COLORED_COMBOS.NOTIFY))
        page = self.request_handler.send(
            "GET",
            url="https://www.google.com/search?q=site:{}&num=100".format(self.target)
        )
        soup = BeautifulSoup(page.text, "lxml")
        results = set(re.findall(r"\w+\.{}".format(self.target), soup.text))
        for subdomain in results:
            if "www." not in subdomain:
                self.logger.info("{} Detected subdomain through Google dorking: {}".format(
                    COLORED_COMBOS.GOOD, subdomain))

    def _extract_from_dns_dumpster(self):
        self.logger.info("{} Trying to extract subdomains from DNS dumpster".format(COLORED_COMBOS.NOTIFY))
        try:
            page = HelpUtilities.query_dns_dumpster(host=self.host)
            soup = BeautifulSoup(page.text, "lxml")
            hosts_table = soup.select(".table")[-1]
            for row in hosts_table.find_all("tr"):
                tds = row.select("td")
                sub_domain = tds[0].text.split('\n')[0]  # Grab just the URL, truncate other information
                self.logger.info("{} Found subdomain in DNS dumpster: {}".format(COLORED_COMBOS.GOOD, sub_domain))
                self.sub_domains.add(sub_domain)
        except RaccoonException:
            self.logger.info("{} Failed to query DNS dumpster for subdomains".format(COLORED_COMBOS.BAD))

    async def bruteforce(self):
        path = "{}/subdomain_fuzz.txt".format(self.host.target)

        # If a naked domain exists, use it
        if self.host.naked:
            self.host.target = self.host.naked

        self.logger.info("{} Bruteforcing subdomains".format(COLORED_COMBOS.NOTIFY))
        sub_domain_fuzzer = URLFuzzer(
            host=self.host,
            wordlist=self.domain_list,
            num_threads=self.num_threads,
            ignored_response_codes=self.ignored_error_codes,
            follow_redirects=self.follow_redirects
            )
        await sub_domain_fuzzer.fuzz_all(sub_domain=True, log_file_path=path)
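
_extract_from_dns_dumpster above scrapes the last .table element and keeps only the first line of each cell. A mini-demo on hypothetical HTML (the real page layout may differ, which is why Example #16 also catches IndexError):

from bs4 import BeautifulSoup

html = ('<table class="table"><tr><td>api.example.com\n1.2.3.4</td></tr>'
        '<tr><td>mail.example.com\n5.6.7.8</td></tr></table>')
soup = BeautifulSoup(html, "lxml")
rows = soup.select(".table")[-1].find_all("tr")
print([row.select("td")[0].text.split("\n")[0] for row in rows])
# ['api.example.com', 'mail.example.com']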
Example #14
 def __init__(self):
     self.request_handler = RequestHandler()
Example #15
class WAF:
    def __init__(self, host):
        self.host = host
        self.cnames = host.dns_results.get('CNAME')
        self.request_handler = RequestHandler()
        self.web_server_validator = WebServerValidator()
        self.waf_present = False
        self.waf_cname_map = {
            "incapdns": "Incapsula",
            "edgekey": "Akamai",
            "akamai": "Akamai",
            "edgesuite": "Akamai",
            "distil": "Distil Networks",
            "cloudfront": "CloudFront",
            "netdna-cdn": "MaxCDN"
        }
        self.waf_app_method_map = {
            "CloudFront": WAFApplicationMethods.detect_cloudfront,
            "Cloudflare": WAFApplicationMethods.detect_cloudflare,
            "Incapsula": WAFApplicationMethods.detect_incapsula,
            "MaxCDN": WAFApplicationMethods.detect_maxcdn,
            "Edgecast": WAFApplicationMethods.detect_edgecast,
            "Distil Networks": WAFApplicationMethods.detect_distil,
            "Sucuri": WAFApplicationMethods.detect_sucuri,
            "Reblaze": WAFApplicationMethods.detect_reblaze
        }
        log_file = HelpUtilities.get_output_path("{}/WAF.txt".format(
            self.host.target))
        self.logger = Logger(log_file)

    def _waf_detected(self, name):
        self.logger.info(
            "{} Detected WAF presence in web application: {}{}{}".format(
                COLORED_COMBOS.BAD, COLOR.RED, name, COLOR.RESET))
        self.waf_present = True

    def _detect_by_cname(self):
        for waf in self.waf_cname_map:
            if any(waf in str(cname) for cname in self.cnames):
                self.logger.info(
                    "{} Detected WAF presence in CNAME: {}{}{}".format(
                        COLORED_COMBOS.BAD, COLOR.RED,
                        self.waf_cname_map.get(waf), COLOR.RESET))
                self.waf_present = True

    def _detect_by_application(self):
        try:
            session = self.request_handler.get_new_session()
            response = session.get(timeout=20,
                                   allow_redirects=True,
                                   url="{}://{}:{}".format(
                                       self.host.protocol, self.host.target,
                                       self.host.port))
            for waf, method in self.waf_app_method_map.items():
                result = method(response)
                if result:
                    self._waf_detected(waf)

        except (ConnectionError, TooManyRedirects) as e:
            raise WAFException("Couldn't get response from server.\n"
                               "Caused due to exception: {}".format(str(e)))

    async def detect(self):
        self.logger.info("{} Trying to detect WAF presence in {}".format(
            COLORED_COMBOS.INFO, self.host))
        if self.cnames:
            self._detect_by_cname()
        try:
            self.web_server_validator.validate_target_webserver(self.host)
            self._detect_by_application()

            if not self.waf_present:
                self.logger.info(
                    "{} Did not detect WAF presence in target".format(
                        COLORED_COMBOS.GOOD))
        except WebServerValidatorException:
            self.logger.info(
                "{} Target does not seem to have an active web server on port {}. "
                "No WAF could be detected on an application level.".format(
                    COLORED_COMBOS.NOTIFY, self.host.port))
Example #16
class SubDomainEnumerator:
    def __init__(self, host, sans, domain_list, ignored_response_codes,
                 num_threads, follow_redirects, no_sub_enum):
        self.host = host
        self.target = host.target
        self.sans = sans
        self.domain_list = domain_list
        self.ignored_error_codes = ignored_response_codes
        self.num_threads = num_threads
        self.follow_redirects = follow_redirects
        self.no_sub_enum = no_sub_enum
        self.request_handler = RequestHandler()
        log_file = HelpUtilities.get_output_path("{}/subdomains.txt".format(
            self.target))
        self.logger = Logger(log_file)

    async def run(self):
        self.logger.info("{} Enumerating Subdomains".format(
            COLORED_COMBOS.INFO))
        if self.sans:
            self._extract_from_sans()
        self._google_dork()
        self._extract_from_dns_dumpster()
        if not self.no_sub_enum:
            await self.bruteforce()
        self.logger.info("{} Done enumerating Subdomains".format(
            COLORED_COMBOS.INFO))

    def _extract_from_sans(self):
        """Looks for different TLDs as well as different sub-domains in SAN list"""
        self.logger.info("{} Trying to find Subdomains in SANs list".format(
            COLORED_COMBOS.NOTIFY))
        if self.host.naked:
            domain = self.host.naked
            tld_less = domain.split(".")[0]
        else:
            domain = self.host.target.split(".")
            tld_less = domain[1]
            domain = ".".join(domain[1:])

        for san in self.sans:
            if (tld_less in san or domain
                    in san) and self.target != san and not san.startswith("*"):
                self.logger.info("{} Subdomain detected: {}".format(
                    COLORED_COMBOS.GOOD, san))

    def _google_dork(self):
        self.logger.info("{} Trying to discover subdomains in Google".format(
            COLORED_COMBOS.NOTIFY))
        page = self.request_handler.send(
            "GET",
            url="https://www.google.com/search?q=site:{}&num=100".format(
                self.target))
        soup = BeautifulSoup(page.text, "lxml")
        results = set(re.findall(r"\w+\.{}".format(self.target), soup.text))
        for subdomain in results:
            if "www." not in subdomain:
                self.logger.info(
                    "{} Detected subdomain through Google dorking: {}".format(
                        COLORED_COMBOS.GOOD, subdomain))

    def _extract_from_dns_dumpster(self):
        self.logger.info(
            "{} Trying to extract subdomains from DNS dumpster".format(
                COLORED_COMBOS.NOTIFY))
        try:
            page = HelpUtilities.query_dns_dumpster(host=self.host)
            soup = BeautifulSoup(page.text, "lxml")
            hosts_table = soup.select(".table")[-1]
            for row in hosts_table.find_all("tr"):
                tds = row.select("td")
                sub_domain = tds[0].text.split('\n')[0]  # Grab just the URL, truncate other information
                self.logger.info(
                    "{} Found subdomain in DNS dumpster: {}".format(
                        COLORED_COMBOS.GOOD, sub_domain))
        except (RaccoonException, IndexError):
            self.logger.info(
                "{} Failed to query DNS dumpster for subdomains".format(
                    COLORED_COMBOS.BAD))

    async def bruteforce(self):
        path = "{}/subdomain_fuzz.txt".format(self.host.target)

        # If a naked domain exists, use it
        if self.host.naked:
            self.host.target = self.host.naked

        self.logger.info("{} Bruteforcing subdomains".format(
            COLORED_COMBOS.NOTIFY))
        sub_domain_fuzzer = URLFuzzer(
            host=self.host,
            wordlist=self.domain_list,
            num_threads=self.num_threads,
            ignored_response_codes=self.ignored_error_codes,
            follow_redirects=self.follow_redirects)
        await sub_domain_fuzzer.fuzz_all(sub_domain=True, log_file_path=path)
Example #17
class URLFuzzer:
    def __init__(self,
                 host,
                 ignored_response_codes,
                 num_threads,
                 wordlist,
                 follow_redirects=False):

        self.target = host.target
        self.ignored_error_codes = ignored_response_codes
        self.proto = host.protocol
        self.port = host.port
        self.num_threads = num_threads
        self.wordlist = wordlist
        self.follow_redirects = follow_redirects
        self.request_handler = RequestHandler()  # Will get the single, already initiated instance
        self.logger = None

    def _log_response(self, code, url, headers):
        if 300 > code >= 200:
            color = COLOR.GREEN
        elif 400 > code >= 300:
            color = COLOR.BLUE
            url += " redirects to {}".format(headers.get("Location"))
        elif 510 > code >= 400:
            color = COLOR.RED
        else:
            color = COLOR.RESET
        self.logger.info("\t{}[{}]{} {}".format(color, code, COLOR.RESET, url))

    def _fetch(self, uri, sub_domain=False):
        """
        Send a HEAD request to the URL and log the response code if it's not in ignored_error_codes
        :param uri: URI to fuzz
        :param sub_domain: If True, build destination URL with {URL}.{HOST} else {HOST}/{URL}
        """
        if not sub_domain:
            if self.port != 80 and self.port != 443:
                url = "{}://{}:{}/{}".format(self.proto, self.target,
                                             self.port, uri)
            else:
                url = "{}://{}/{}".format(self.proto, self.target, uri)
        else:
            if self.port != 80 and self.port != 443:
                url = "{}://{}.{}:{}".format(self.proto, uri, self.target,
                                             self.port)
            else:
                url = "{}://{}.{}".format(self.proto, uri, self.target)

        try:
            res = self.request_handler.send(
                "HEAD", url=url, allow_redirects=self.follow_redirects)
            if res.status_code not in self.ignored_error_codes:
                self._log_response(res.status_code, url, res.headers)
        except (AttributeError, RequestHandlerException):
            # res is None or another error occurred
            pass

    def get_log_file_path(self, path):
        if path:
            log_file = path
        else:
            log_file = "{}/url_fuzz.txt".format(self.target)

        return Logger(HelpUtilities.get_output_path(log_file))

    async def fuzz_all(self, sub_domain=False, log_file_path=None):
        """
        Create a pool of threads, read the wordlist and invoke _fetch on each entry.
        Should be run in an event loop.
        :param sub_domain: Indicate if this is subdomain enumeration or URL busting
        :param log_file_path: Log subdomain enum results to this path.
        """

        self.logger = self.get_log_file_path(log_file_path)
        try:
            with open(self.wordlist, "r") as file:
                fuzzlist = file.readlines()
                fuzzlist = [x.replace("\n", "") for x in fuzzlist]
        except FileNotFoundError:
            raise FuzzerException(
                "Cannot read URL list from {}. Will not perform Fuzzing".
                format(self.wordlist))

        if not sub_domain:
            self.logger.info("{} Fuzzing URLs".format(COLORED_COMBOS.INFO))

        self.logger.info("{} Reading from list: {}".format(
            COLORED_COMBOS.INFO, self.wordlist))
        pool = ThreadPool(self.num_threads)
        pool.map(partial(self._fetch, sub_domain=sub_domain), fuzzlist)

        if not sub_domain:
            self.logger.info("{} Done fuzzing URLs".format(
                COLORED_COMBOS.INFO))
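
The pool.map(partial(...)) dispatch above is plain multiprocessing.dummy threading with a pinned keyword argument; an isolated sketch:

from functools import partial
from multiprocessing.dummy import Pool as ThreadPool

def fetch(uri, sub_domain=False):
    return ("sub" if sub_domain else "uri", uri)

pool = ThreadPool(4)
print(pool.map(partial(fetch, sub_domain=True), ["admin", "mail"]))
# [('sub', 'admin'), ('sub', 'mail')]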
Example #18
File: main.py  Project: whoami213/Raccoon
def main(target,
         tor_routing,
         proxy_list,
         proxy,
         cookies,
         dns_records,
         wordlist,
         threads,
         ignored_response_codes,
         subdomain_list,
         full_scan,
         scripts,
         services,
         port,
         tls_port,
         skip_health_check,
         follow_redirects,
         no_url_fuzzing,
         no_sub_enum,
         skip_nmap_scan,
         # delay,
         outdir,
         quiet):
    try:
        # ------ Arg validation ------
        # Set logging level and Logger instance
        log_level = HelpUtilities.determine_verbosity(quiet)
        logger = SystemOutLogger(log_level)
        intro(logger)

        target = target.lower()
        try:
            HelpUtilities.validate_executables()
        except RaccoonException as e:
            logger.critical(str(e))
            exit(9)
        HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
        HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
        HelpUtilities.create_output_directory(outdir)

        if tor_routing:
            logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
        elif proxy_list:
            if proxy_list and not os.path.isfile(proxy_list):
                raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
            else:
                logger.info("{} Routing traffic using proxies from list {}\n".format(
                    COLORED_COMBOS.NOTIFY, proxy_list))
        elif proxy:
            logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))

        # TODO: Sanitize delay argument

        dns_records = tuple(dns_records.split(","))
        ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))

        if port:
            HelpUtilities.validate_port_range(port)

        # ------ /Arg validation ------

        if cookies:
            try:
                cookies = HelpUtilities.parse_cookie_arg(cookies)
            except RaccoonException as e:
                logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
                exit(2)

        # Set Request Handler instance
        request_handler = RequestHandler(
            proxy_list=proxy_list,
            tor_routing=tor_routing,
            single_proxy=proxy,
            cookies=cookies
        )

        if tor_routing:
            try:
                HelpUtilities.confirm_traffic_routs_through_tor()
                logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
                    COLORED_COMBOS.NOTIFY))
            except RaccoonException as err:
                print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(3)

        main_loop = asyncio.get_event_loop()

        logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))

        # TODO: Populate array when multiple targets are supported
        # hosts = []
        try:
            host = Host(target=target, dns_records=dns_records)
            host.parse()
        except HostHandlerException as e:
            logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
            exit(11)

        if not skip_health_check:
            try:
                HelpUtilities.validate_target_is_up(host)
            except RaccoonException as err:
                logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(42)

        if not skip_nmap_scan:
            logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
            nmap_scan = NmapScan(host, full_scan, scripts, services, port)
            # # # TODO: Populate array when multiple targets are supported
            # nmap_threads = []
            nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
            # Run Nmap scan in the background. Can take some time
            nmap_thread.start()

        # Run first set of checks - TLS, Web/WAF Data, DNS data
        waf = WAF(host)
        tls_info_scanner = TLSHandler(host, tls_port)
        web_app_scanner = WebApplicationScanner(host)
        tasks = (
            asyncio.ensure_future(tls_info_scanner.run()),
            asyncio.ensure_future(waf.detect()),
            asyncio.ensure_future(DNSHandler.grab_whois(host)),
            asyncio.ensure_future(web_app_scanner.run_scan()),
            asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
        )

        main_loop.run_until_complete(asyncio.wait(tasks))

        # Second set of checks - URL fuzzing, Subdomain enumeration
        if not no_url_fuzzing:
            fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
            main_loop.run_until_complete(fuzzer.fuzz_all())

        if not host.is_ip:
            sans = tls_info_scanner.sni_data.get("SANs")
            subdomain_enumerator = SubDomainEnumerator(
                host,
                domain_list=subdomain_list,
                sans=sans,
                ignored_response_codes=ignored_response_codes,
                num_threads=threads,
                follow_redirects=follow_redirects,
                no_sub_enum=no_sub_enum
            )
            main_loop.run_until_complete(subdomain_enumerator.run())

        if not skip_nmap_scan:
            if nmap_thread.is_alive():
                logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
                            "Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))

                while nmap_thread.is_alive():
                    time.sleep(15)

        logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        os.system("stty sane")

    except KeyboardInterrupt:
        print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
        # Fix F'd up terminal after CTRL+C
        os.system("stty sane")
        exit(42)
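
The first set of checks in main is driven by the classic pre-3.10 asyncio pattern: schedule futures with ensure_future, then block on asyncio.wait. An isolated sketch of that pattern:

import asyncio

async def job(name):
    await asyncio.sleep(0.1)
    return name

loop = asyncio.get_event_loop()
tasks = (asyncio.ensure_future(job("tls")), asyncio.ensure_future(job("waf")))
loop.run_until_complete(asyncio.wait(tasks))
print([t.result() for t in tasks])  # ['tls', 'waf']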
Example #19
class URLFuzzer:

    def __init__(self,
                 host,
                 ignored_response_codes,
                 num_threads,
                 wordlist,
                 follow_redirects=False):

        self.target = host.target
        self.ignored_error_codes = ignored_response_codes
        self.proto = host.protocol
        self.port = host.port
        self.num_threads = num_threads
        self.wordlist = wordlist
        self.follow_redirects = follow_redirects
        self.request_handler = RequestHandler()  # Will get the single, already initiated instance
        self.logger = None

    def _log_response(self, code, url, headers):
        if 300 > code >= 200:
            color = COLOR.GREEN
        elif 400 > code >= 300:
            color = COLOR.BLUE
            url += " redirects to {}".format(headers.get("Location"))
        elif 510 > code >= 400:
            color = COLOR.RED
        else:
            color = COLOR.RESET
        self.logger.info("\t{}[{}]{} {}".format(
            color, code, COLOR.RESET, url))

    def _fetch(self, uri, sub_domain=False):
        """
        Send a HEAD request to the URL and log the response code if it's not in ignored_error_codes
        :param uri: URI to fuzz
        :param sub_domain: If True, build destination URL with {URL}.{HOST} else {HOST}/{URL}
        """
        if not sub_domain:
            if self.port != 80 and self.port != 443:
                url = "{}://{}:{}/{}".format(self.proto, self.target, self.port, uri)
            else:
                url = "{}://{}/{}".format(self.proto, self.target, uri)
        else:
            if self.port != 80 and self.port != 443:
                url = "{}://{}.{}:{}".format(self.proto, uri, self.target, self.port)
            else:
                url = "{}://{}.{}".format(self.proto, uri, self.target)

        try:
            res = self.request_handler.send("HEAD", url=url, allow_redirects=self.follow_redirects)
            if res.status_code not in self.ignored_error_codes:
                self._log_response(res.status_code, url, res.headers)
        except (AttributeError, RequestHandlerException):
            # res is None or another error occurred
            pass

    def get_log_file_path(self, path):
        if path:
            log_file = path
        else:
            log_file = "{}/url_fuzz.txt".format(self.target)

        return Logger(HelpUtilities.get_output_path(log_file))

    async def fuzz_all(self, sub_domain=False, log_file_path=None):
        """
        Create a pool of threads, read the wordlist and invoke _fetch on each entry.
        Should be run in an event loop.
        :param sub_domain: Indicate if this is subdomain enumeration or URL busting
        :param log_file_path: Log subdomain enum results to this path.
        """

        self.logger = self.get_log_file_path(log_file_path)
        try:
            with open(self.wordlist, "r") as file:
                fuzzlist = file.readlines()
                fuzzlist = [x.replace("\n", "") for x in fuzzlist]
        except FileNotFoundError:
            raise FuzzerException("Cannot read URL list from {}. Will not perform Fuzzing".format(self.wordlist))

        if not sub_domain:
            self.logger.info("{} Fuzzing URLs".format(COLORED_COMBOS.INFO))

        self.logger.info("{} Reading from list: {}".format(COLORED_COMBOS.INFO, self.wordlist))
        pool = ThreadPool(self.num_threads)
        pool.map(partial(self._fetch, sub_domain=sub_domain), fuzzlist)

        if not sub_domain:
            self.logger.info("{} Done fuzzing URLs".format(COLORED_COMBOS.INFO))