Example #1
    def sub(self):
        """
        This method is used to find all the sub-domain from base-domain
        """

        logger.info("Start finding sub-domains of %s" % self.profile.get_target())
        self.kb.set_sub_domains(self.targets)
        to_walk = self.targets
        sub_plugins = self.plugins["search"] + self.plugins["brute"]

        # Keep walking until a round discovers nothing new.
        while to_walk:
            sub_domain_list = []
            new_domain = []

            for plugin in sub_plugins:
                for domain in to_walk:
                    engine_result = plugin.discover(domain)
                    for sd in engine_result:
                        sub_domain_list.append(sd)

            for domain in sub_domain_list:
                if self.kb.add_sub_domain(domain):
                    new_domain.append(domain)

            to_walk = new_domain

        return self.kb.get_sub_domains()
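
The loop above is a breadth-first fixed-point search: each round feeds only the newly discovered domains back into the plugins, and the walk stops once a round yields nothing new. A minimal standalone sketch of the same pattern, with the plugin and domain objects replaced by a plain callable and strings purely for illustration:

def fixed_point_walk(seeds, discover):
    """Breadth-first expansion until no new items appear."""
    known = set(seeds)
    to_walk = list(seeds)
    while to_walk:
        new = {d for item in to_walk for d in discover(item)} - known
        known |= new
        to_walk = list(new)
    return known

# Toy usage: expands example.com by one label, then converges.
# fixed_point_walk(["example.com"], lambda d: ["a." + d] if d.count(".") < 2 else [])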
Example #2
    def extract(self, url):

        site = self.base_domain.domain_name

        header = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
            "Cookie": "csrftoken=xW6DIMrTX9qge6cQuCE1OmZgmGunVinw",
            "Referer": "https://dnsdumpster.com/"
        }
        # NOTE: this CSRF token is hard-coded and must match the csrftoken
        # cookie in the header above; dnsdumpster rotates tokens, so a stale
        # pair will be rejected.
        payload = {
            "csrfmiddlewaretoken": "xW6DIMrTX9qge6cQuCE1OmZgmGunVinw",
            "targetip": site
        }

        content = self.requester.post(url, payload, header).text
        soup = BeautifulSoup(content, "lxml")
        search_tags = soup.find_all('td', attrs={"class": "col-md-4"})
        if not search_tags:
            logger.info("Plug-in DNsDumpster: can't get any result for: %s" %
                        self.base_domain.domain_name)
            return
        for tag in search_tags:
            domain = tag.contents[0].strip('.')
            if site in domain:
                self.add(domain)
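
The hard-coded token pair is the fragile part of this plug-in: dnsdumpster.com issues a fresh Django CSRF token per session, and the cookie and form field must match. A hedged sketch of fetching a live token first (the field name csrfmiddlewaretoken is Django's convention, which the payload above already assumes):

import requests
from bs4 import BeautifulSoup

def fetch_csrf_token(session, url="https://dnsdumpster.com/"):
    # GET the page first so the session receives a csrftoken cookie,
    # then read the matching hidden form field out of the HTML.
    response = session.get(url)
    soup = BeautifulSoup(response.text, "lxml")
    field = soup.find("input", attrs={"name": "csrfmiddlewaretoken"})
    return field["value"] if field else session.cookies.get("csrftoken")

A requests.Session carries the cookie automatically, so only the form field needs the fetched value.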
Example #3
    def add_sub_domain(self, sub_domain):
        """
        Add the sub-domain to the result list
        If add successfully, return True. Else, return False
        """

        if not self._is_existed_domain(sub_domain):
            logger.info('New domain was found by %s: %s' %
                        (sub_domain.meta_data['domain_found_by'],
                         sub_domain.domain_name))
            self._sub_domains.append(sub_domain)
            return True
        return False
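
The dedup check relies on a private helper _is_existed_domain that is not shown. A hypothetical sketch of what it presumably does, comparing by domain_name (an assumption; the real implementation may also compare resolved IPs):

    def _is_existed_domain(self, sub_domain):
        # Hypothetical sketch: two entries count as duplicates when their
        # names match; the real check may also compare resolved IPs.
        return any(existing.domain_name == sub_domain.domain_name
                   for existing in self._sub_domains)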
Example #4
    def port(self):
        """
        This method is used to find all open ports of a domain
        """

        logger.info("Start finding opened ports")
        for plugin in self.plugins['port']:
            already_scanned = []
            for domain in self.kb.get_sub_domains():
                if domain.ip not in already_scanned:
                    open_ports = plugin.scan(domain)
                    self.kb.add_open_ports(domain, open_ports)
                logger.info(
                    "%s (%s) open ports: %s" % (
                        domain.domain_name, domain.ip, ", ".join([str(port) for port in domain.get_open_ports()])))
Example #5
    def post(self, url, data=None, headers=None):
        if headers is None:
            headers = self._headers

        # Retry up to three times before giving up.
        for attempt in range(3):
            try:
                return requests.post(url, headers=headers, proxies=self._proxies, data=data, timeout=60)
            except requests.exceptions.Timeout:
                logger.error("Request timed out after 60 seconds (attempt %d of 3)" % (attempt + 1))
                logger.info("Trying to reconnect...")
            except requests.exceptions.RequestException as exc:
                logger.error("Request failed: %s" % exc)
                logger.info("Trying to reconnect...")
        return None
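
If the endpoint is rate-limiting, immediate retries tend to fail the same way. A sketch of the same retry loop with exponential backoff between attempts (the 3-attempt budget and 60-second timeout are kept; the function name and standalone form are illustrative):

import logging
import time
import requests

logger = logging.getLogger(__name__)

def post_with_backoff(url, data=None, headers=None, proxies=None, attempts=3):
    for attempt in range(attempts):
        try:
            return requests.post(url, headers=headers, proxies=proxies,
                                 data=data, timeout=60)
        except requests.exceptions.RequestException as exc:
            logger.error("POST attempt %d/%d failed: %s", attempt + 1, attempts, exc)
            if attempt < attempts - 1:
                time.sleep(2 ** attempt)  # pause 1s, then 2s, between retries
    return None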
Example #6
    def extract(self, url):
        try:
            response = self.requester.get(url)
            if response is None:
                return None
            content = response.text
            if self.has_error(content):
                logger.error("Can not find any result for: %s" %
                             self.base_domain.domain_name)
                return None

            _soup = BeautifulSoup(content, "html5lib")

            if not _soup.find_all("em"):
                logger.error("This site seem to blocked your requests")
                return

            _last = ""
            _from = ""

            total = int(_soup.find_all("em")[0].string.split()[1])
            logger.info("Total of results: %d for: %s " %
                        (total, self.base_domain.domain_name))

            if total > 20:
                # Integer division: 20 results per page.
                count = total // 20
                for page in range(count + 1):
                    url_temp = ""
                    r = self.requester.get(url + _last + _from)
                    soup = BeautifulSoup(r.text, "html5lib")
                    search_region = BeautifulSoup(
                        str(soup.find_all("table", attrs={"class":
                                                          "TBtable"})), "lxml")

                    for item in search_region.find_all('a',
                                                       attrs={"rel": True}):
                        url_temp = self.parse_domain_name(item['href'])
                        self.add(url_temp)
                    _last = "&last=" + url_temp
                    _from = "&from=" + str((tem + 1) * 20 + 1)
            else:
                search_region = BeautifulSoup(
                    str(_soup.find_all("table", attrs={"class": "TBtable"})),
                    "lxml")
                for item in search_region.find_all('a', attrs={"rel": True}):
                    url_temp = self.parse_domain_name(item['href'])
                    self.add(url_temp)
        except Exception:
            logger.error("Unexpected error while extracting results for: %s" %
                         self.base_domain.domain_name)
            raise
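
A note on the page arithmetic: with 20 results per page, the page count is a ceiling division, and total / 20 must be integer division (//) in Python 3 for range() to accept it. A small worked check (87 is an arbitrary illustration):

import math

total = 87                          # e.g., 87 results at 20 per page
assert math.ceil(total / 20) == 5   # exact page count
assert total // 20 + 1 == 5         # the loop's form; over-counts by one
                                    # page only when total is a multiple of 20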
Example #7
    def discover(self, domain):
        """
        This is the main method, it pass domain_name parameter
        and returns a list of the sub-domain found by the search engine
        """
        try:
            logger.info("Plugin %s has been activated" % self.get_name())
            self.base_domain = domain

            _dict = self.dictionary()
            pool = ThreadPool(self.max_worker)
            pool.map(self.worker, _dict)
            pool.close()
            pool.join()

            return self.sub_domains
        except Exception as exc:
            msg = "Error occurred with plugin %s: %s" % (self.get_name(), exc)
            raise PluginException(msg)
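
ThreadPool here is presumably multiprocessing.pool.ThreadPool (an assumption; the import is not shown in the snippet). It fans self.worker out over every candidate name from the wordlist. A minimal sketch of the same pattern:

from multiprocessing.pool import ThreadPool

def probe_all(worker, candidates, max_worker=10):
    # worker(name) is assumed to probe one candidate sub-domain and
    # record any hit as a side effect, as self.worker does above.
    pool = ThreadPool(max_worker)
    try:
        pool.map(worker, candidates)
    finally:
        pool.close()
        pool.join()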
Example #8
    def get_total_page(self):
        max_page_temp = self.max_page

        while max_page_temp >= 0:
            url = self.base_url.format(query=self.get_query(),
                                       page=max_page_temp)
            header = {
                "User-Agent":
                "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"
            }
            content = self.requester.get(url, header).text
            if self.has_error(content):
                logger.error("To much requests and Yahoo knew")
                return 0

            if (("We did not find results for" not in content)
                    or ("Check spelling or type a new query" not in content)):
                list_seed = []
                soup = BeautifulSoup(content, "html5lib")
                search_page = soup.find_all("a",
                                            href=True,
                                            title=True,
                                            attrs={'class': None})
                for i in search_page:
                    try:
                        list_seed.append(int(i.string))
                    except (TypeError, ValueError):
                        continue
                current = soup.find("strong")
                try:
                    list_seed.append(int(current.string))
                except (TypeError, ValueError):
                    logger.error(
                        "Yahoo plug-in: failed to read the current page number; either "
                        "there are no more sub-domains for this base domain, or Yahoo "
                        "has blocked the requests"
                    )
                if not list_seed:
                    return 0
                else:
                    return max(list_seed)
            else:
                logger.info(
                    "Yahoo plug-in: lowering max_page to %d since the bot could not "
                    "determine total_page" % max_page_temp)
                max_page_temp -= 10
        return 0
Example #9
    def get_total_page(self):
        max_page_temp = self.max_page

        while max_page_temp >= 0:
            url = self.base_url.format(query=self.get_query(), page=max_page_temp)
            r = self.requester.get(url)

            if r is None:
                return 0

            content = r.text

            if self.was_blocked(content):
                logger.error("Ask blocked the request")
                return 0

            elif self.was_not_found(content):
                logger.info(
                    "Ask plug-in: lowering max_page to %d for domain: %s" % (max_page_temp, self.base_domain.domain_name))
                max_page_temp -= 5

            else:
                soup = BeautifulSoup(content, "html5lib")
                search_pages = soup.find_all("a", attrs={"ul-attr-accn": "pagination"})
                list_no_page = []
                for tag in search_pages:
                    try:
                        no_page = int(tag.string)
                        list_no_page.append(no_page)
                    except (TypeError, ValueError):
                        continue
                if not list_no_page:
                    logger.debug(soup)
                    return 1
                else:
                    return max(list_no_page)
        return 0
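
Examples 8 and 9 share one strategy: request an optimistic max_page, and if the engine will not serve it, step downward until some page yields a parseable pagination block. A condensed sketch of the shared pattern (fetch, parse_page_numbers, and the step size are per-engine stand-ins):

def probe_total_pages(fetch, parse_page_numbers, max_page, step):
    # Walk downward from max_page until a page yields pagination links.
    page = max_page
    while page >= 0:
        numbers = parse_page_numbers(fetch(page))
        if numbers:
            return max(numbers)
        page -= step
    return 0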