Example No. 1
    def Site_lookup(self, url):
        # probe a list of commonly exposed paths on the target URL
        req_url = url
        directories = [
            'xmlrpc.php', 'admin', 'Admin', 'robots.txt', 'phpmyadmin',
            'robots', 'sitemap.xml', 'Sitemap', 'sitemap'
        ]

        print(
            f"{green} [### {url} ###] {reset}                                     "
        )

        # with ThreadPoolExecutor(max_workers=10) as executor:
        #     executor.map(self.hit,directories)
        for directory in directories:

            try:
                if self.verbose:
                    print(
                        f"trying  {req_url}/{directory}                                                          ",
                        end="\r",
                        flush=True)

                r = requester(f"{req_url}/{directory}", time=self.timeout)

                # skip status codes the user chose to hide; report anything < 400
                if r.status_code not in self.hide_code and r.status_code < 400:
                    print(
                        f"{red}SENSITIVE FILES RESPONSE CODE[{r.status_code}]{reset}----->{yellow} {req_url}/{directory}{reset}"
                    )

            except Exception:
                # ignore timeouts and connection errors, move on to the next path
                pass
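All of these examples call a requester helper that is not shown here. Judging from the call sites (a URL plus an optional time keyword, and the module-style requester.requester in later examples), it is presumably a thin wrapper around requests.get; a minimal sketch under that assumption:

# Hypothetical stand-in for the requester helper, inferred from the call sites
# above; the real implementation may differ.
import requests

def requester(url, time=10):
    # plain GET with a timeout; redirects followed, certificate errors ignored
    return requests.get(url, timeout=time, allow_redirects=True, verify=False)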
Example No. 2
    def hit(self, directory):
        # worker for the thread pool: request a single path and report live hits
        r = requester(f"{self.url}/{directory}")
        if r.status_code < 400:
            print(
                f"{blue}SENSITIVE FILES RESPONSE CODE[{r.status_code}]{reset}----->{yellow} {self.url}/{directory}{reset}"
            )
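The commented-out lines in Example No. 1 suggest that hit is meant to be driven by a thread pool. A sketch of that usage, where scanner is a placeholder for a hypothetical instance of the class these methods belong to:

from concurrent.futures import ThreadPoolExecutor

# hypothetical wiring, mirroring the commented-out executor.map in Example No. 1
directories = ['xmlrpc.php', 'admin', 'robots.txt', 'sitemap.xml']
with ThreadPoolExecutor(max_workers=10) as executor:
    executor.map(scanner.hit, directories)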
Example No. 3
def Find_redirect(url):

    # marker expected in the response body once the redirect payload is followed
    flag = 'bing'
    print(f"{yellow}[+] CHECKING FOR OPEN REDIRECTION -----> {reset}{url}                                      ", end='\r', flush=True)

    r = requester(url)
    if r.status_code == 200:
        if flag in r.text:
            print(f"{red}[-]OPEN REDIRECTION MAY BE POSSIBLE -->", url)
Example No. 4
def get_all_url_waybackmachine(domain, path):

    waybackurl = f'http://web.archive.org/cdx/search/cdx?url=*.{domain}/*&output=text&fl=original&collapse=urlkey'
    r = requester.requester(waybackurl, time=10)

    # the CDX API returns one URL per line; write each one on its own line
    urls = r.text.splitlines()

    with open(path, 'a') as f:
        for url in urls:
            f.write(url + '\n')

    print(f"WAYBACK URLS PRINTED TO ------> {path}")
Example No. 5
    def dirbrute(self):

        try:
            with open(self.subdomain_path, 'r') as domains:
                for domain in domains:
                    domain = domain.strip()
                    r = requester(f"https://{domain}", time=self.timeout)

                    # skip hidden status codes; brute-force any live subdomain
                    if r.status_code not in self.hide_code and r.status_code < 400:
                        print(f"BRUTEFORCING DIRECTORIES IN {domain}")
                        Brute(domain, "Bounty", self.wordlist).dir_brute()

        except Exception:
            # missing subdomain file or unreachable host: skip silently
            pass
Example No. 6
    def brute(self, name):

        try:

            req_url = self.url + name
            r = requester(req_url, time=10)

            print(
                f"trying {req_url}                                                          ",
                end="\r",
                flush=True)
            if r.status_code < 400:
                self.log.info(f"{req_url} -----> {r.status_code}")
        except Exception:
            # ignore request failures and keep brute-forcing
            pass
Example No. 7
def link_scrapper(url, domain_name, link_tag, attribute, fd):
    try:
        r = requester.requester(url)
        soup = BeautifulSoup(r.content, "html.parser")

        for a_tags in soup.find_all(link_tag):
            href = a_tags.attrs.get(attribute)
            href = urljoin(url, href)

            if domain_name not in href or href in internal_urls:
                continue
            fd.write(href + '\n')
            if not href.endswith(tuple(junk_extensions)):
                internal_urls.append(href)

    except Exception:
        # ignore pages that fail to fetch or parse
        pass
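link_scrapper reads two module-level names that are not defined in the snippet: internal_urls (links already queued for crawling) and junk_extensions (suffixes excluded from further crawling). A minimal sketch of plausible definitions, together with the imports the function needs:

from urllib.parse import urljoin

from bs4 import BeautifulSoup

# assumed module-level state; the real project may define these differently
internal_urls = []
junk_extensions = ['.jpg', '.png', '.gif', '.css', '.js', '.pdf', '.svg']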
Example No. 8
    def scrap(self, path_scrapped_wordlist):

        r = requester(self.url, time=self.timeout)

        soup = soupObject(r.text)

        # find the page title
        title = soup.find('title').text
        print(
            f"{green}[+]TITLE OF THE WEBSITE{reset} ===> {yellow}{title}{reset}"
        )
        # join the visible text nodes with spaces so adjacent words stay separate
        words_in_page = ' '.join(soup.stripped_strings)

        # parse the response headers for useful info
        self.hederParser(r)

        black_list = [
            '/', '*', '&', '-', '=', '+', '%', '|', '(', ')', '[', ']', '{',
            '}', ',', ';', ':'
        ]

        # Scrape the main page text to generate a wordlist

        for b in black_list:
            words_in_page = words_in_page.replace(b, " ")

        words = list(filter(lambda x: x != "", words_in_page.split()))

        with open(path_scrapped_wordlist, 'a') as f:
            for word in words:
                f.write(word + '\n')

        print(
            f"{green}[+]SCRAPED ALL WORDS AND DETAILS FROM THE MAIN WEBPAGE, PRINTED {reset}====> {yellow}{path_scrapped_wordlist}{reset}"
        )
        print(
            f"{green}[+]TOTAL GENERATED WORDS FROM THE MAIN WEBPAGE{reset} ===>{yellow} {len(words)}{reset}"
        )
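scrap relies on a soupObject helper that is not shown; from the calls to .find and .stripped_strings it presumably wraps BeautifulSoup. A minimal sketch under that assumption:

from bs4 import BeautifulSoup

# hypothetical stand-in for the soupObject helper used above
def soupObject(html):
    return BeautifulSoup(html, "html.parser")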