Code example #1
File: crawler.py Project: codeswhite/Domainator
    def crawl(self, page):
        pr('Crawling page: ' + fc + page + fx)

        url = self.sb.pack_url(path=page)
        res = REQ_S.get(url)
        if res.status_code == 404:
            i = f'page: "{page}" is 404'
            logger.info(i)
            pr(i, '!')
            return
        elif res.status_code != 200:
            i = f'page returned code "{res.status_code}" <=> "{page}" '
            logger.info(i)
            pr(i, '!')
            return

        il = xl = 0
        soup = BeautifulSoup(res.content, 'html.parser')
        # Link-bearing tags include 'a', 'link', 'img', 'svg', 'iframe',
        # 'embed' and 'audio'; only the four mapped below are followed
        for k, v in {
                'a': 'href',
                'link': 'href',
                'iframe': 'src',
                'embed': 'src'
        }.items():
            for el in soup.find_all(k):
                try:
                    x = el[v].lower()
                except KeyError:
                    i = f'"{page}" KeyError: No link found in "{k}" element'
                    logger.info(i)
                    pr(i, '!')
                    continue
                if x.startswith('#'):
                    continue
                if x.endswith('.ico'):
                    continue

                if x.startswith('/'):
                    # root-relative link: resolve against the page URL with urljoin
                    # (from urllib.parse, alongside urlsplit) rather than naive
                    # concatenation, which would double up the page's own path
                    x = urljoin(url, x)

                if re.match(r'[^@]+@[^@]+\.[^@]+', x):  # Email
                    if x not in self.emails:
                        pr('Found new email: ' + fc + x + fx)
                        self.emails.add(x)
                    continue

                ux = urlsplit(x)
                if self.sb.domain not in ux.netloc:
                    self.external_res.add(x)
                    xl += 1
                    continue
                final = ux.path.replace('//', '/')  # collapse doubled slashes so paths dedupe consistently

                if final not in self.known_paths:
                    self.known_paths.add(final)
                    il += 1
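
For reference, a minimal self-contained sketch of the tag-to-attribute link-extraction pattern crawl() uses above; the HTML string here is a made-up placeholder:

from bs4 import BeautifulSoup

html = '<a href="/About">About</a> <link href="style.css"> <iframe></iframe>'
soup = BeautifulSoup(html, 'html.parser')
for tag, attr in {'a': 'href', 'link': 'href',
                  'iframe': 'src', 'embed': 'src'}.items():
    for el in soup.find_all(tag):
        try:
            link = el[attr].lower()
        except KeyError:  # element exists but carries no link attribute
            continue
        print(f'{tag}: {link}')  # prints "a: /about" and "link: style.css"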
Code example #2
File: crawler.py Project: codeswhite/Domainator
 def show_menu(self):
     while True:
         c = choose([
             'Emails', 'External Resources', 'Known Paths', 'Crawled Paths'
         ])
         if c < 0:
             break
         li = (self.emails, self.external_res, self.known_paths,
               self.crawled_paths)[c]
         if not li:
             pr('Nothing to show yet!', '!')
             continue
         for i in li:
             pr(i, '#')
Code example #3
File: crawler.py Project: codeswhite/Domainator
    def google(self):
        for loop in range(int(ask('How many pages?'))):
            url = f"https://google.com/search?q=site:{self.sb.domain}&ie=utf-8&oe=utf-8&aq=t&start={loop * 10}"
            res = REQ_S.get(url)
            if res.status_code != 200:
                pr('Bad status code: %d' % res.status_code, '!')
                return
            c = 0
            soup = BeautifulSoup(res.content, 'html.parser')
            for el in soup.find_all('cite'):
                if not el.span:
                    continue
                ls = urlsplit('http://' + el.span.decode_contents())
                if self.sb.domain not in ls.netloc:
                    pr('Wrong domain found: ' + fy + ls.path + fx, '!')
                    continue

                pts = ls.netloc.split('.')
                if len(pts) > 2 and ls.netloc not in self.sb.known_subdomains:
                    pr('Found new subdomain: ' + fc + ls.netloc + fx)
                    self.sb.known_subdomains.add(ls.netloc)  # record it (assuming known_subdomains is a set)
                    continue

                if ls.path not in self.known_paths:
                    self.known_paths.add(ls.path)
                    c += 1
            pr(f'Added {c} new paths')
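
The subdomain check above leans on how urlsplit divides a URL into netloc and path; a small illustration (example.com is a placeholder):

from urllib.parse import urlsplit

parts = urlsplit('http://blog.example.com/archive/2020')
print(parts.netloc)             # blog.example.com
print(parts.path)               # /archive/2020
print(parts.netloc.split('.'))  # ['blog', 'example', 'com'] -- more than two parts implies a subdomain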
Code example #4
 def whois(self):
     url = 'https://www.whois.com/whois/' + self.domain
     try:
         res = REQ_S.get(url)
         if res.status_code != 200:
             pr('Bad status code: %d' % res.status_code, '!')
             return
         bs = bs4.BeautifulSoup(res.content, 'html.parser')
         pre = bs.find('pre', {'class': 'df-raw'})
         if pre is None:  # find_all(...)[0] would raise IndexError when the block is missing
             pr('No WHOIS data block found!', '!')
             return
         print(f"\n{fc + pre.decode_contents() + fx}")
     except requests.exceptions.RequestException:
         from traceback import print_exc
         print_exc()
Code example #5
    def find_subdomains(self):
        print("{}{:<62}| {:<50}{}".format(fc, "URL", "STATUS", fx))
        with open('./src/subdomains') as f:
            for sub in f:
                sub = sub.strip()  # strip first: raw lines keep their trailing newline
                if not sub or sub == self.subdomain:
                    continue

                url = self.pack_url(subdomain=sub)
                try:
                    res = REQ_S.get(url)
                    if res.status_code != 404:
                        print("{}{:<62}| {:<50}{}".format(
                            fg, url, res.status_code, fx))
                except KeyboardInterrupt:
                    pr('Scan stopped!', '!')
                    break
                except Exception:  # any other failure just marks the URL as errored
                    print("{}{:<62}| {:<50}{}".format(fr, url, 'ERROR', fx))
Code example #6
    def reverse_HT(self):
        url = "http://api.hackertarget.com/reverseiplookup/?q=" + self.domain
        res = REQ_S.get(url)
        if res.status_code != 200:
            pr('Bad status code: %d' % res.status_code, '!')
            return
        lst = res.text.strip().split("\n")

        reverse_dir = './reverseip'
        os.makedirs(reverse_dir, exist_ok=True)  # create the dump directory if needed

        fn = os.path.join(reverse_dir, f'ht-{self.domain}')
        written = 0
        with open(fn, 'w') as f:
            for line in lst:
                if line.strip():
                    f.write(line.strip() + '\n')
                    written += 1
        print()
        pr(f'Dumped {written} entries to "{fn}"\n')
Code example #7
    def find_panels(self):
        pr("Searching for admin panels", '#')
        pth_lst = ('admin/', 'site/admin', 'admin.php/', 'up/admin/',
                   'central/admin/', 'whm/admin/', 'whmcs/admin/',
                   'support/admin/', 'upload/admin/', 'video/admin/',
                   'shop/admin/', 'shoping/admin/', 'wp-admin/',
                   'wp/wp-admin/', 'blog/wp-admin/', 'admincp/',
                   'admincp.php/', 'vb/admincp/', 'forum/admincp/',
                   'up/admincp/', 'administrator/', 'administrator.php/',
                   'joomla/administrator/', 'jm/administrator/',
                   'site/administrator/', 'install/', 'vb/install/', 'dimcp/',
                   'clientes/', 'admin_cp/', 'login/', 'login.php',
                   'site/login', 'site/login.php', 'up/login/', 'up/login.php',
                   'cp.php', 'up/cp', 'cp', 'master', 'adm', 'member',
                   'control', 'webmaster', 'myadmin', 'admin_cp', 'admin_site')

        try:
            for i, pth in enumerate(pth_lst, 1):  # enumerate gives a 1-based progress counter
                res = requests.get(self.pack_url(path=pth))
                n = '+' if res.status_code == 200 else '!'
                pr(
                    f'({i}/{len(pth_lst)}) "{pth}" code: {fg if res.status_code == 200 else fx}{res.status_code}{fx}',
                    n)
        except requests.exceptions.ConnectionError:
            pr("Couldn't connect!", '!')
Code example #8
    def speed_check(self):
        import time

        start = time.time()
        ip = socket.gethostbyname(self.domain)
        dns_tm = time.time() - start
        _dns = "{:<10}:{:<20} seconds".format("DNS", dns_tm)
        pr(_dns, '#')

        start = time.time()
        _data = REQ_S.get(self.pack_url())
        load_tm = time.time() - start
        _load = "{:<10}:{:<20} seconds".format("Load", load_tm)
        _wo = "{:<10}:{:<20} seconds".format("W/O DNS", load_tm - dns_tm)

        pr(_load, '#')
        pr(_wo, '#')
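
time.time() reads the wall clock, which can jump if the system clock is adjusted; time.perf_counter() is the clock intended for interval timing. A sketch of the same DNS-versus-total split (example.com is a placeholder, and note the second request performs its own DNS lookup as well):

import socket
import time

import requests

start = time.perf_counter()
socket.gethostbyname('example.com')  # DNS resolution only
dns_tm = time.perf_counter() - start

start = time.perf_counter()
requests.get('http://example.com')   # DNS + connect + transfer
load_tm = time.perf_counter() - start

print(f'DNS    : {dns_tm:.3f} s')
print(f'Load   : {load_tm:.3f} s')
print(f'W/O DNS: {load_tm - dns_tm:.3f} s')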
Code example #9
    def __init__(self):
        # Parse arguments
        if len(sys.argv) > 1:
            arg = sys.argv[1]
        else:
            arg = ask('Enter domain:')
            if not arg:
                exit()

        # Verify domain integrity
        if '://' in arg:
            parsed = urlsplit(arg)
        else:
            parsed = urlsplit('http://' + arg)
        if '.' not in parsed.netloc:
            pr('Invalid domain!', '!')
            exit()

        # Verify subdomain
        self.subdomain = self.base_domain = None
        pts = parsed.netloc.split('.')
        if len(pts) > 2:
            pr('Is this the subdomain you wish to use? ' + pts[0])
            if pause('agree', cancel=True):  # subdomain
                self.subdomain = pts[0]
                self.base_domain = '.'.join(pts[1:])
        if not self.subdomain:
            self.subdomain = 'www'
        if not self.base_domain:
            self.base_domain = parsed.netloc

        self.domain = parsed.netloc
        self.scheme = parsed.scheme if parsed.scheme else 'http'
        print()
        pr('Using domain: ' + fc + self.domain + fx)

        self.crawler = Crawler(self, parsed.path)
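
The '://' check above exists because urlsplit() only recognizes the host when a scheme is present; without one, the whole string lands in .path:

from urllib.parse import urlsplit

print(urlsplit('example.com').netloc)         # '' -- no scheme, so the host ends up in .path
print(urlsplit('example.com').path)           # example.com
print(urlsplit('http://example.com').netloc)  # example.com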
Code example #10
File: crawler.py Project: codeswhite/Domainator
    def menu(self):
        while True:
            pr(f'Emails: {fc}{len(self.emails)}')
            pr(f'External resources: {fc}{len(self.external_res)}')
            pr(f'Internal known paths: {fc}{len(self.known_paths)}')
            pr(f'Crawled paths: {fc}{len(self.crawled_paths)}')
            c = choose(['Show', 'Google Crawl', 'BS Crawl'],
                       'Choose crawling engine')
            if c < 0:
                break
            if c == 0:
                self.show_menu()
            elif c == 1:
                try:
                    self.google()
                except KeyboardInterrupt:
                    pr('Stopped!', '!')
            elif c == 2:
                try:
                    while True:
                        avail = list(self.known_paths - self.crawled_paths)
                        if not avail:
                            pr('No crawlable pages!', '!')
                            break
                        page = choice(avail)
                        self.crawl(page)
                        self.crawled_paths.add(page)

                except KeyboardInterrupt:
                    pr('Crawling stopped', '!')
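
The 'BS Crawl' branch above keeps its frontier as a plain set difference: anything known but not yet crawled is eligible, and random.choice picks the next page. A tiny illustration:

from random import choice

known = {'/', '/about', '/contact'}
crawled = {'/'}
frontier = list(known - crawled)  # pages known but not yet visited
print(choice(frontier))           # '/about' or '/contact'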
Code example #11
    def banners_cloud_flare(self):
        pr('Retrieving headers', '#')
        domain = self.pack_url()
        res = REQ_S.get(domain)
        if res.status_code != 200:
            pr('Bad status code: %d' % res.status_code, '!')
            return

        pr('Headers:')
        for h in res.headers.items():
            x = f'{h[0]} => {h[1]}'
            pr(x, '#')

        pr('Checking for CloudFlare in headers', '#')
        # CloudFlare identifies itself in the Server response header
        if 'cloudflare' not in res.headers.get('Server', '').lower():
            pr(self.domain + " is not using Cloudflare!")
            return

        if not pause('Attempt to bypass?', cancel=True):
            return
        pr("CloudFlare found, attempting to bypass..")

        # TODO TEST

        url = "http://www.crimeflare.biz/cgi-bin/cfsearch.cgi"
        res = REQ_S.get(url, data={'cfS': self.domain})
        reg = re.findall(r'\d+\.\d+\.\d+\.\d+', res.text)
        if len(reg) < 2:  # need at least two matches; the second is taken as the origin IP
            pr("CloudFlare wasn't bypassed, No real IP found", '!')
            return
        real_ip = reg[1]
        res = REQ_S.get(f"http://{real_ip}")
        if "cloudflare" not in res.text.lower():
            pr("Cloudflare Bypassed!", '#')
            pr('===============================')
            pr("Real IP ==> " + fc + real_ip + fx)
            pr('===============================')
            return
        pr("Cloudflare wasn't bypassed, Real IP blocked by CloudFlare", '!')
Code example #12
    def reverse_YGS(self):  # TODO record to file
        url = "https://domains.yougetsignal.com/domains.php"
        data = {'remoteAddress': self.domain, 'key': ''}
        res = REQ_S.get(url, params=data)
        if res.status_code != 200:
            pr('Bad status code: %d' % res.status_code, '!')
            return  # without this, res.json() below would run on an error response

        grab = res.json()
        if 'fail' in grab['status'].lower():
            pr("Message:", '#')
            print(grab['message'])
            return

        pr("Results from: " + grab['lastScrape'], '#')
        pr("IP: " + grab['remoteIpAddress'], '#')
        pr("Domain: " + grab['remoteAddress'], '#')
        pr(f"Total Domains: {grab['domainCount']}\n", '#')
        for x, _ in grab['domainArray']:
            pr(x, '#')
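
reverse_YGS() above hands its query string to requests via params= and decodes the body with res.json(); httpbin.org simply echoes requests back and stands in here for the real endpoint:

import requests

res = requests.get('https://httpbin.org/get',
                   params={'remoteAddress': 'example.com', 'key': ''})
if res.status_code == 200:
    print(res.json()['args'])  # {'key': '', 'remoteAddress': 'example.com'}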
Code example #13
File: main.py Project: codeswhite/Domainator
            break
        if cc == 0:
            dom.banners_cloud_flare()
        elif cc == 1:
            dom.speed_check()
        elif cc == 2:
            dom.find_subdomains()
        elif cc == 3:
            dom.crawler.menu()
        elif cc == 4:
            dom.find_panels()


def main_menu(dom: Domainator):
    while True:
        c = choose(['Passive', 'Active'], 'Choose category:')
        if c < 0:
            break
        if c == 0:
            passive_menu(dom)
        elif c == 1:
            active_menu(dom)


if __name__ == '__main__':
    logging.info('Starting')
    try:
        main_menu(Domainator())
    except KeyboardInterrupt:
        pr('Interrupted!', '!')