def googleCrawl(website):
    # Scrape the first ten pages of Google results for "site:<domain>".
    search = "site:" + removeHTTP(website)
    webs = removeHTTP(website)
    for loop in range(0, 10):
        url = ("https://google.com/search?q=" + search
               + "&ie=utf-8&oe=utf-8&aq=t&start=" + str(loop) + "0")
        request = requests.get(url, headers=_headers)
        soup = BeautifulSoup(request.text, 'lxml')
        for div in soup.find_all('div', class_='r'):
            if div.a is None:
                continue
            href = div.a['href']
            if webs in href:
                write(var="~", color=c, data=href)

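# ---------------------------------------------------------------------------
# The functions in this module lean on a handful of helpers defined elsewhere
# in the project. The block below is a minimal sketch of the assumed
# interfaces (imports, a browser-like header set, URL normalizers, and the
# colorized printer) so the module reads self-contained; the project's real
# versions may differ.
import json
import re

import requests
from bs4 import BeautifulSoup

_headers = {'User-Agent': 'Mozilla/5.0'}  # assumed browser-like UA string

# Assumed ANSI color codes consumed by write() below.
c, g, r, b = '\033[36m', '\033[32m', '\033[31m', '\033[34m'

def removeHTTP(url):
    # Strip the scheme and any trailing slash (assumed behavior).
    return url.replace('https://', '').replace('http://', '').rstrip('/')

def addHTTP(url):
    # Prefix a scheme if the URL has none (assumed behavior).
    return url if url.startswith(('http://', 'https://')) else 'http://' + url

def write(var, color, data):
    # Colorized "[x] message" printer (signature inferred from call sites).
    print('{}[{}] {}\033[0m'.format(color, var, data))
# ---------------------------------------------------------------------------
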
def manualCrawl(website):
    # Crawl the target page itself and harvest URLs from its tag attributes.
    website = addHTTP(website)
    webs = removeHTTP(website)
    request = Request(website, _timeout=5, _encode=True)
    soup = BeautifulSoup(request, 'lxml')
    # Links live in ['a', 'link', 'img', 'svg', 'iframe', 'embed', 'audio']
    _links = []
    for tag in soup.find_all(["a", "link"]):
        if tag.get('href'):
            _links.append(tag['href'])
    for tag in soup.find_all(["img", "iframe", "embed"]):
        if tag.get('src'):
            _links.append(tag['src'])
    for __links in set(_links):
        if webs in __links:
            write(var="~", color=c, data=__links)

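# `Request` above is the project's own fetch helper, not urllib's class. Its
# call sites in this module pass `_timeout` and `_encode` and treat the
# return value as markup/text, so the minimal sketch below matches those
# assumptions; the real helper may behave differently.
def Request(url, _timeout=5, _encode=True):
    # `_encode` is accepted only to mirror the call sites (assumed flag).
    return requests.get(url, headers=_headers, timeout=_timeout).text
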
def reverseViaYGS(website):
    # Reverse-IP lookup via the yougetsignal.com domains endpoint.
    website = addHTTP(website)
    webs = removeHTTP(website)
    url = "https://domains.yougetsignal.com/domains.php"
    post = {'remoteAddress': webs, 'key': ''}
    request = requests.post(url, headers=_headers, data=post)
    grab = json.loads(request.text)
    if grab['status'] == 'Fail':
        write(var="#", color=r, data="Sorry! Reverse IP limit reached.")
        return
    write(var="$", color=c, data="IP: " + grab['remoteIpAddress'])
    write(var="$", color=c, data="Domain: " + grab['remoteAddress'])
    write(var="$", color=c, data="Total Domains: " + grab['domainCount'] + "\n")
    # domainArray holds [domain, flag] pairs; only the domain is reported.
    for domain, _flag in grab['domainArray']:
        write(var="#", color=b, data=domain)

def dnsdump(website):
    # Pull DNS, MX, host (A), and TXT records from the DNSDumpster API.
    website = removeHTTP(website)
    res = DNSDumpsterAPI(False).search(website)
    if not res:
        write(var="!", color=r, data="No results from DNSDumpster.")
        return
    print('\n DNS Records')
    for entry in res.get('dns_records', {}).get('dns', []):
        print('{domain} ({ip}) {as} {provider} {country}'.format(**entry))
    print('\n MX Records')
    for entry in res.get('dns_records', {}).get('mx', []):
        print('{domain} ({ip}) {as} {provider} {country}'.format(**entry))
    print('\n\033[1;32m[+]\033[1;m Host Records (A)')
    for entry in res.get('dns_records', {}).get('host', []):
        if entry.get('reverse_dns'):
            print('{domain} ({reverse_dns}) ({ip}) {as} {provider} {country}'.format(**entry))
        else:
            print('{domain} ({ip}) {as} {provider} {country}'.format(**entry))
    print('\n TXT Records')
    for entry in res.get('dns_records', {}).get('txt', []):
        print(entry)
    print('\n DNS Map: https://dnsdumpster.com/static/map/' + website + '.png\n')

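# `DNSDumpsterAPI` comes from the third-party dnsdumpster wrapper
# (https://github.com/PaulSec/API-dnsdumpster.com); the boolean argument is
# its verbose flag. Assuming that package, the import would look like:
# from dnsdumpster.DNSDumpsterAPI import DNSDumpsterAPI
# and a quick smoke test against a hypothetical target:
# dnsdump('example.com')
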
def browserspyRep(website):
    # Fingerprint the web server via browserspy.dk's report page.
    url = "http://browserspy.dk/webserver.php"
    _data = {'server': removeHTTP(website)}
    request = requests.post(url, headers=_headers, data=_data).text
    rows = re.findall(r'<tr class="(.*)">\n<td class="property">(.*)</td>\n<td class="value">(.*)</td>\n</tr>', request)
    for res in rows:
        write(var="#", color=c, data=res[1].capitalize() + ": " + res[2])

def whoIS(website):
    # Scrape the raw whois record from whois.com.
    website = removeHTTP(website)
    url = f"https://www.whois.com/whois/{website}"
    try:
        request = Request(url, _timeout=5, _encode=None)
        bs = BeautifulSoup(request, 'html.parser')
        result = bs.find_all('pre', {'class': 'df-raw'})[0].text
        print(f"\r{c}{result}")
    except Exception:
        write(var="!", color=r, data="Sorry, whois cannot be performed right now...!!! :[")

def reverseViaHT(website):
    # Reverse-IP lookup via the HackerTarget API.
    website = addHTTP(website)
    webs = removeHTTP(website)
    url = "http://api.hackertarget.com/reverseiplookup/?q="
    combo = "{url}{website}".format(url=url, website=webs)
    request = Request(combo, _timeout=5, _encode=True)
    # A bare 5-byte body is treated as the API's error sentinel
    # (presumably the literal string "error").
    if len(request) != 5:
        for _links in request.strip().split("\n"):
            if len(_links) != 0:
                write(var="#", color=c, data=_links)
    else:
        write(var="@", color=r, data="Sorry, the webserver of the website you entered has no domains other than the one you gave :')")

def findSubdomains(website):
    # Brute-force subdomains from the wordlist and report their HTTP status.
    website = removeHTTP(website)
    print("{}{:<62}| {:<50}".format(c, "URL", "STATUS"))
    for _sub in subdomains:
        if len(_sub) != 0:
            combo = addHTTP(_sub + "." + website)
            try:
                resp = requests.get(combo, timeout=5, headers=_headers).status_code
                if resp != 404:
                    print("{}{:<62}| {:<50}".format(g, combo, resp))
            except Exception:
                # Connection failures are reported as DOWN rather than a
                # misleading 404 status code.
                print("{}{:<62}| {:<50}".format(r, combo, "DOWN"))

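# `subdomains` is the wordlist iterated by findSubdomains; the project is
# assumed to define or load one elsewhere. A short illustrative fallback
# (hypothetical entries, not the project's actual list):
subdomains = ['www', 'mail', 'ftp', 'webmail', 'smtp', 'ns1', 'ns2',
              'admin', 'blog', 'dev', 'test', 'portal', 'vpn', 'api', 'cdn']
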
def bingCrawl(website):
    # Scrape the first ten pages of Bing results for "site:<domain>".
    search = f"site:{removeHTTP(website)}"
    webs = removeHTTP(website)
    link = []
    for loop in range(0, 10):
        url = f"http://www.bing.com/search?q={search}&first={loop}0"
        try:
            request = requests.get(url, headers=_headers, timeout=5)
            for _links in re.findall(r'<a\shref="(.*?)"\sh="(.*?)">', request.text):
                link.append(_links[0])
        except requests.exceptions.ConnectionError:
            pass
    for links in set(link):
        if webs in links:
            write(var="~", color=g, data=links)

def nameServers(website):
    # Enumerate NS records for the target domain.
    website = removeHTTP(website)
    res = Nameservers(website, 'NS')
    for nameservers in res:
        write(var="#", color=c, data=nameservers)

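# `Nameservers` is not defined in this module. Below is a minimal sketch that
# matches the call above, assuming dnspython (`pip install dnspython`,
# version >= 2.0 for dns.resolver.resolve) as the backing resolver:
def Nameservers(domain, record='NS'):
    import dns.resolver
    try:
        return [str(rr).rstrip('.') for rr in dns.resolver.resolve(domain, record)]
    except Exception:
        return []

# Example driver against a hypothetical target:
# nameServers('example.com')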