def engine_url(self): try: url = f"https://crt.sh/?q={self.target}&output=json" response = requests.get(url, headers=tuga_useragents.useragent()) return response except requests.exceptions.Timeout: pass
def engine_url(self):
    try:
        url = f'https://www.virustotal.com/ui/domains/{self.target}/subdomains?limit=40'
        response = requests.get(url, headers=tuga_useragents.useragent())
        return response
    except requests.exceptions.Timeout:
        pass
def enumerate(self, url, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers=tuga_useragents.useragent())
        data = response.json()["subdomains"]
        # Walk the "subdomains" array until it runs out (IndexError) or 500 entries
        while subdomainscount < 500:
            subdomains = data[subdomainscount]
            subdomainscount = subdomainscount + 1
            print(f"[*] {subdomains}")
            # Write each subdomain to the output file when one was requested
            if self.output is not None:
                write_file(subdomains, self.engine + '_' + self.output, target)
    except IndexError:
        pass
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")
    print(G + f"\n[**] TugaRecon is complete. Threat Crowd: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using Threat Crowd.")
def enumerate(self, url, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers=tuga_useragents.useragent())
        # Each line is "subdomain,IP"; drop the "," so splitting yields
        # alternating subdomain/IP tokens
        remove_ip = response.text.replace(",", " ")
        subdomains = remove_ip.split()
        while subdomainscount < 10000:
            print(f"[*] {subdomains[subdomainscount]}")
            # Write to a file
            if self.output is not None:
                write_file(subdomains[subdomainscount], self.engine + '_' + self.output, target)
            # Step by 2 to skip the IP token that follows each subdomain
            subdomainscount = subdomainscount + 2
    except IndexError:
        pass
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")
    print(G + f"\n[**] TugaRecon is complete. HackerTarget: {int(subdomainscount / 2)} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] No data found for {self.target} using HackerTarget.")
def engine_url(self):
    try:
        url = f'https://api.certspotter.com/v1/issuances?domain={self.target}&include_subdomains=true&expand=dns_names'
        response = requests.get(url, headers=tuga_useragents.useragent())
        return response
    except requests.exceptions.Timeout:
        pass
def enumerate(self, url, output, target):
    subdomains = set()
    subdomainscount = 0
    start_time = time.time()
    try:
        response = requests.get(url, headers=tuga_useragents.useragent())
        data = response.json()
        # Walk the certificate entries until the list runs out (IndexError)
        while subdomainscount < 10000:
            subdomains = data[subdomainscount]["name_value"]
            subdomainscount = subdomainscount + 1
            if "@" in subdomains:
                # Filter out e-mail addresses found in certificates
                pass
            else:
                print(f"[*] {subdomains}")
                if self.output is not None:
                    write_file(subdomains, self.engine + '_' + self.output, target)
    except IndexError:
        pass
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")
    print(G + f"\n[**] TugaRecon is complete. SSL Certificates: {subdomainscount} subdomains "
              f"have been found in {time.time() - start_time} seconds" + W)
    if not subdomains:
        print(f"[x] Oops! No data found for {self.target} using SSL Certificates.")
def engine_url(self):
    try:
        url = f'https://threatcrowd.org/searchApi/v2/domain/report/?domain={self.target}'
        response = requests.get(url, headers=tuga_useragents.useragent())
        return response
    except requests.exceptions.Timeout:
        pass
def engine_url(self): try: url = f"https://crt.sh/?q={self.target}&output=json" response = requests.get(url, headers=tuga_useragents.useragent()) return response except requests.ConnectionError: print(G + f"[SSL] Warning! Unable to get subdomains... Try again!\n" + W) response = 1 return response
def engine_url(self):
    try:
        url = f'https://ctsearch.entrust.com/api/v1/certificates?fields=subjectDN&domain={self.target}&includeExpired=true&exactMatch=false&limit=5000'
        # verify=False disables TLS certificate verification for this request
        response = requests.get(url, headers=tuga_useragents.useragent(), verify=False)
        return response
    except requests.exceptions.Timeout:
        pass
def engine_url(self):
    try:
        url = f'https://api.certspotter.com/v1/issuances?domain={self.target}&include_subdomains=true&expand=dns_names'
        response = requests.get(url, headers=tuga_useragents.useragent())
    except (requests.ConnectionError, requests.Timeout):
        print(G + "[CertSpotter] Warning! Unable to get subdomains... Try again!\n" + W)
        response = 1
    return response
def engine_url(self):
    try:
        url = f'https://threatcrowd.org/searchApi/v2/domain/report/?domain={self.target}'
        response = requests.get(url, headers=tuga_useragents.useragent())
        return response
    except requests.ConnectionError:
        print(G + "[Threat Crowd] Warning! Unable to get subdomains... Try again!\n" + W)
        response = 1
        return response
def engine_url(self): url = f"https://api.hackertarget.com/hostsearch/?q={self.target}" response = requests.get(url, headers=tuga_useragents.useragent()) return response
def google(self, target, output):
    # list g_clean stores the cleaned-up subdomains fetched from the results
    g_clean = []
    page_number = 20
    while page_number < 90:
        try:
            gurl = self.get_list(target, page_number)
            # fetch the results page and pause to avoid hammering Google
            html = requests.get(gurl, headers=tuga_useragents.useragent())
            time.sleep(5)
            if page_number > 50:
                time.sleep(10)
            page_number = page_number + 10
            if html.status_code == 429:
                print(G + "Google systems have detected unusual traffic ", html)
                print("CAPTCHA detected!!! Maybe try from another IP, or wait..." + W)
                sys.exit(1)
            if html.status_code == 200:
                # parse the fetched html with the lxml parser in Beautiful Soup
                soup = BeautifulSoup(html.text, 'lxml')
                # loop through every 'a' tag (link) in the parsed html
                for i in soup.find_all('a'):
                    # extract the href attribute pointing to the search result
                    k = i.get('href')
                    try:
                        # keep only the url part of the href, dropping extra attributes
                        m = re.search(r"(?P<url>https?://[^\s]+)", k)
                        n = m.group(0)
                        # split the url at the parameters to keep only the necessary part
                        rul = n.split('&')[0]
                        # parse the url into its components
                        result = urlparse(rul)
                        domain = '{uri.scheme}://{uri.netloc}/'.format(uri=result)
                        # skip urls that do not belong to the target
                        if not re.search(f"{self.target}", domain):
                            continue
                        # otherwise strip the scheme/www prefix and store it once
                        url_a = re.compile(r"https?://(www\.)?")
                        url1 = url_a.sub('', domain).strip().strip('/')
                        if url1 not in g_clean:
                            g_clean.append(url1)
                    except Exception:
                        continue
        finally:
            self.get_url(g_clean, page_number)
    if self.output:
        print(f"\nSaving result... {self.engine + '_' + self.output}")
    print(G + "\n[**] TugaRecon is complete." + W)
    print(G + "Please wait some time... before doing a new search with this module\n" + W)
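# A note on context: the methods above are written as members of per-engine
# classes, and the exact wiring shown here is an assumption inferred from the
# attributes they reference (self.target, self.engine, self.output) and the
# module-level names they call (tuga_useragents.useragent, write_file, the
# G/W colour constants, time, re, sys, BeautifulSoup, urlparse). A minimal
# sketch of one such engine class plus a hypothetical driver, under those
# assumptions only:

import requests

import tuga_useragents  # project helper assumed to return a User-Agent header dict


class HackerTarget:
    """Sketch of an engine class exposing the target/output/engine attributes
    that the methods above rely on (layout assumed, not taken from the repo)."""

    def __init__(self, target, output=None):
        self.target = target
        self.output = output
        self.engine = "hackertarget"

    def engine_url(self):
        url = f"https://api.hackertarget.com/hostsearch/?q={self.target}"
        return requests.get(url, headers=tuga_useragents.useragent())


if __name__ == "__main__":
    # hypothetical driver: fetch the raw host list for a target domain
    engine = HackerTarget("example.com")
    print(engine.engine_url().text)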