def recv_info(self, info):
    """
    Check a CNAME register against the list of known poisoned domains.

    :param info: DNS register to analyze.
    :return: List of DNSPoisoning vulnerabilities, or None on skip / error.
    """

    # Make sure it's a CNAME record.
    # This is required because a plugin can't ask for a specific DNS
    # register type - all types are received together.
    if info.type != "CNAME":
        return

    # The domain the CNAME points to.
    target_domain = info.target
    Logger.log_verbose(
        "Looking for poisoned domains at: *.%s" % target_domain)

    # Load the malware URLs list; bail out on any loader error.
    list_name = Config.plugin_args["wordlist"]
    try:
        malware_domains = WordListLoader.get_advanced_wordlist_as_list(
            list_name)
    except WordlistNotFound:
        Logger.log_error_verbose(
            "Wordlist not found: " + list_name)
        return
    except TypeError:
        Logger.log_error_verbose(
            "Wordlist is not a file: " + list_name)
        return

    # Intersecting a one-element set with the wordlist is just a
    # membership test, so do that directly.
    findings = []
    if target_domain in set(malware_domains):
        vuln = DNSPoisoning(target_domain)
        vuln.add_information(info)
        findings.append(vuln)

    # Log how many results we got.
    if findings:
        Logger.log_verbose(
            "Discovered %s poisoned domains." % len(findings))
    else:
        Logger.log_verbose("No poisoned domains found.")

    # Return the results.
    return findings
def run(self, info):
    """
    Check a CNAME register against the list of known poisoned domains.

    :param info: DNS register to analyze.
    :return: List of DNSPoisoning vulnerabilities, or None on skip / error.
    """

    # Make sure it's a CNAME record.
    # This is required because a plugin can't ask for a specific DNS
    # register type - all types are received together.
    if info.type != "CNAME":
        return

    # The domain the CNAME points to.
    target_domain = info.target
    Logger.log_verbose("Looking for poisoned domains at: *.%s" % target_domain)

    # Load the malware URLs list; bail out on any loader error.
    list_name = Config.plugin_args["wordlist"]
    try:
        malware_domains = WordListLoader.get_advanced_wordlist_as_list(
            list_name)
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist not found: " + list_name)
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist is not a file: " + list_name)
        return

    # Intersecting a one-element set with the wordlist is just a
    # membership test, so do that directly.
    findings = []
    if target_domain in set(malware_domains):
        findings.append(DNSPoisoning(info, target_domain))

    # Log how many results we got.
    if findings:
        Logger.log_verbose("Discovered %s poisoned domains." % len(findings))
    else:
        Logger.log_verbose("No poisoned domains found.")

    # Return the results.
    return findings
def get_list_from_wordlist(wordlist): """ Load the content of the wordlist and return a set with the content. :param wordlist: wordlist name. :type wordlist: str :return: a set with the results. :rtype result_output: set """ try: m_commom_wordlists = set() for v in Config.plugin_extra_config[wordlist].itervalues(): m_commom_wordlists.update(WordListLoader.get_advanced_wordlist_as_list(v)) return m_commom_wordlists except KeyError,e: Logger.log_error_more_verbose(str(e)) return set()
def recv_info(self, info):
    """
    Check a CNAME register against the list of known poisoned domains.

    :param info: DNS register to analyze.
    :return: List of DNSPoisoning vulnerabilities, or None on skip / error.
    """

    # Make sure it's really a CNAME record.
    # This check should never fail anyway!
    if info.type != "CNAME":
        Logger.log_error_verbose("No CNAME found, skipped.")
        return

    # The domain the CNAME points to.
    target_domain = info.target
    Logger.log_verbose("Looking for poisoned domains at: *.%s" % target_domain)

    # Load the malware URLs list; bail out on any loader error.
    list_name = Config.plugin_args["wordlist"]
    try:
        malware_domains = WordListLoader.get_advanced_wordlist_as_list(list_name)
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist not found: " + list_name)
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist is not a file: " + list_name)
        return

    # Intersecting a one-element set with the wordlist is just a
    # membership test, so do that directly.
    findings = []
    if target_domain in set(malware_domains):
        vuln = DNSPoisoning(target_domain)
        vuln.add_information(info)
        findings.append(vuln)

    # Log how many results we got.
    if findings:
        Logger.log_verbose("Discovered %s poisoned domains." % len(findings))
    else:
        Logger.log_verbose("No poisoned domains found.")

    # Return the results.
    return findings
def recv_info(self, info):
    """
    Brute force predictable resources on the web server hosting the
    given URL, choosing wordlists based on the server fingerprint.

    :param info: URL resource to analyze.
    :return: Results from generate_results(), or None when the error
        page could not be analyzed.
    """
    m_url = info.url
    Logger.log_more_verbose("Start to process URL: %r" % m_url)

    #
    # Get the remote web server fingerprint
    #
    m_webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)

    m_wordlist = set()

    # There is fingerprinting information?
    if m_webserver_finger:

        m_webserver_finger = m_webserver_finger.pop()

        m_server_canonical_name = m_webserver_finger.name_canonical
        m_servers_related = m_webserver_finger.related  # Set with related web servers

        #
        # Load wordlists
        #
        m_wordlist_update = m_wordlist.update

        # Common wordlist
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

        # Wordlist of server name
        try:
            w = Config.plugin_extra_config["%s_predictables" % m_server_canonical_name]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

        # Wordlist of related with the server found
        try:
            for l_servers_related in m_servers_related:
                # FIX: look up the wordlist of each *related* server.
                # The original used m_server_canonical_name here, so the
                # canonical server's wordlist was reloaded once per
                # related server and the related lists were never used.
                w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
                m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")
    else:

        # Common wordlists
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist.update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load wordlists")

    # Load content of wordlists
    m_urls = set()
    m_urls_update = m_urls.update

    # Fixed Url: make sure it ends with a slash before joining.
    m_url_fixed = m_url if m_url.endswith("/") else "%s/" % m_url

    for l_w in m_wordlist:
        # Use a copy of wordlist to avoid modify the original source
        l_loaded_wordlist = WordListLoader.get_advanced_wordlist_as_list(l_w)

        m_urls_update((urljoin(m_url_fixed, (l_wo[1:] if l_wo.startswith("/") else l_wo)) for l_wo in l_loaded_wordlist))

    # Generates the error page
    m_error_response = get_error_page(m_url)

    # Create the matching analyzer
    try:
        m_store_info = MatchingAnalyzer(m_error_response, min_ratio=0.65)
    except ValueError:
        # There is no information to analyze.
        return

    # Create the partial funs
    _f = partial(process_url,
                 severity_vectors['predictables'],
                 get_http_method(m_url),
                 m_store_info,
                 self.update_status,
                 len(m_urls))

    # Process the URLs
    for i, l_url in enumerate(m_urls):
        _f((i, l_url))

    # Generate and return the results.
    return generate_results(m_store_info.unique_texts)
def analyze_html(self, info): #---------------------------------------------------------------------- # Get malware suspicious links. Logger.log_more_verbose("Processing HTML: %s" % info.identity) # Load the malware wordlist. wordlist_filename = Config.plugin_config["malware_sites"] try: wordlist = WordListLoader.get_advanced_wordlist_as_list( wordlist_filename) except WordlistNotFound: Logger.log_error("Wordlist '%s' not found.." % wordlist_filename) return except TypeError: Logger.log_error( "Wordlist '%s' is not a file." % wordlist_filename) return if not wordlist: Logger.log_error("Wordlist '%s' is empty." % wordlist_filename) Logger.log("1") # Get links base_urls = set() for url in info.find_linked_data(Data.TYPE_RESOURCE, Resource.RESOURCE_URL): m_url = url.url base_urls.add(m_url) if info.information_type == Information.INFORMATION_HTML: m_links = extract_from_html(info.raw_data, m_url) m_links.update( extract_from_text(info.raw_data, m_url) ) elif info.information_type == Information.INFORMATION_PLAIN_TEXT: m_links = extract_from_text(info.raw_data, m_url) else: raise Exception("Internal error!") m_links.difference_update(base_urls) Logger.log("2") # If we have no links, abort now if not m_links: Logger.log_verbose("No output links found.") return # Do not follow URLs that contain certain keywords m_forbidden = WordListLoader.get_wordlist( Config.plugin_config["wordlist_no_spider"]) m_urls_allowed = { url for url in m_links if url and not any(x in url for x in m_forbidden) } Logger.log("3") # Get only output links m_output_links = [] for url in m_urls_allowed: try: if url not in Config.audit_scope: m_output_links.append(url) except Exception, e: Logger.log_error_more_verbose(format_exc())
def recv_info(self, info):
    """
    Brute force subdomains of the given root domain, filtering out
    wildcard (dynamic) DNS responses and reporting non-whitelisted
    subdomains as possible information disclosures.

    :param info: Domain resource to analyze.
    :return: List of Domain resources, DNS registers and
        DomainDisclosure vulnerabilities, or None on skip / error.
    """

    # Get the root domain only.
    root = info.root

    # Skip localhost.
    if root == "localhost":
        return

    # Skip root domains we've already processed.
    if self.state.put(root, True):
        return

    # Load the subdomains wordlist.
    try:
        wordlist = WordListLoader.get_advanced_wordlist_as_list(Config.plugin_args["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_args["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_args["wordlist"])
        return

    # Load the subdomains whitelist.
    try:
        whitelist = WordListLoader.get_advanced_wordlist_as_list(Config.plugin_config["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_config["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_config["wordlist"])
        return

    # FIX: turn the whitelist into a set so the per-subdomain
    # membership test in the loop below is O(1) instead of a
    # linear scan of the whole wordlist.
    whitelist = set(whitelist)

    #
    # Set a base line for dinamyc sub-domains
    #
    # Resolve a few random (certainly nonexistent) subdomains; if they
    # all CNAME to the same place, that's a wildcard DNS entry.
    m_virtual_domains = []
    for v in (generate_random_string(40) for x in xrange(3)):
        l_subdomain = ".".join((v, root))

        records = DNS.get_a(l_subdomain, also_CNAME=True)

        for rec in records:
            if rec.type == "CNAME":
                m_virtual_domains.append(rec.target)

    # If 3 subdomains are the same, set the base domain
    m_base_domain = None
    if len(set(m_virtual_domains)) == 1:
        m_base_domain = m_virtual_domains[0]

    # Configure the progress notifier.
    self.progress.set_total(len(wordlist))
    self.progress.min_delta = 1  # notify every 1%

    # For each subdomain in the wordlist...
    found = 0
    results = []
    visited = set()
    for prefix in wordlist:

        # Mark as completed before actually trying.
        # We can't put this at the end of the loop where it belongs,
        # because the "continue" statements would skip over this too.
        self.progress.add_completed()

        # Build the domain name.
        name = ".".join((prefix, root))

        # Skip if out of scope.
        if name not in Config.audit_scope:
            continue

        # Resolve the subdomain.
        records = DNS.get_a(name, also_CNAME=True)
        records.extend(DNS.get_aaaa(name, also_CNAME=True))

        # If no DNS records were found, skip.
        if not records:
            continue

        # If CNAME is the base domain, skip
        chk = [True for x in records if x.type == "CNAME" and x.target == m_base_domain]
        if len(chk) > 0 and all(chk):
            continue

        # We found a subdomain!
        found += 1
        Logger.log_more_verbose("Subdomain found: %s" % name)

        # Create the Domain object for the subdomain.
        domain = Domain(name)
        results.append(domain)

        #
        # Check for Domain disclosure
        #
        if prefix not in whitelist:
            d = DomainDisclosure(name,
                                 risk=0,
                                 level="low",
                                 title="Possible subdomain leak",
                                 description="A subdomain was discovered which may be an unwanted information disclosure.")
            d.add_resource(domain)
            results.append(d)

        # For each DNs record, grab the address or name.
        # Skip duplicated records.
        for rec in records:
            if rec.type == "CNAME":
                location = rec.target
            elif rec.type in ("A", "AAAA"):
                location = rec.address
            else:  # should not happen...
                results.append(rec)
                domain.add_information(rec)
                continue
            if location not in visited:
                visited.add(location)
                results.append(rec)
                domain.add_information(rec)

    # Log the results.
    if found:
        Logger.log("Found %d subdomains for root domain: %s" % (found, root))
    else:
        Logger.log_verbose("No subdomains found for root domain: %s" % root)

    # Return the results.
    return results
def recv_info(self, info):
    """
    Brute force subdomains of the given root domain using a wordlist.

    :param info: Domain resource to analyze.
    :return: List of Domain resources and DNS registers found,
        or None on skip / error.
    """

    # Get the root domain only.
    root = info.root

    # Skip localhost.
    if root == "localhost":
        return

    # Skip root domains we've already processed.
    if self.state.put(root, True):
        return

    # Load the subdomains wordlist.
    try:
        prefixes = WordListLoader.get_advanced_wordlist_as_list(Config.plugin_args["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_args["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_args["wordlist"])
        return

    # Configure the progress notifier.
    self.progress.set_total(len(prefixes))
    self.progress.min_delta = 1  # notify every 1%

    found_count = 0
    findings = []
    seen_locations = set()

    # Try every candidate prefix from the wordlist.
    for candidate in prefixes:

        # Progress must be counted up front: the "continue"
        # statements below would otherwise skip the update.
        self.progress.add_completed()

        # Build the candidate subdomain name.
        subdomain = ".".join((candidate, root))

        # Honor the audit scope.
        if subdomain not in Config.audit_scope:
            continue

        # Resolve A, AAAA and CNAME records for the candidate.
        dns_records = DNS.get_a(subdomain, also_CNAME=True)
        dns_records.extend(DNS.get_aaaa(subdomain, also_CNAME=True))

        # Nothing resolved: not a live subdomain.
        if not dns_records:
            continue

        # The name resolved, so we have a live subdomain.
        found_count += 1
        Logger.log_more_verbose("Subdomain found: %s" % subdomain)

        # Create the Domain object for the subdomain.
        domain = Domain(subdomain)
        findings.append(domain)

        # Keep each record, deduplicating by target address / name.
        for record in dns_records:
            if record.type == "CNAME":
                location = record.target
            elif record.type in ("A", "AAAA"):
                location = record.address
            else:
                # Unexpected record type: keep it unconditionally.
                findings.append(record)
                domain.add_information(record)
                continue
            if location not in seen_locations:
                seen_locations.add(location)
                findings.append(record)
                domain.add_information(record)

    # Log the results.
    if found_count:
        Logger.log("Found %d subdomains for root domain: %s" % (found_count, root))
    else:
        Logger.log_verbose("No subdomains found for root domain: %s" % root)

    # Return the results.
    return findings
def recv_info(self, info):
    """
    Brute force predictable resources on the web server hosting the
    given URL, choosing wordlists based on the server fingerprint.

    :param info: URL resource to analyze.
    :return: Results from generate_results(), or None when the error
        page could not be analyzed.
    """
    m_url = info.url
    Logger.log_more_verbose("Start to process URL: %r" % m_url)

    #
    # Get the remote web server fingerprint
    #
    m_webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)

    m_wordlist = set()

    # There is fingerprinting information?
    if m_webserver_finger:

        m_webserver_finger = m_webserver_finger.pop()

        m_server_canonical_name = m_webserver_finger.name_canonical
        m_servers_related = m_webserver_finger.related  # Set with related web servers

        #
        # Load wordlists
        #
        m_wordlist_update = m_wordlist.update

        # Common wordlist
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            pass

        # Wordlist of server name
        try:
            w = Config.plugin_extra_config["%s_predictables" % m_server_canonical_name]
            m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            pass

        # Wordlist of related with the server found
        try:
            for l_servers_related in m_servers_related:
                # FIX: look up the wordlist of each *related* server.
                # The original used m_server_canonical_name here, so the
                # canonical server's wordlist was reloaded once per
                # related server and the related lists were never used.
                w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
                m_wordlist_update([l_w for l_w in w.itervalues()])
        except KeyError:
            pass
    else:

        # Common wordlists
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist.update([l_w for l_w in w.itervalues()])
        except KeyError:
            pass

    # Load content of wordlists
    m_urls = set()
    m_urls_update = m_urls.update

    # Fixed Url: make sure it ends with a slash before joining.
    m_url_fixed = m_url if m_url.endswith("/") else "%s/" % m_url

    for l_w in m_wordlist:
        # Use a copy of wordlist to avoid modify the original source
        l_loaded_wordlist = WordListLoader.get_advanced_wordlist_as_list(l_w)

        m_urls_update((urljoin(m_url_fixed, (l_wo[1:] if l_wo.startswith("/") else l_wo)) for l_wo in l_loaded_wordlist))

    # Generates the error page
    m_error_response = get_error_page(m_url)

    # Create the matching analyzer
    try:
        m_store_info = MatchingAnalyzer(m_error_response, min_ratio=0.65)
    except ValueError:
        # There is no information to analyze.
        return

    # Create the partial funs
    _f = partial(process_url,
                 severity_vectors['predictables'],
                 get_http_method(m_url),
                 m_store_info,
                 self.update_status,
                 len(m_urls))

    # Process the URLs
    for i, l_url in enumerate(m_urls):
        _f((i, l_url))

    # Generate and return the results.
    return generate_results(m_store_info.unique_texts)
def recv_info(self, info): m_domain = info.root # Skips localhost if m_domain == "localhost": return m_return = None # Checks if the hostname has been already processed if not self.state.check(m_domain): # # Looking for # m_subdomains = WordListLoader.get_advanced_wordlist_as_list("subs_small.txt") # Run in parallel self.base_domain = m_domain self.completed = Counter(0) self.total = len(m_subdomains) r = pmap(self.get_subdomains_bruteforcer, m_subdomains, pool_size=10) # # Remove repeated # # The results m_domains = set() m_domains_add = m_domains.add m_domains_already = [] m_domains_already_append = m_domains_already.append m_ips = set() m_ips_add = m_ips.add m_ips_already = [] m_ips_already_append = m_ips_already.append if r: for doms in r: for dom in doms: # Domains if dom.type == "CNAME": if not dom.target in m_domains_already: m_domains_already_append(dom.target) if dom.target in Config.audit_scope: m_domains_add(dom) else: discard_data(dom) # IPs if dom.type == "A": if dom.address not in m_ips_already: m_ips_already_append(dom.address) m_ips_add(dom) # Unify m_domains.update(m_ips) m_return = m_domains # Add the information to the host map(info.add_information, m_return) # Set the domain as processed self.state.set(m_domain, True) Logger.log_verbose("DNS analyzer plugin found %d subdomains" % len(m_return)) # Write the info as more user friendly if Logger.MORE_VERBOSE: m_tmp = [] m_tmp_append = m_tmp.append for x in m_return: if getattr(x, "address", False): m_tmp_append("%s (%s)" % (getattr(x, "address"), str(x))) elif getattr(x, "target", False): m_tmp_append("%s (%s)" % (getattr(x, "target"), str(x))) else: m_tmp_append(str(x)) Logger.log_more_verbose("Subdomains found: \n\t+ %s" % "\n\t+ ".join(m_tmp)) return m_return
def analyze_html(self, info): #---------------------------------------------------------------------- # Get malware suspicious links. Logger.log_more_verbose("Processing HTML: %s" % info.identity) # Load the malware wordlist. wordlist_filename = Config.plugin_config["malware_sites"] try: wordlist = WordListLoader.get_advanced_wordlist_as_list( wordlist_filename) except WordlistNotFound: Logger.log_error("Wordlist '%s' not found.." % wordlist_filename) return except TypeError: Logger.log_error("Wordlist '%s' is not a file." % wordlist_filename) return if not wordlist: Logger.log_error("Wordlist '%s' is empty." % wordlist_filename) Logger.log("1") # Get links base_urls = set() for url in info.find_linked_data(Data.TYPE_RESOURCE, Resource.RESOURCE_URL): m_url = url.url base_urls.add(m_url) if info.information_type == Information.INFORMATION_HTML: m_links = extract_from_html(info.raw_data, m_url) m_links.update(extract_from_text(info.raw_data, m_url)) elif info.information_type == Information.INFORMATION_PLAIN_TEXT: m_links = extract_from_text(info.raw_data, m_url) else: raise Exception("Internal error!") m_links.difference_update(base_urls) Logger.log("2") # If we have no links, abort now if not m_links: Logger.log_verbose("No output links found.") return # Do not follow URLs that contain certain keywords m_forbidden = WordListLoader.get_wordlist( Config.plugin_config["wordlist_no_spider"]) m_urls_allowed = { url for url in m_links if url and not any(x in url for x in m_forbidden) } Logger.log("3") # Get only output links m_output_links = [] for url in m_urls_allowed: try: if url not in Config.audit_scope: m_output_links.append(url) except Exception, e: Logger.log_error_more_verbose(format_exc())
class PredictablesDisclosureBruteforcer(TestingPlugin):
    # Testing plugin: brute forces predictable resource paths under a
    # folder URL, using wordlists selected by the detected web server.

    #--------------------------------------------------------------------------
    def get_accepted_types(self):
        # This plugin only wants FolderURL resources.
        return [FolderURL]

    #--------------------------------------------------------------------------
    def run(self, info):
        # NOTE(review): this method builds m_urls but never consumes it
        # in the visible source — the original may continue past this
        # chunk; confirm before assuming it is complete.
        m_url = info.url
        Logger.log_more_verbose("Start to process URL: %r" % m_url)

        #
        # Get the remote web server fingerprint
        #
        m_webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)

        m_wordlist = set()

        # Common wordlists
        try:
            w = Config.plugin_extra_config["common"]
            m_wordlist.update([l_w for l_w in w.itervalues()])
        except KeyError:
            Logger.log_error("Can't load common wordlists")

        # There is fingerprinting information?
        if m_webserver_finger:

            m_webserver_finger = m_webserver_finger.pop()

            m_server_canonical_name = m_webserver_finger.canonical_name
            m_servers_related = m_webserver_finger.related  # Set with related web servers

            #
            # Load wordlists
            #
            m_wordlist_update = m_wordlist.update

            # Wordlist of server name
            try:
                w = Config.plugin_extra_config["%s_predictables" % m_server_canonical_name]
                m_wordlist_update([l_w for l_w in w.itervalues()])
            except KeyError:
                Logger.log_error("Can't load predictables wordlists for server: '%s'." % m_server_canonical_name)

            # Wordlist of related with the server found
            try:
                for l_servers_related in m_servers_related:
                    w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
                    m_wordlist_update([l_w for l_w in w.itervalues()])
            except KeyError, e:
                Logger.log_error("Can't load wordlists predictables wordlists for related webserver: '%s'" % e)

        # Load content of wordlists
        m_urls = set()
        m_urls_update = m_urls.add  # note: bound to .add, one URL at a time

        for l_w in m_wordlist:
            # Use a copy of wordlist to avoid modify the original source
            l_loaded_wordlist = WordListLoader.get_advanced_wordlist_as_list(l_w)

            for l_wo in l_loaded_wordlist:
                try:
                    l_wo = l_wo[1:] if l_wo.startswith("/") else l_wo
                    tmp_u = urljoin(m_url, l_wo)
                except ValueError, e:
                    # NOTE(review): this logs tmp_u, which is the value
                    # from the *previous* iteration (or unbound on the
                    # first failure) — likely meant to log l_wo.
                    Logger.log_error("Failed to parse key, from wordlist, '%s'" % tmp_u)
                    continue

                m_urls_update(tmp_u)
def recv_info(self, info): m_domain = info.root # Skips localhost if m_domain == "localhost": return m_return = None # Checks if the hostname has been already processed if not self.state.check(m_domain): # # Looking for # m_subdomains = WordListLoader.get_advanced_wordlist_as_list( "subs_small.txt") # Run in parallel self.base_domain = m_domain self.completed = Counter(0) self.total = len(m_subdomains) r = pmap(self.get_subdomains_bruteforcer, m_subdomains, pool_size=10) # # Remove repeated # # The results m_domains = set() m_domains_add = m_domains.add m_domains_already = [] m_domains_already_append = m_domains_already.append m_ips = set() m_ips_add = m_ips.add m_ips_already = [] m_ips_already_append = m_ips_already.append if r: for doms in r: for dom in doms: # Domains if dom.type == "CNAME": if not dom.target in m_domains_already: m_domains_already_append(dom.target) if dom.target in Config.audit_scope: m_domains_add(dom) else: discard_data(dom) # IPs if dom.type == "A": if dom.address not in m_ips_already: m_ips_already_append(dom.address) m_ips_add(dom) # Unify m_domains.update(m_ips) m_return = m_domains # Add the information to the host map(info.add_information, m_return) # Set the domain as processed self.state.set(m_domain, True) Logger.log_verbose("DNS analyzer plugin found %d subdomains" % len(m_return)) # Write the info as more user friendly if Logger.MORE_VERBOSE: m_tmp = [] m_tmp_append = m_tmp.append for x in m_return: if getattr(x, "address", False): m_tmp_append("%s (%s)" % (getattr(x, "address"), str(x))) elif getattr(x, "target", False): m_tmp_append("%s (%s)" % (getattr(x, "target"), str(x))) else: m_tmp_append(str(x)) Logger.log_more_verbose("Subdomains found: \n\t+ %s" % "\n\t+ ".join(m_tmp)) return m_return
def run(self, info):
    """
    Brute force subdomains of the given root domain, filtering out
    wildcard (dynamic) DNS responses and reporting non-whitelisted
    subdomains as possible information disclosures.

    :param info: Domain resource to analyze.
    :return: List of Domain resources, DNS registers and
        DomainDisclosure vulnerabilities, or None on skip / error.
    """

    # Get the root domain only.
    root = info.root

    # Skip localhost.
    if root == "localhost":
        return

    # Skip root domains we've already processed.
    if self.state.put(root, True):
        return

    # Load the subdomains wordlist.
    try:
        wordlist = WordListLoader.get_advanced_wordlist_as_list(Config.plugin_args["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_args["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_args["wordlist"])
        return

    # Load the subdomains whitelist.
    try:
        whitelist = WordListLoader.get_advanced_wordlist_as_list(Config.plugin_config["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_config["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_config["wordlist"])
        return

    # FIX: turn the whitelist into a set so the per-subdomain
    # membership test in the loop below is O(1) instead of a
    # linear scan of the whole wordlist.
    whitelist = set(whitelist)

    #
    # Set a base line for dinamyc sub-domains
    #
    # Resolve a few random (certainly nonexistent) subdomains; if they
    # all CNAME to the same place, that's a wildcard DNS entry.
    m_virtual_domains = []
    for v in (generate_random_string(40) for x in xrange(3)):
        l_subdomain = ".".join((v, root))

        records = DNS.get_a(l_subdomain, also_CNAME=True)

        for rec in records:
            if rec.type == "CNAME":
                m_virtual_domains.append(rec.target)

    # If 3 subdomains are the same, set the base domain
    m_base_domain = None
    if len(set(m_virtual_domains)) == 1:
        m_base_domain = m_virtual_domains[0]

    # Configure the progress notifier.
    self.progress.set_total(len(wordlist))
    self.progress.min_delta = 1  # notify every 1%

    # For each subdomain in the wordlist...
    found = 0
    results = []
    visited = set()
    for prefix in wordlist:

        # Mark as completed before actually trying.
        # We can't put this at the end of the loop where it belongs,
        # because the "continue" statements would skip over this too.
        self.progress.add_completed()

        # Build the domain name.
        name = ".".join((prefix, root))

        # Skip if out of scope.
        if name not in Config.audit_scope:
            continue

        # Resolve the subdomain.
        records = DNS.get_a(name, also_CNAME=True)
        records.extend(DNS.get_aaaa(name, also_CNAME=True))

        # If no DNS records were found, skip.
        if not records:
            continue

        # If CNAME is the base domain, skip
        chk = [True for x in records if x.type == "CNAME" and x.target == m_base_domain]
        if len(chk) > 0 and all(chk):
            continue

        # We found a subdomain!
        found += 1
        Logger.log_more_verbose("Subdomain found: %s" % name)

        # Create the Domain object for the subdomain.
        domain = Domain(name)
        results.append(domain)

        #
        # Check for Domain disclosure
        #
        if prefix not in whitelist:
            d = DomainDisclosure(domain,
                                 risk=0,
                                 level="low",
                                 title="Possible subdomain leak",
                                 description="A subdomain was discovered which may be an unwanted information disclosure.")
            results.append(d)

        # For each DNs record, grab the address or name.
        # Skip duplicated records.
        for rec in records:
            if rec.type == "CNAME":
                location = rec.target
            elif rec.type in ("A", "AAAA"):
                location = rec.address
            else:  # should not happen...
                results.append(rec)
                domain.add_information(rec)
                continue
            if location not in visited:
                visited.add(location)
                results.append(rec)
                domain.add_information(rec)

    # Log the results.
    if found:
        Logger.log("Found %d subdomains for root domain: %s" % (found, root))
    else:
        Logger.log_verbose("No subdomains found for root domain: %s" % root)

    # Return the results.
    return results
def recv_info(self, info):
    """
    Brute force subdomains of the given root domain using a wordlist.

    :param info: Domain resource to analyze.
    :return: List of Domain resources and DNS registers found,
        or None on skip / error.
    """

    # Get the root domain only.
    domain_root = info.root

    # Skip localhost.
    if domain_root == "localhost":
        return

    # Skip root domains we've already processed.
    if self.state.put(domain_root, True):
        return

    # Load the subdomains wordlist.
    try:
        candidates = WordListLoader.get_advanced_wordlist_as_list(
            Config.plugin_args["wordlist"])
    except WordlistNotFound:
        Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_args["wordlist"])
        return
    except TypeError:
        Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_args["wordlist"])
        return

    # Configure the progress notifier.
    self.progress.set_total(len(candidates))
    self.progress.min_delta = 1  # notify every 1%

    hits = 0
    output = []
    known_targets = set()

    # Try every candidate prefix from the wordlist.
    for sub_prefix in candidates:

        # Progress must be counted up front: the "continue"
        # statements below would otherwise skip the update.
        self.progress.add_completed()

        # Build the candidate subdomain name.
        fqdn = ".".join((sub_prefix, domain_root))

        # Honor the audit scope.
        if fqdn not in Config.audit_scope:
            continue

        # Resolve A, AAAA and CNAME records for the candidate.
        answers = DNS.get_a(fqdn, also_CNAME=True)
        answers.extend(DNS.get_aaaa(fqdn, also_CNAME=True))

        # Nothing resolved: not a live subdomain.
        if not answers:
            continue

        # The name resolved, so we have a live subdomain.
        hits += 1
        Logger.log_more_verbose("Subdomain found: %s" % fqdn)

        # Create the Domain object for the subdomain.
        found_domain = Domain(fqdn)
        output.append(found_domain)

        # Keep each record, deduplicating by target address / name.
        for answer in answers:
            if answer.type == "CNAME":
                where = answer.target
            elif answer.type in ("A", "AAAA"):
                where = answer.address
            else:
                # Unexpected record type: keep it unconditionally.
                output.append(answer)
                found_domain.add_information(answer)
                continue
            if where not in known_targets:
                known_targets.add(where)
                output.append(answer)
                found_domain.add_information(answer)

    # Log the results.
    if hits:
        Logger.log("Found %d subdomains for root domain: %s" % (hits, domain_root))
    else:
        Logger.log_verbose("No subdomains found for root domain: %s" % domain_root)

    # Return the results.
    return output