def verify(item, dns_config, server_ip):
    """Cross-check forward and reverse DNS through a specific server.

    Resolves item[0] to an address and item[1] back to a hostname via
    *server_ip*, then confirms dns_config['hostnames'][hostname] equals the
    resolved address. Returns True on a match, False on mismatch or any
    DNS/lookup error.
    """
    try:
        dns_res = resolver.Resolver()
        dns_res.lifetime = 2.0
        dns_res.nameservers = [server_ip]
        forward_ip = str(dns_res.query(item[0])[0].address)
        # PTR answers carry a trailing dot; strip it before the lookup.
        ptr_answer = dns_res.query(reversename.from_address(item[1]), 'PTR')
        looked_up_name = str(ptr_answer[0].target)[:-1]
        return dns_config.get('hostnames').get(looked_up_name) == forward_ip
    except dns.exception.Timeout as e:
        logging.warning(
            "DNS operation timed out, {} server was unable to query {}: "
            "{}".format(server_ip, item[1], e))
        return False
    except Exception as e:
        logging.exception(
            "Ran into error trying to verify DNS addresses and hostnames: "
            "{}".format(e))
        return False
def find_srv_records(domain, path=None):
    """Probe well-known SRV prefixes under *domain*.

    Prefix names are read from the bundled resources/srv.txt (or *path*
    when supplied); each answered prefix yields a [host, target, port]
    entry in the returned list.
    """
    found = []
    srv_resolver = resolver.Resolver()
    # Force well-known public resolvers to the front of the list.
    srv_resolver.nameservers.insert(0, "8.8.8.8")
    srv_resolver.nameservers.insert(0, "1.1.1.1")
    srv_resolver.search = []

    # read the data in from the data directory
    if path is None:
        file_path = pkg_resources.resource_filename("yawast", "resources/srv.txt")
    else:
        file_path = path

    with open(file_path) as prefix_file:
        for raw_line in prefix_file:
            host = raw_line.strip() + "." + domain + "."
            try:
                answers = srv_resolver.query(
                    host, "SRV", lifetime=3, raise_on_no_answer=False)
                for answer in answers:
                    found.append(
                        [host, answer.target.to_text(), str(answer.port)])
            except (resolver.NoAnswer, resolver.NXDOMAIN, exception.Timeout) as error:
                output.debug(f"SRV: {host} received error: {str(error)}")
            except (resolver.NoNameservers, resolver.NotAbsolute,
                    resolver.NoRootSOA):
                output.debug_exception()
                pass

    return found
def dns(self, msg):
    """Resolve *msg* as an A record and send the results to the chat.

    Uses Google public DNS (8.8.8.8 / 8.8.4.4) instead of the system
    resolver; replies 'Query failed' on any resolution error.
    """
    from dns import resolver
    res = resolver.Resolver()
    res.nameservers = ['8.8.8.8', '8.8.4.4']
    try:
        # BUG FIX: the original called the module-level resolver.query(),
        # which silently ignored the nameservers configured on `res`.
        myAnswers = res.query(str(msg), 'A')
        message = ''.encode('utf-8')
        if myAnswers:
            for rdata in myAnswers:
                message += str(str(rdata) + '\n').encode('utf-8')
            self.sender.sendMessage(message.decode('utf-8'))
        else:
            self.sender.sendMessage('No results returned...')
    except Exception:
        # Catch-all kept deliberately (was a bare `except:`): any failure
        # is reported to the user instead of crashing the bot.
        log('Bot.dns', 'Unspecified error', True)
        self.sender.sendMessage('Query failed')
def openresolver_udp_test(school_name, test_ip, report_file):
    """Probe *test_ip* for an open recursive DNS resolver over UDP and
    append a human-readable result line to *report_file*.

    Relies on a module-level ``timeout`` value for resolver settings.
    """
    # print(test_ip,end='')
    socket.setdefaulttimeout(0.1)
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    connect_result = udp_socket.connect_ex((str(test_ip), 53))
    if connect_result == 0:
        dns_res = resolver.Resolver()
        dns_res.nameservers = [str(test_ip)]
        dns_res.lifetime = timeout
        dns_res.timeout = timeout
        try:
            answers = dns_res.query('google.com', tcp=False)[0]
            scan_ip_report = str(school_name + '的UDP DNS Server:' +
                                 str(test_ip) + '-[Accept Query google.com:' +
                                 str(answers) + ']--對外開放遞迴查詢!!\n')
            print(scan_ip_report)
            with open(report_file, "a") as fp:
                fp.write(scan_ip_report)
        except:
            # Any failure (timeout, refusal, write error above) lands here.
            scan_ip_report = str(school_name + '的UDP DNS Server:' +
                                 str(test_ip) +
                                 '-[Refused Query google.com]--未開放\n')
            print(scan_ip_report)
            with open(report_file, "a") as fp:
                fp.write(scan_ip_report)
    udp_socket.close()
def find_service_with_consul(
        consul_resolver_port: int,
        consul_resolver_nameservers: List[str],
        consul_service_name: str = "search-service.service.consul"
) -> Tuple[str, int]:
    """Resolve a consul-registered service to an (ip, port) pair.

    :param consul_resolver_port: port the consul DNS interface listens on
    :param consul_resolver_nameservers: consul agent/server addresses
    :param consul_service_name: full consul DNS name of the service
    :return: (service_ip, service_port)
    :raises ConnectionError: on resolver timeout or when no nameserver answers
    """
    consul_resolver = dns_resolver.Resolver()
    consul_resolver.port = consul_resolver_port
    consul_resolver.nameservers = consul_resolver_nameservers

    # Short service label (e.g. "search-service") used in error messages.
    # str.split always returns at least one element, so no guard is needed
    # (the original try/except IndexError was dead code).
    service_name = consul_service_name.split('.')[0]

    try:
        dnsanswer = consul_resolver.query(consul_service_name, 'A', lifetime=5)
    except dns_exception.Timeout:
        raise ConnectionError(
            f"[dns.exception.Timeout] Can't reach consul resolver at port={consul_resolver_port} !"
        )
    except dns_resolver.NoNameservers:
        raise ConnectionError(
            f"Can't find consul service={consul_service_name} => "
            f"`{service_name}` server not started/synced/checked !")
    service_ip = str(dnsanswer[0])

    # BUG FIX: the SRV lookup previously hard-coded
    # "search-service.service.consul", ignoring consul_service_name.
    dnsanswer_srv = consul_resolver.query(consul_service_name, 'SRV')
    # SRV rdata text is "priority weight port target"; field 2 is the port.
    service_port = int(str(dnsanswer_srv[0]).split()[2])
    return service_ip, service_port
def do_dns_check(self):
    """ perform DNS checks against SkyDNS service

    Resolves every known OpenShift service name through the cluster DNS
    host and compares the answer to the service IP recorded by OpenShift.
    Emits 1 (healthy) or 0 (failed) as the
    'openshift.master.skydns.query' metric.
    NOTE: Python 2 syntax (print statements).
    """
    print "\nPerforming DNS queries against SkyDNS...\n"
    # configure=False skips /etc/resolv.conf; only self.dns_host is queried.
    dns_resolver = resolver.Resolver(configure=False)
    dns_resolver.nameservers.append(self.dns_host)
    # Set dns_check to 1 (good) by default
    dns_check = 1
    for service in self.openshift_services:
        # Service names follow <name>.<namespace>.svc.cluster.local
        name_to_resolve = service['name'] + '.' + service[
            'namespace'] + '.svc.cluster.local'
        try:
            dns_answer = dns_resolver.query(name_to_resolve, 'A')
        except dns_exception.DNSException as e:
            print "Failed DNS lookup of %s. Error: %s" % (name_to_resolve, e)
            print "\nTroubleshoot command: dig @%s %s A\n" % (
                self.dns_host, name_to_resolve)
            dns_check = 0
            # A failed lookup fails the whole check immediately.
            break
        if self.args.verbose:
            print "\nQueryring for A record of %s on server %s" % (
                name_to_resolve, self.dns_host)
            print "DNS Answer: %s" % dns_answer.rrset[0].address
            print "Openshift Answer: %s" % service['ip']
        # A mismatch fails the check but remaining services are still scanned.
        if dns_answer.rrset[0].address != service['ip']:
            dns_check = 0
    print "================================================\n"
    self.metric_sender.add_metric(
        {'openshift.master.skydns.query': dns_check})
def getSOA(ns):
    """Query the SOA record for *ns* and classify the result.

    Return value is inconsistently typed — callers must handle all of:
      * dict {auth_zone: [mname, ...]} when the lookup raised a
        "does not contain an answer" error (authority section mined from
        the exception), or {} for other lookup errors,
      * 0     for rcode NOERROR,
      * -1    for rcode NXDOMAIN,
      * "ERROR" for rcode SERVFAIL,
      * None (implicit) for any other rcode.
    """
    localRes = resolver.Resolver()
    localRes.timeout = 5
    localRes.lifetime = 5
    answer = ''
    soa = dict()
    # try to get a SOA, if it fails return ERROR
    try:
        answer = localRes.resolve(ns, 'SOA')
    except Exception as e:
        print(e)
        if 'does not contain an answer' in str(e):
            # NOTE(review): e.kwargs is a dnspython internal attribute —
            # confirm it exists on the installed dnspython version.
            tempDict = e.kwargs
            response = tempDict['response']
            tempSOA = []
            authZone = ''
            # Collect the SOA mname values from the authority section.
            for k in response.authority:
                authZone = str(k.name)
                for singleItem in k.items:
                    tempV = str(singleItem.mname)
                    tempSOA.append(tempV)
                soa[authZone] = tempSOA
            return soa
        else:
            logging.info(f"BROKEN SOA for {ns}")
            return soa
    if answer != '':
        response = answer.response
        # print(type(response))
        rcode = response.rcode()
        if rcode == 0:
            return 0
        elif rcode == 3:
            return -1
        elif rcode == 2:
            return "ERROR"
def find_dns_cdn(self, domain, depth=0):
    """Recursively check a CNAME chain

    Walks CNAME targets (up to 10 levels) and falls back to a PTR lookup
    of the resolved address, matching each name via self.check_cdn_name.
    Returns the provider string or None.
    """
    from dns import resolver, reversename
    dns_resolver = resolver.Resolver()
    dns_resolver.timeout = 1
    provider = self.check_cdn_name(domain)
    logging.debug("Looking up %s", domain)

    # First do a CNAME check
    if provider is None:
        try:
            cname_answers = dns_resolver.query(domain, 'CNAME')
            if cname_answers and len(cname_answers):
                for record in cname_answers:
                    target = '.'.join(record.target).strip(' .')
                    logging.debug("%s -> %s", domain, target)
                    if target == domain:
                        continue
                    provider = self.check_cdn_name(target)
                    # Recurse on unknown targets, bounded at depth 10.
                    if provider is None and depth < 10:
                        provider = self.find_dns_cdn(target, depth + 1)
                    if provider is not None:
                        break
        except Exception:
            pass

    # Try a reverse-lookup of the address
    if provider is None:
        try:
            a_answers = dns_resolver.query(domain)
            if a_answers:
                ip_text = str(a_answers[0])
                rev_query = reversename.from_address(ip_text)
                if rev_query:
                    ptr_name = str(dns_resolver.query(rev_query, "PTR")[0])
                    if ptr_name:
                        logging.debug("%s -> %s -> %s", domain, ip_text,
                                      ptr_name)
                        provider = self.check_cdn_name(ptr_name)
        except Exception:
            pass

    return provider
def main():
    """Drive the malware-strings workflow end to end."""
    # Set DNS servers for the script. Trying to get the best results.
    resolv = resolver.Resolver()
    resolv.nameservers = ['138.247.117.11', '8.8.8.8', '8.8.4.4', '1.1.1.1']

    # Initialise the analysis helper.
    strings_analyzer = MalwareStrings()
    # Load the raw `strings` output (hard-coded input file name; swap the
    # file or add a CLI argument as noted in the original).
    strings_analyzer.read_strings('strings.txt')
    # Regex-split the raw strings into IPs and domain names.
    strings_analyzer.parse_strings()
    # Reverse-DNS the IPs, then forward-resolve the domains.
    strings_analyzer.reverse_dns(resolv)
    strings_analyzer.lookup_domains(resolv)
    # Dump everything that was discovered.
    strings_analyzer.output_all()
def checkdomainresolved(domain):
    """Resolve *domain* against several public resolvers and merge results.

    Each resolver is queried independently so one flaky server cannot hide
    the domain.

    :param domain: hostname to resolve
    :return: (resolved, ips) — resolved is True when at least one resolver
             returned an address; ips is the de-duplicated address list.
    """
    # TODO: add v6 resolvers.
    ground_resolver = ["8.8.8.8", "8.8.4.4", "140.82.36.158", "47.88.213.154"]
    resultlist = []
    for server in ground_resolver:
        tmp = resolver.Resolver()
        tmp.nameservers = [server]
        tmp.lifetime = 5.0
        try:
            ans = tmp.query(domain)
            resultlist.append([a.address for a in ans.rrset.items])
        except Exception as e:
            # Best-effort: a failing resolver just contributes nothing.
            print(domain, e)
            resultlist.append([])
    # FIX: union over however many resolvers are configured instead of
    # hard-coding indices 0..3 (which broke whenever the resolver list
    # changed length). Sorted for a deterministic result.
    merged = sorted(set().union(*resultlist))
    if not merged:
        return False, merged
    return True, merged
def test_clear_txt(self):
    """End-to-end check: set a TXT record, clear it, then confirm via the
    configured nameserver that the record value is now empty."""
    client = DuckDNSClient(TEST_DUCKDNS_TOKEN)

    client.set_txt_record(TEST_DOMAIN, "simple text")
    # wait sometime for propagation of the txt update
    print("wait for the txt record update to propagate...")
    time.sleep(DEFAULT_PROPAGATION_SECONDS)

    client.clear_txt_record(TEST_DOMAIN)
    # wait sometime for propagation of the txt update
    print("wait for the txt record update to propagate...")
    time.sleep(DEFAULT_PROPAGATION_SECONDS)

    # get the cleared txt record from the specified nameserver
    custom_resolver = resolver.Resolver()
    custom_resolver.nameservers = NAMESERVER
    answer = custom_resolver.resolve(TEST_DOMAIN, "TXT")
    txt_value = answer.response.answer[0][0].strings[0].decode("utf-8")
    self.assertEqual("", txt_value)
def resolve_domain(self, query):
    """Resolve *query* against the local host_file.json records first,
    falling back to the external DNS server on a miss.

    Returns (result, query) where result is a records entry, a dnspython
    answer, or the string "Not Found".
    """
    with open("host_file.json") as file_obj:
        records = json.loads(file_obj.read())

    top_domain = subdomain = domain = ""
    # Drop the trailing dot from the DNS-style query name.
    query = query[:-1]

    # tld/validators need a scheme; bolt one on when it is missing.
    if not bool(validators.url(query)):
        query_modified = "http://" + query
        modified = True
    else:
        query_modified = query
        modified = False

    try:
        domain_obj = tld.get_tld(query_modified, as_object=True)
        domain = domain_obj.domain
        top_domain = domain_obj.tld
        subdomain = domain_obj.subdomain
    except (tld.exceptions.TldBadUrl, tld.exceptions.TldDomainNotFound):
        return "Not Found", query

    try:
        return records[top_domain][domain][subdomain], query
    except KeyError:
        # Not in the local records: strip any scheme back off and ask the
        # external resolver instead.
        if modified:
            query_modified = query
        elif query[:7] == "http://":
            query_modified = query[7:]
        else:
            query_modified = query[8:]
        try:
            resolve = resolver.Resolver()
            resolve.nameservers = [EXTERNAL_DNS_IP]
            return resolve.query(query_modified), query
        except resolver.NXDOMAIN:
            return "Not Found", query
def read_user_request(path):
    """Resolve each "<name> <type> <ip>" line of the file at *path* via
    8.8.8.8 and rewrite the file keeping only the lines that failed.

    Always returns 0.
    """
    my_resolver = dr.Resolver()

    # Snapshot every pending request line, then reopen for rewriting.
    with open(path, 'r') as zone_file:
        pending = zone_file.readlines()
    numofzones = len(pending)

    zones = open(path, 'w')
    for line in pending:
        rname, rtype, ip = line.split(maxsplit=2)
        my_resolver.nameservers = ['8.8.8.8']
        answer = 'no result'
        try:
            # Look up the requested name and pull the resolved address out
            # of the textual answer section.
            answer = my_resolver.query(rname)
            answer = str(answer.response.answer[0]).split(' ')[4]
        except:
            # Resolution failed: keep the line for a later retry.
            zones.write(line)
    zones.close()
    return 0
def resolv(hostname):
    """Select and query DNS servers.

    Args:
        hostname: string, hostname

    Returns:
        ips: list, list of IPs

    Raises:
        CouldNotResolv: when the hostname does not exist.
    """
    res = resolver.Resolver()

    # Pick the resolver pool from the hostname prefix:
    # 'b-' => blue, 'g-' => green, anything else => white (default).
    if hostname.startswith('b-'):
        res.nameservers = ['172.16.2.10', '172.16.2.11']
    elif hostname.startswith('g-'):
        res.nameservers = ['10.0.2.10', '10.0.2.11']
    else:
        res.nameservers = ['194.47.252.134', '194.47.252.135']

    # Query and collect every returned address.
    try:
        return [answer.address for answer in res.query(hostname)]
    except resolver.NXDOMAIN:
        raise CouldNotResolv
# -*- coding: utf-8 -*- from __future__ import print_function import grpc from dns import resolver from dns.exception import DNSException from proto import hello_pb2, hello_pb2_grpc # 连接consul服务,作为dns服务器 consul_resolver = resolver.Resolver() consul_resolver.port = 8600 consul_resolver.nameservers = ["127.0.0.1"] def get_ip_port(): '''查询出可用的一个ip,和端口''' try: dnsanswer = consul_resolver.query("search-service.service.consul", "A") dnsanswer_srv = consul_resolver.query("search-service.service.consul", "SRV") except DNSException: return None, None return dnsanswer[0].address, dnsanswer_srv[0].port _HOST, _PORT = get_ip_port() print(_HOST, _PORT) def run():
""" Requirement - dnspython toolkit git clone git://github.com/rthalley/dnspython.git cd dnspython python setup.py install --user """ ACL = [] from dns import resolver res = resolver.Resolver() res.nameservers = ['8.8.8.8'] #Can return multiple values per URL #Some URLS dont resolve, doesnt handle this yet #Strips newline and spaces with open('URLs.txt') as f: for url in f: IP = [] url = url.strip() answers = res.query(url) for rdata in answers: IP.append(rdata.address) for elem in IP: ACL.append(elem) #sort and uniq ACL = sorted(set(ACL)) #Print our ACL
def get():
    """Resolve Google Cloud's outgoing IP ranges from the SPF TXT chain
    rooted at _cloud-netblocks.googleusercontent.com and return them
    rendered as an HTML page."""
    # Create a new instance of Resolver
    r = resolver.Resolver()
    # Set the server address (Google public DNS)
    r.nameservers = ['8.8.8.8', '8.8.4.4']
    # Execute the query to get the resolver answer. The response will be
    # of type 'TXT'
    r_answer = r.query('_cloud-netblocks.googleusercontent.com', 'TXT')
    # For each TXT record (which is only one)...
    for txt_data in r_answer:
        # Convert it to text to get the full netblocks info
        all_netblocks_info = txt_data.to_text()
        # Replace all the extra info to get just the cloud-netblocks entries
        all_netblocks_info = (all_netblocks_info.replace(
            '"v=spf1 ', '').replace('include:', '').replace(' ?all"', ''))
        # Put them into a list
        netblocks_list = all_netblocks_info.split()

    ip_ranges = []

    # For each new cloud-netblock...
    for netblock in netblocks_list:
        # Query again to find the netblock's IP ranges
        r_answer = r.query(netblock, 'TXT')
        # Same proccess as before, this time getting IPs
        for txt_data in r_answer:
            txt_data_str = txt_data.to_text()
            # Remove the 'ip4' part from each entry, to better identify
            # IPs later
            # NOTE(review): 'ip6' (no colon) leaves a leading ':' on IPv6
            # entries — probably intended to be 'ip6:'.
            txt_data_str = txt_data_str.replace('ip4:', '').replace('ip6', '')
            values_list = txt_data_str.split()
            # IP validation: check if any of the values on each TXT
            # entry (which contains all the SPF configs) is a valid IP
            for value in values_list:
                if is_valid_ip(value):
                    ip_ranges.append(value)

    # SORTING IP ADDRESSES
    #(http://www.secnetix.de/olli/Python/tricks.hawk#sortips)
    for i in range(len(ip_ranges)):
        ip_ranges[i] = "%3s.%3s.%3s.%3s" % tuple(ip_ranges[i].split("."))
    ip_ranges.sort()
    for i in range(len(ip_ranges)):
        ip_ranges[i] = ip_ranges[i].replace(" ", "")

    # Format the response
    html_response = '<h1>Google Cloud outgoing IP ranges</h1>'
    html_response += (
        '<h2>Resolved as suggested in the ' +
        '<a href="https://cloud.google.com/appengine/kb/#static-ip" target="_blank">docs</a></h2>'
    )
    html_response += '<br>'.join(ip_ranges)
    html_response += '<h3>List of the above IPs separated by comma (if useful):</h3>'
    # NOTE(review): this separator string was garbled during extraction;
    # a plain comma separator is assumed — verify against the original.
    html_response += ', '.join(ip_ranges)
    html_response += (
        '<br><h3>Source code ' +
        '<a href="https://github.com/cjlallana/google-dns-lookup">here</a>!</h3>'
    )
    return html_response
class DNS_Handler:
    """Handles Lookups and DNS queries"""

    # Shared resolver instance used by the classmethods below.
    resolver = resolver.Resolver()

    @classmethod
    def query_dns(cls, domains, records):
        """
        Query DNS records for host.
        :param domains: Iterable of domains to get DNS Records for
        :param records: Iterable of DNS records to get from domain.
        :return: dict mapping record type -> set of answers; record types
                 with no answers are omitted.
        """
        results = {k: set() for k in records}
        for record in records:
            for domain in domains:
                try:
                    answers = cls.resolver.query(domain, record)
                    for answer in answers:
                        # Add value to record type
                        results.get(record).add(answer)
                except (resolver.NoAnswer, resolver.NXDOMAIN,
                        resolver.NoNameservers):
                    # Type of record doesn't fit domain or no answer from NameServer
                    continue
        # Drop record types that collected no answers.
        return {k: v for k, v in results.items() if v}

    @classmethod
    async def grab_whois(cls, host):
        # Run the external `whois` binary for the registrable ("naked")
        # form of the host and log any "key: value" lines of its output.
        if not host.naked:
            return
        script = "whois {}".format(host.naked).split()
        log_file = HelpUtilities.get_output_path("{}/whois.txt".format(
            host.target))
        logger = Logger(log_file)
        process = await create_subprocess_exec(*script,
                                               stdout=PIPE,
                                               stderr=PIPE)
        result, err = await process.communicate(
        )  #err has not been used, Please make sure to implement the variable.
        if process.returncode == 0:
            logger.info("{} {} WHOIS information has been retrieved".format(
                COLORED_COMBOS.GOOD, host))
            for line in result.decode().strip().split("\n"):
                if ":" in line:
                    logger.debug(line)

    @classmethod
    async def generate_dns_dumpster_mapping(cls, host, sout_logger):
        # Fetch the DNS-dumpster map image for *host* and save it as
        # dns_mapping.png under the host's output directory.
        # NOTE(review): "sucessfully" typo below is in a runtime string and
        # is left untouched here.
        sout_logger.info("{} DNS Dumpster is fetching data for {} ".format(
            COLORED_COMBOS.INFO, host))
        try:
            page = HelpUtilities.query_dns_dumpster(host=host)
            if page.status_code == 200:
                path = HelpUtilities.get_output_path(
                    "{}/dns_mapping.png".format(host.target))
                with open(path, "wb") as target_image:
                    target_image.write(page.content)
                sout_logger.info(
                    "{} DNS Mapping sucessfully fetched for {}".format(
                        COLORED_COMBOS.GOOD, host.target))
            else:
                # Non-200 responses are funnelled into the same error path.
                raise TheDiggerException
        except TheDiggerException:
            sout_logger.info(
                "{} DNS Mapping has Failed. There is a connection error.".
                format(COLORED_COMBOS.BAD))
def dns_lookup(
    query,
    type_A=True,
    type_AAAA=False,
    type_CNAME=False,
    type_NS=False,
    type_MX=False,
    type_TXT=False,
    type_ALL=False,
):
    """Look up DNS records for *query*.

    Each ``type_*`` flag adds that record type to the lookup; ``type_ALL``
    queries every known record type. Failed lookups are skipped silently
    (best-effort, as before).

    :return: {"data": [{"query": ..., "record_type": ..., "result": ...}]}
    """
    all_record_type = [
        "A", "AAAA", "AFSDB", "APL", "CAA", "CDNSKEY", "CERT", "CNAME",
        "DHCID", "DLV", "DNAME", "DS", "HIP", "IPSECKEY", "KEY", "KX",
        "LOC", "MX", "NAPTR", "NS", "NSEC", "NSEC3", "NSEC3PARAM",
        "OPENPGPKEY", "PTR", "RRSIG", "RP", "SIG", "SMIMEA", "SOA", "SRV",
        "SSHFP", "TA", "TKEY", "TLSA", "TSIG", "TXT", "URI",
    ]

    # Map each boolean flag to its record type instead of six if-blocks.
    flag_map = [
        (type_A, "A"),
        (type_AAAA, "AAAA"),
        (type_CNAME, "CNAME"),
        (type_NS, "NS"),
        (type_MX, "MX"),
        (type_TXT, "TXT"),
    ]
    record_types_to_query = [rt for flag, rt in flag_map if flag]
    if type_ALL:
        record_types_to_query = all_record_type

    resolver_instance = resolver.Resolver()
    res = {"data": []}

    def _append(q, record_type, value):
        # Single result-row shape shared by every branch.
        res["data"].append({
            "query": str(q),
            "record_type": record_type,
            "result": str(value),
        })

    for record_type in record_types_to_query:
        try:
            if record_type == "CNAME":
                # Follow the CNAME chain, remembering visited names.
                # FIX: the original `while len(result) >= 1` loop never
                # terminated on circular CNAME chains.
                target = query
                seen = set()
                while target not in seen:
                    seen.add(target)
                    result = resolver_instance.query(str(target), "CNAME")
                    for data in result:
                        _append(target, "CNAME", data)
                    target = str(result[0])
            else:
                result = resolver_instance.query(query, record_type)
                for data in result:
                    _append(query, record_type, data)
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit
            # and KeyboardInterrupt.
            pass
    return res
class DNSHandler:
    """Handles DNS queries and lookups"""

    # Shared resolver instance used by the classmethods below.
    resolver = resolver.Resolver()

    @classmethod
    def query_dns(cls, domains, records):
        """
        Query DNS records for host.
        :param domains: Iterable of domains to get DNS Records for
        :param records: Iterable of DNS records to get from domain.
        :return: dict mapping record type -> set of answers; record types
                 with no answers are omitted.
        """
        results = {k: set() for k in records}
        for record in records:
            for domain in domains:
                try:
                    answers = cls.resolver.query(domain, record)
                    for answer in answers:
                        # Add value to record type
                        results.get(record).add(answer)
                except (resolver.NoAnswer, resolver.NXDOMAIN, resolver.NoNameservers):
                    # Type of record doesn't fit domain or no answer from ns
                    continue
        # Drop record types that collected no answers.
        return {k: v for k, v in results.items() if v}

    @classmethod
    async def grab_whois(cls, host):
        # Run the external `whois` binary for the registrable ("naked")
        # host form and log any "key: value" lines of its output.
        if not host.naked:
            return

        script = "whois {}".format(host.naked).split()
        log_file = HelpUtilities.get_output_path("{}/whois.txt".format(host.target))
        logger = Logger(log_file)

        process = await create_subprocess_exec(
            *script,
            stdout=PIPE,
            stderr=PIPE
        )
        # NOTE(review): err (stderr) is captured but never used.
        result, err = await process.communicate()

        if process.returncode == 0:
            logger.info("{} {} WHOIS information retrieved".format(COLORED_COMBOS.GOOD, host))
            for line in result.decode().strip().split("\n"):
                if ":" in line:
                    logger.debug(line)

    @classmethod
    def generate_dns_dumpster_mapping(cls, host, sout_logger):
        # Scrape dnsdumpster.com for the host's DNS map image and save it
        # as dns_mapping.png under the host's output directory.
        # Start DNS Dumpster session for the token
        request_handler = RequestHandler()
        dnsdumpster_session = request_handler.get_new_session()
        url = "https://dnsdumpster.com"
        if host.naked:
            target = host.naked
        else:
            target = host.target
        payload = {
            "targetip": target,
            "csrfmiddlewaretoken": None
        }
        sout_logger.info("{} Trying to generate DNS Mapping for {} from DNS dumpster".format(
            COLORED_COMBOS.INFO, host))
        try:
            dnsdumpster_session.get(url, timeout=10)
            jar = dnsdumpster_session.cookies
            # Pull the CSRF token out of the session cookie jar.
            for c in jar:
                if not c.__dict__.get("name") == "csrftoken":
                    continue
                payload["csrfmiddlewaretoken"] = c.__dict__.get("value")
                break

            dnsdumpster_session.post(url, data=payload, headers={"Referer":
                "https://dnsdumpster.com/"})
            # Give dnsdumpster time to render the map image.
            time.sleep(3)
            page = dnsdumpster_session.get("https://dnsdumpster.com/static/map/{}.png".format(target))
            if page.status_code == 200:
                path = HelpUtilities.get_output_path("{}/dns_mapping.png".format(host.target))
                with open(path, "wb") as target_image:
                    target_image.write(page.content)
                sout_logger.info("{} Successfully fetched DNS mapping for {}".format(
                    COLORED_COMBOS.GOOD, host.target)
                )
        except ConnectionError:
            sout_logger.info("{} Failed to generate DNS mapping. A connection error occurred.".format(
                COLORED_COMBOS.BAD))
import ipaddress

from dns import resolver

from libs.logger import logger
from libs import utils
import settings

# Shared resolver; both timeout knobs come from the project settings.
resv = resolver.Resolver()
resv.timeout = settings.DNS_QUERY_TIMEOUT
resv.lifetime = settings.DNS_QUERY_TIMEOUT


def query_a(domains, queried_domains=None, returned_ips=None):
    """
    Return a list of IP addresses/networks defined in A record of mail domain names.

    @domains - a list/tuple/set of mail domain names
    @queried_domains - a set of mail domain names which already queried spf
    @returned_ips - a set of IP addr/networks of queried mail domain names
    """
    ips = set()

    queried_domains = queried_domains or set()
    returned_ips = returned_ips or set()

    # Skip domains that were already handled on a previous pass.
    domains = [d for d in domains if d not in queried_domains]

    for domain in domains:
        try:
            qr = resv.query(domain, 'A')
            # NOTE(review): the function continues beyond this chunk; the
            # rest of the loop body and return are not visible here.
def check_dns_resolution(host_name, dns_servers):
    """Check forward and reverse resolution of host_name using dns_servers

    Returns True when host_name resolves and every resolved address maps
    back to host_name via PTR; logs the problem and returns False
    otherwise.
    """
    # Point the resolver at specified DNS server
    server_ips = []
    for dns_server in dns_servers:
        try:
            server_ips = list(
                a[4][0] for a in socket.getaddrinfo(dns_server, None))
        except socket.error:
            pass
        else:
            # Use the first DNS server hostname that resolves.
            break

    if not server_ips:
        logger.error('Could not resolve any DNS server hostname: %s',
                     dns_servers)
        return False

    resolver = dnsresolver.Resolver()
    resolver.nameservers = server_ips

    logger.debug('Search DNS server %s (%s) for %s', dns_server, server_ips,
                 host_name)

    # Get IP addresses of host_name
    addresses = set()
    for rtype in 'A', 'AAAA':
        try:
            result = resolver.query(host_name, rtype)
        except dnsexception.DNSException:
            rrset = []
        else:
            rrset = result.rrset
        if rrset:
            addresses.update(r.address for r in result.rrset)

    if not addresses:
        logger.error(
            'Could not resolve hostname %s using DNS. '
            'Clients may not function properly. '
            'Please check your DNS setup. '
            '(Note that this check queries IPA DNS directly and '
            'ignores /etc/hosts.)', host_name)
        return False

    no_errors = True

    # Check each of the IP addresses
    checked = set()
    for address in addresses:
        if address in checked:
            continue
        checked.add(address)
        try:
            logger.debug('Check reverse address %s (%s)', address, host_name)
            revname = dnsreversename.from_address(address)
            rrset = resolver.query(revname, 'PTR').rrset
        except Exception as e:
            logger.debug('Check failed: %s %s', type(e).__name__, e)
            logger.error(
                'Reverse DNS resolution of address %s (%s) failed. '
                'Clients may not function properly. '
                'Please check your DNS setup. '
                '(Note that this check queries IPA DNS directly and '
                'ignores /etc/hosts.)', address, host_name)
            no_errors = False
        else:
            host_name_obj = dnsname.from_text(host_name)
            if rrset:
                names = [r.target.to_text() for r in rrset]
            else:
                names = []
            logger.debug('Address %s resolves to: %s. ', address,
                         ', '.join(names))
            # The PTR answers must include host_name itself.
            if not rrset or not any(r.target == host_name_obj
                                    for r in rrset):
                logger.error(
                    'The IP address %s of host %s resolves to: %s. '
                    'Clients may not function properly. '
                    'Please check your DNS setup. '
                    '(Note that this check queries IPA DNS directly and '
                    'ignores /etc/hosts.)', address, host_name,
                    ', '.join(names))
                no_errors = False

    return no_errors
import time
import urllib
import ssl

# If you want pwhois to handle non standard characters in result
# you need to implement this fix on net.py in pythonwhois
# https://github.com/joepie91/python-whois/pull/59

# TODO Static type checking mypy http://mypy-lang.org/examples.html
# TODO Logging instead of debug
# TODO Class
# TODO JSON output flag overriding printout to console
# TODO get_statuscodes is named wrong

# SETTINGS
# Shared resolver pinned to Cloudflare public DNS.
RES = resolver.Resolver()
RES.nameservers = ["1.1.1.1"]
DEBUG = False
# Signals that the async IP lookup has finished.
EVENT_IP = threading.Event()

# Information about the domain is asyncly gatherd here
INFO = {}
SUGGESTIONS = {"error": [], "warning": [], "notice": []}


def main():
    """Main.

    Entry point: read the domain argument, gather the data, analyze it.
    NOTE(review): get_argument, parse_search, get_information and analyze
    are defined elsewhere in this file.
    """
    _domain_ = get_argument(1, None)
    parse_search(_domain_)
    get_information()
    analyze()
#!/bin/python3 # executing this script will generate a lot of errors, ignore them # errors in threads only close the thread that got the error from dns import resolver from threading import Thread from ipaddress import ip_address from urllib.request import urlretrieve as download res = resolver.Resolver(configure=False) res.nameservers = [ '1.1.1.1', '1.0.0.1', '2606:4700:4700::1111', '2606:4700:4700::1001', #Cloudflare '8.8.8.8', '8.8.4.4', '2001:4860:4860::8888', '2001:4860:4860::8844', #Google Public DNS '208.67.222.222', '208.67.220.220', '2620:0:ccc::2', '2620:0:ccd::2', #OpenDNS '209.244.0.3', '209.244.0.4', #Level 3 '64.6.64.6', '64.6.65.6', '2620:74:1b::1:1', '2620:74:1c::2:2', #Verisign '9.9.9.9',
from dns import resolver
from pprint import pprint
import time

host = 'cmcloudlab1931.info'
# Deliberately shadows the imported module name with an instance.
resolver = resolver.Resolver()

# Sample the host's A records ten times, recording every address returned.
observed = []
for _ in range(0, 10):
    result = resolver.resolve(host)
    answer = result.response.answer[0]
    name = answer.name
    for item in answer.items:
        observed.append(str(item))
    time.sleep(0)

# Report how often each distinct address showed up, as a percentage.
total = len(observed)
distribution = dict()
for address in list(set(observed)):
    distribution[address] = f'{(observed.count(address) / total) * 100}%'
pprint(distribution)
exit()

# Unreachable experimentation code left after exit(), preserved as-is.
for i in range(0, 20):
    a = resolver.resolve(host)
    # pprint(dir(a.response))
    # print(a.nameserver)
    # print(a.response.question)
    print(a.response.answer)
from dns import reversename, resolver
import ipaddress
import sys


def name_finder(ip_inner):
    """Print the PTR (reverse DNS) name for *ip_inner*, or "<ip> dead"
    when the lookup fails for any reason."""
    try:
        rev_name = reversename.from_address(str(ip_inner))
        reversed_dns = str(resolver.query(rev_name, 'PTR')[0])
        print(ip_inner, " ", reversed_dns)
    except Exception:
        # Broad catch is intentional: any failure marks the host dead
        # (was a bare `except:`, which also caught KeyboardInterrupt).
        print(ip_inner, ' dead')


if __name__ == '__main__':
    resolver.default_resolver = resolver.Resolver(configure=False)
    if len(sys.argv) != 3:
        print('Usage: <nameserver ip> <ip-address/network_mask>')
        # FIX: the original printed the usage text but kept going,
        # crashing with IndexError on sys.argv[1]; bail out cleanly.
        sys.exit(1)
    resolver.default_resolver.nameservers = [sys.argv[1]]
    # NOTE(review): these set attributes on the dns.resolver module, not
    # on default_resolver — possibly intended for the instance; kept as-is.
    resolver.timeout = 1
    resolver.lifetime = 1
    for ip in ipaddress.IPv4Network(sys.argv[2]):
        name_finder(ip)
# variable for meraki domain domain = 'meraki.com' # variable to keep track of the shard number count = 1 # variable used to keep track of shards that have no records. # after 5 consecutive increments we stop script with the assumption # that there are no more active shards. no_value = 0 # check for presence of flag and provide CNAME and A record for shard if args.m: meraki_shard = args.m dns_resolver = resolver.Resolver() try: response = dns_resolver.resolve(meraki_shard) CNAME = response.canonical_name.to_text() A_record = response.rrset.to_text().split()[-1] print(meraki_shard, CNAME, A_record) except: print(f"{meraki_shard} DNS name does not exist") # run script to get all the CNAME and A records for all the active shards. else: # while loop that keeps running.
def main():
    """Fetch the live master list from the local consul DNS interface and
    print it.

    The nameserver address is templated in by Ansible at deploy time.
    """
    consul_dns = resolver.Resolver()
    consul_dns.port = 8600
    consul_dns.nameservers = ["{{ ansible_default_ipv4['address'] }}"]
    live_masters = get_masters_from_consul(consul_dns)
    print(live_masters)
def process_dmarc_xml(xml_file, output='json', resolve=0, resolve_timeout=2):
    """Parse a DMARC aggregate (rua) XML report and emit its records.

    :param xml_file: path of the XML report to parse (may be rewritten in
                     place when a ParseError line is removed)
    :param output: 'json' for one JSON object per record row, 'kv' for
                   key="value" pairs
    :param resolve: 1 to reverse-resolve each source_ip to a hostname
    :param resolve_timeout: DNS timeout in seconds when resolve == 1
    Malformed files are repaired when possible, otherwise moved into the
    problems directory; fatal errors terminate the process via exit(0).
    """
    # open the provided xml file and read the content into a string.
    try:
        with open(xml_file, 'rb') as content_file:
            script_logger.debug('Reading the XML file content.')
            xml = content_file.read()
    except EnvironmentError:
        # NOTE(review): dmarc_rua_xml is not defined in this function —
        # this log call would raise NameError; likely meant xml_file.
        script_logger.exception(
            'Cannot open file="{0}" traceback='.format(dmarc_rua_xml))
        exit(0)
    try:
        # try to read the xml file
        root = ET.fromstring(xml)
    except Exception as exception:
        # some files are not correctly constructed and that will give a exception
        # if the exception is a parseerror that remove the problem line and try again.
        error = str(type(exception).__name__)
        script_logger.exception('Problem with the xml tree: {0}'.format(error))
        if error == 'ParseError':
            line_number = False
            # ParseError messages embed "line N, column M"; pull out N.
            if 'line' in str(exception) and 'column' in str(exception):
                line_regex = re.search(r'line\s+(\d+)', str(exception))
                line_number = int(line_regex.group(1))
            del xml
            source_file = open(xml_file, 'r')
            s_lines = source_file.readlines()
            source_file.close()
            if line_number:
                script_logger.debug('Problem line: {0}'.format(
                    s_lines[line_number - 1]))
                del s_lines[line_number - 1]
            # Rewrite the file without the offending line, then re-read it.
            with open(xml_file, 'w+') as target_file:
                for line in s_lines:
                    target_file.write(line)
            with open(xml_file, 'rb') as content_file:
                script_logger.debug('Reading the XML file content again.')
                xml = content_file.read()
        else:
            # Unrecoverable parse problem: quarantine the file and stop.
            try:
                problem_dir = os.path.normpath(app_log_dir + os.sep +
                                               'problems')
                problem_file = os.path.basename(xml_file)
                new_problem_file = os.path.normpath(problem_dir + os.sep +
                                                    problem_file)
                os.rename(xml_file, new_problem_file)
                script_logger.warning(
                    'The file is moved to the problem directory please review the file to fix the problem'
                )
            except Exception:
                script_logger.exception(
                    'Could not move file to the problem directory, please remove the file manually'
                )
            exit(0)
    # find the root element of the xml, this should be the feedback element
    try:
        root = ET.fromstring(xml)
        # loop trough the xml and find al the possible items that the xml can have. And store everything
        # in a multidimensional dict. if an item is not found a None value will be set, this will later on be removed
        # this dict will later on either be converted into a json or in key=value pairs.
        # find the 'feedback' item of the xml, and all the items directly below that, that can only occur once
        for feedback in root.iter('feedback'):
            report_defaultdata['feedback']['version'] = feedback.findtext(
                'version', None)
            report_defaultdata['feedback']['file_name'] = str(
                os.path.basename(os.path.normpath(xml_file)))
            # find the report_metadata info
            report_defaultdata['feedback']['report_metadata'][
                'org_name'] = feedback.findtext('report_metadata/org_name',
                                                None)
            report_defaultdata['feedback']['report_metadata'][
                'email'] = feedback.findtext('report_metadata/email', None)
            report_defaultdata['feedback']['report_metadata'][
                'extra_contact_info'] = feedback.findtext(
                    'report_metadata/extra_contact_info', None)
            report_defaultdata['feedback']['report_metadata'][
                'report_id'] = feedback.findtext('report_metadata/report_id',
                                                 None)
            # find the date_range info
            report_defaultdata['feedback']['report_metadata']['date_range'][
                'begin'] = feedback.findtext(
                    'report_metadata/date_range/begin', None)
            report_defaultdata['feedback']['report_metadata']['date_range'][
                'end'] = feedback.findtext('report_metadata/date_range/end',
                                           None)
            # find the policy_published info
            report_defaultdata['feedback']['policy_published'][
                'domain'] = feedback.findtext('policy_published/domain', None)
            report_defaultdata['feedback']['policy_published'][
                'adkim'] = feedback.findtext('policy_published/adkim', None)
            report_defaultdata['feedback']['policy_published'][
                'aspf'] = feedback.findtext('policy_published/aspf', None)
            report_defaultdata['feedback']['policy_published'][
                'p'] = feedback.findtext('policy_published/p', None)
            report_defaultdata['feedback']['policy_published'][
                'sp'] = feedback.findtext('policy_published/sp', None)
            report_defaultdata['feedback']['policy_published'][
                'pct'] = feedback.findtext('policy_published/pct', None)
            # find the record info, this tag can occure multiple times, so loop through all of them
            for record in feedback.iter('record'):
                # Deep copy so each record starts from the feedback-level
                # defaults without cross-contamination.
                report_recorddata = copy.deepcopy(report_defaultdata)
                # find the identifiers per record.
                for identifiers in record.findall('identifiers'):
                    report_recorddata['feedback']['record']['identifiers'][
                        'header_from'] = identifiers.findtext(
                            'header_from', None)
                    report_recorddata['feedback']['record']['identifiers'][
                        'envelope_from'] = identifiers.findtext(
                            'envelope_from', None)
                    report_recorddata['feedback']['record']['identifiers'][
                        'envelope_to'] = identifiers.findtext(
                            'envelope_to', None)
                for dkim in record.findall('./auth_results/dkim'):
                    report_recorddata['feedback']['record']['auth_results'][
                        'dkim']['domain'] = dkim.findtext('domain', None)
                    report_recorddata['feedback']['record']['auth_results'][
                        'dkim']['selector'] = dkim.findtext('selector', None)
                    report_recorddata['feedback']['record']['auth_results'][
                        'dkim']['result'] = dkim.findtext('result', None)
                    report_recorddata['feedback']['record']['auth_results'][
                        'dkim']['human_result'] = dkim.findtext(
                            'human_result', None)
                for spf in record.findall('./auth_results/spf'):
                    report_recorddata['feedback']['record']['auth_results'][
                        'spf']['domain'] = spf.findtext('domain', None)
                    report_recorddata['feedback']['record']['auth_results'][
                        'spf']['scope'] = spf.findtext('scope', None)
                    report_recorddata['feedback']['record']['auth_results'][
                        'spf']['result'] = spf.findtext('result', None)
                # a record can have multiple rows, loop through all of them.
                for row in record.iter('row'):
                    source_ip = row.findtext('source_ip', None)
                    if resolve == 1:
                        from dns import resolver, reversename
                        errors = ''
                        timeout = float(resolve_timeout)
                        # NOTE(review): rebinds the local name `resolver`
                        # from the module to a Resolver instance.
                        resolver = resolver.Resolver()
                        resolver.timeout = timeout
                        resolver.lifetime = timeout
                        try:
                            addr = reversename.from_address(source_ip)
                            answer = resolver.resolve(addr, 'PTR')
                        except Exception as exception:
                            # catch the exeption and give that back (NXDOMAIN/NoAnswer/....)
                            errors = str(type(exception).__name__)
                            script_logger.debug(
                                'There was a problem with the dns query for {0}'
                                .format(source_ip))
                        # Retry once when the first attempt timed out.
                        if errors.lower() == 'timeout':
                            try:
                                addr = reversename.from_address(source_ip)
                                answer = resolver.resolve(addr, 'PTR')
                            except Exception as exception:
                                # catch the exeption and give that back (NXDOMAIN/NoAnswer/....)
                                errors = str(type(exception).__name__)
                        if not errors:
                            # Keep the last PTR rdata as the hostname.
                            for rr in answer:
                                hostname = rr
                        else:
                            # No hostname: record the exception name instead.
                            hostname = errors
                    else:
                        hostname = '-'
                    report_recorddata['feedback']['record']['row'][
                        'source_ip'] = str(source_ip)
                    report_recorddata['feedback']['record']['row'][
                        'source_hostname'] = str(hostname).lower()
                    report_recorddata['feedback']['record']['row'][
                        'count'] = row.findtext('count', None)
                    report_recorddata['feedback']['record']['row'][
                        'policy_evaluated']['disposition'] = row.findtext(
                            'policy_evaluated/disposition', None)
                    report_recorddata['feedback']['record']['row'][
                        'policy_evaluated']['dkim'] = row.findtext(
                            'policy_evaluated/dkim', None)
                    report_recorddata['feedback']['record']['row'][
                        'policy_evaluated']['spf'] = row.findtext(
                            'policy_evaluated/spf', None)
                    report_recorddata['feedback']['record']['row'][
                        'policy_evaluated']['reason']['type'] = row.findtext(
                            'policy_evaluated/reason/type', None)
                    # remove the empty values from the dict
                    report_recorddata = del_none(report_recorddata)
                    if output == 'json':
                        # create a json from the dict
                        jsondata = json.dumps(report_recorddata)
                        result_logger.info(jsondata)
                    elif output == 'kv':
                        # create a 1 dimensional dict from with the keys and values from the multidimensional dict.
                        kvdata = get_kv_dict(report_recorddata)
                        kv = ''
                        for k, v in kvdata.items():
                            kv = kv + k + '="' + v + '", '
                        kv = kv.strip(' ,')
                        result_logger.info(kv)
    except Exception:
        script_logger.exception(
            'A exception occured with file="{0}", traceback='.format(xml_file))
        # Quarantine the file so the next run does not hit it again.
        try:
            problem_dir = os.path.normpath(app_log_dir + os.sep + 'problems')
            problem_file = os.path.basename(xml_file)
            new_problem_file = os.path.normpath(problem_dir + os.sep +
                                                problem_file)
            os.rename(xml_file, new_problem_file)
            script_logger.warning(
                'The file is moved to the problem directory please review the file to fix the problem'
            )
        except Exception:
            script_logger.exception(
                'Could not move file to the problem directory, please remove the file manually'
            )
        exit(0)
def get_dns():
    """Return the nameserver list of a freshly created system resolver
    (i.e. the servers configured in the OS resolver settings)."""
    system_resolver = resolver.Resolver()
    return system_resolver.nameservers