def get_NS(self, qname):
    """Look up the A records for *qname* (the address of an NS host).

    Retries the first configured resolver in ``DNSlist`` up to 3 times on
    timeout.  Returns a list of address strings on success, or
    ``([], NXDOMAIN)`` on failure — NOTE(review): the success and failure
    return shapes differ; callers must handle both.
    """
    # first, get all NS record
    q = message.make_query(qname, dnslib.QTYPE.A)
    IPlist = []
    count = 0
    # Fixed idiom: 'while True and count < 3' — the 'True and' was redundant.
    while count < 3:
        try:
            msg = query.udp(q, DNSlist[0], timeout=1)
        except exception.Timeout:
            count += 1
            continue
        break
    if count >= 3:
        logging.warning("Getting NS(A) %s failed, too many retries", qname)
        return ([], dnslib.RCODE.NXDOMAIN)
    answer = None
    for anss in msg.answer:
        # Keep only A rrsets; the last matching rrset wins.
        if anss.to_rdataset().rdtype == dnslib.QTYPE.A:  # match record type
            answer = anss
    if answer is None:
        logging.warning("Getting NS(A) %s failed, no NS(A)", qname)
        return ([], dnslib.RCODE.NXDOMAIN)
    for ans in answer:
        IPlist.append(ans.to_text())
    return IPlist
def drilldown(base, server, limit, depth=0):
    """Recursively enumerate ip6.arpa PTR names one nibble at a time.

    Queries *base* against *server*; on NOERROR, either records *base*
    (when it has reached *limit* characters) or recurses into all 16
    possible child nibbles.  Results accumulate in the global list ``l``.
    """
    global queries, l, progress, progressperc
    q = message.make_query(base, 'PTR')
    r = tryquery(q, server)
    queries = queries + 1
    # if we get no-error for this node, continue
    if r.rcode() == 0:
        # if we have reached the current depth limit, add base to the result set
        if len(base) == limit:
            l.append(base)
        # else if we have not yet reached the limit, continue on
        if len(base) < limit:
            # for all possible nibble values
            for c in '0123456789abcdef':
                # try to dig down recursively
                try:
                    drilldown(c+'.'+base, server, limit, depth+1)
                except SystemExit:
                    sys.exit(0)
                except KeyboardInterrupt:
                    print >> sys.stderr, '\naborted, partial results follow'
                    pass
                except TimeoutError:
                    print >> sys.stderr, 'timeout at: %s after %s queries' % (base, str(queries))
                    # NOTE(review): the 'pass' below is unreachable — exit() raises SystemExit.
                    exit()
                    pass
                except:
                    # NOTE(review): bare except swallows all other errors for this subtree.
                    print >> sys.stderr, 'error at: %s' % base
                    pass
def query_data(name, server, query_type):
    """Resolve *server* to an IPv4 address and build a query message for *name*.

    Returns the (message, server_address) pair; the query itself is not sent.
    """
    server_address = resolver.resolve(server, "A")[0].address
    query_message = message.make_query(name, query_type)
    return query_message, server_address
def _actually_resolve(self, name, timeout):
    """Resolve *name* to a list of A-record addresses via each nameserver in turn.

    The total *timeout* is split evenly across the configured nameservers.
    Raises NotFound when a matching response has no answer section and
    Timeout(name) on unexpected event types.
    """
    timeout = timeout / float(len(self.nameservers))
    try:
        for server in self.nameservers:
            # Try each nameserver in succession.
            self.addr = server
            query = make_query(name, A)
            send(query.to_wire())
            start = time.time()
            remaining = timeout
            while True:
                # Handle the possibility of responses that are not to our
                # original request - they are ignored and we wait for a
                # response that matches our query.
                item, data = first(datagram=True, sleep=remaining)
                if item == "datagram":
                    response = from_wire(data)
                    if query.is_response(response):
                        if response.answer:
                            a_records = [r for r in response.answer if r.rdtype == A]
                            # NOTE(review): 'item' is rebound by this comprehension,
                            # shadowing the loop variable above.
                            return [item.address for item in a_records[0].items]
                        raise NotFound
                    else:
                        # Not a response to our query - continue waiting for
                        # one that is.
                        remaining = remaining - (time.time() - start)
                elif item == "sleep":
                    break
                else:
                    raise Timeout(name)
    finally:
        # Always restore the primary nameserver address.
        self.addr = self.primary
def fetch(dns_index_req):
    """Resolve one request tuple ``(dns_index, domain, query_type)``.

    Queries ``DNSlist[dns_index]`` with up to 3 retries on timeout.
    Returns ``(ips, rcode)`` where ips is a list of record texts (empty on
    failure or when no rrset of *query_type* is present).
    """
    dns_index = dns_index_req[0]
    domain = dns_index_req[1]
    query_type = dns_index_req[2]
    q = message.make_query(domain, query_type)
    # NOTE(review): rcode of the *query* message, not the response — TODO confirm intent.
    rcode = q.rcode()
    count = 0
    # Fixed idiom: 'while True and count < 3' — the 'True and' was redundant.
    while count < 3:
        try:
            msg = query.udp(q, DNSlist[dns_index], timeout=1)
        except exception.Timeout:
            count += 1
            continue
        break
    if count >= 3:
        return ([], rcode)
    ips = []
    answer = None
    for anss in msg.answer:
        # Keep only rrsets of the requested type; last match wins.
        if anss.to_rdataset().rdtype == query_type:  # match record type
            answer = anss
    if answer is None:
        return (ips, rcode)
    for ans in answer:
        ips.append(ans.to_text())
    return (ips, rcode)
def dns_response(self, domain, nameserver, retries=0):
    """Query *nameserver* for the NS, A and AAAA records of *domain*.

    Retries the whole sequence up to ``constants.REQUEST_TRIES`` times on
    any query failure.  Returns a dict with the concatenated record text
    and a per-type rcode map; on final failure ``rcodes['timeout']`` is True
    and ``records`` is the empty string.
    """
    record_types = (rdatatype.NS, rdatatype.A, rdatatype.AAAA)
    records = []
    rcodes = {}
    for rtype in record_types:
        try:
            request = dnsmessage.make_query(domain, rtype)
            sourceIP = self.get_source_ip()
            response_data = dnsquery.udp(q=request, where=nameserver,
                                         timeout=float(constants.REQUEST_TIMEOUT),
                                         source=sourceIP)
            rcodes[rtype] = response_data.rcode()
            records += response_data.answer + response_data.additional + response_data.authority
        # Fixed: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt and turned them into retries.
        except Exception:
            if retries < int(constants.REQUEST_TRIES):
                return self.dns_response(domain, nameserver, retries + 1)
            rcodes['timeout'] = True
            return {"records": "", "rcodes": rcodes}
    return {
        "records": "\n".join([record.to_text() for record in records]),
        "rcodes": rcodes
    }
def resolve(self, qnames, where, port=53, rdtype=rdatatype.A):
    """Send a query for every name in *qnames* to *where*, rate-limited
    per timeslice, resending and draining replies as it goes."""
    # NOTE(review): leftover debug print — writes to stdout on every call.
    print "AAAAAAAAAAAAAAAAAAAAAAAAAA"
    self.itime = time.time()        # init time: timestamp we started resolving
    self.stime = 0                  # send time: smalles timestamp for this
                                    # timeslice we send a query
    self.btime = 0                  # send time: smalles timestamp for this burst
    self.qtime = 0                  # query time: largest timestamp we send a query
    self.qcount = 0                 # queries (== names) asked
    self.rcount = 0                 # reply count
    self.scount = 0                 # send count
    self.scount_slice = 0           # sent count this timeslice
    self.tcount = 0                 # timeout count
    self.bcount = 0                 # burst count this timeslice
    for qname in qnames:
        # check if we can send more queries this timeslice
        if self.scount_slice >= self.max_queries_per_timeslice:
            self._wait_timeslice()
            #self._info()
        # check if we need to re-send any query
        self._resend_queries()
        # if there are to many outstanding queries, wait for one
        if len(self.outstanding_queries) >= self.max_outstanding:
            self._wait_for_outstanding_query()
        # make a new query
        q = message.make_query(qname, rdtype, rdataclass.IN)
        # and send it
        self._send_query(q, where, port)
        # query count
        self.qcount += 1
        # check if a reply is waiting
        self._recv_reply()
    self._wait_for_outstanding_queries()
def query(self, name, src_ip):
    """Send an mDNS A/IN query for *name* from *src_ip*.

    Returns False if the query times out, True otherwise; the response
    content itself is discarded.
    """
    mdns_query = message.make_query(name, 1, 1)
    try:
        query.udp(mdns_query, '224.0.0.251', port=5353, timeout=20,
                  source_port=5353, source=src_ip)
    except dns.exception.Timeout:
        return False
    return True
def __init__(self, server):
    """Async UDP dispatcher that prepares an SRV query for the
    '_xmpp-server._tcp' record of *server* against the system resolver."""
    asyncore.dispatcher.__init__(self)
    self.r = resolver.Resolver()
    self.create_socket(socket.AF_INET, socket.SOCK_DGRAM)
    # "Connect" the UDP socket to the first configured nameserver.
    self.connect((self.r.nameservers[0], self.r.port))
    # Pre-built wire-format query; presumably sent later by a writable handler.
    self.query = message.make_query('_xmpp-server._tcp.%s' % server, rdatatype.SRV, rdataclass.IN).to_wire()
def test_base(base):
    """Return 1 when a PTR query for *base* answers NOERROR, else 0.

    Increments the global query counter as a side effect.
    """
    global queries
    queries += 1
    reply = tryquery(message.make_query(base, 'PTR'), server)
    return 1 if reply.rcode() == 0 else 0
def dnssec_check(domain, nameservers=False):
    """ Dig and use name servers from response to perform a DNSSEC validation
    Arguments:
        domain (str): domain to be validated
        nameservers (dict, optional): {name: ip} for authoritative nameservers
    Returns:
        ((bool), (response or None))
    Raises:
        AttributeError: 'NoneType' object has no attribute 'answer' (res)
        IndexError: list index out of range (res.answer)
    """
    if not nameservers:
        try:
            info = probe(domain)
            # Fixed: domain.strip('www.') stripped the *character set*
            # {'w', '.'} from both ends (e.g. 'web.com' -> 'eb.com');
            # we only want to drop a literal leading 'www.' prefix.
            bare = domain[len('www.'):] if domain.startswith('www.') else domain
            nameservers = info[bare + '.']['NS']
        except Exception as e:
            raise e
    req, res, answered = None, None, False
    for k, v in nameservers.items():
        # get dns key for zone
        req = message.make_query(domain, rdatatype.DNSKEY, want_dnssec=True)
        res = query.udp(req, v)
        # if response code is 0
        if not res.rcode():
            if res.answer:
                # answer will have two RRSETs, DNSKEY and RRSIG
                if len(res.answer) == 2:
                    answered = True
                    break
    if answered:
        # create the dns.name object
        name = dns_name.from_text(domain)
        try:
            dnssec.validate(res.answer[0], res.answer[1], {name: res.answer[0]})
        except dnssec.ValidationFailure:
            # be wary and do cautious something
            return False, res
        except Exception as e:
            raise e
        else:
            # all ok, valid self signed dnssec key for domain
            return True, res
    else:
        return False, None
def get_tld(domain):
    """Ask the root servers for the NS referral of *domain*.

    Tries each root server in order; exits the process if every one of
    them returns a bad response.  Returns the next nameserver, or None
    when no root server could be reached cleanly.
    """
    last_root = ROOT_SERVERS[-1]
    for root in ROOT_SERVERS:
        try:
            response = query.udp(message.make_query(domain, rdatatype.NS), root)
        except query.BadResponse:
            if root == last_root:
                print("Something has gone horribly wrong, all root DNS servers are down.")
                exit(1)
            continue
        return get_next_ns(response)
    print("Cannot get TLD of %s" % domain)
    return None
def names_to_nameservers(names):
    """Yield (name, sorted-NS-target-tuple) for each name in *names*.

    Zone nameserver lists are fetched once per zone and cached; each
    query is sent to one randomly chosen nameserver of the zone.
    """
    cached_zone_ns = {}
    for fqdn in names:
        _, zone = fqdn.split('.', 1)
        if zone not in cached_zone_ns:
            cached_zone_ns[zone] = get_zone_nameservers(zone + '.')
        request = make_query(fqdn + '.', NS)
        reply = dns.query.udp(request, choice(cached_zone_ns[zone]))
        targets = sorted(rr.target.to_text()
                         for rr in reply.authority[0]
                         if rr.rdtype == NS)
        yield (fqdn, tuple(targets))
def run(self):
    """Worker-thread body: resolve one request tuple and push the result
    onto its queue.

    ``self.dns_index_req`` is (dns_index, domain, query_type, queue,
    request, reply_callback, flag).  Retries the upstream in ``DNSlist``
    up to 3 times on timeout.  Puts ``(ips, rcode)`` on the queue; the
    integer return value is only a status code for the thread itself.
    """
    dns_index_req = self.dns_index_req
    dns_index = dns_index_req[0]
    domain = dns_index_req[1]
    query_type = dns_index_req[2]
    queue = dns_index_req[3]
    request = dns_index_req[4]
    reply_callback = dns_index_req[5]
    flag = dns_index_req[6]
    q = message.make_query(domain, query_type)
    # Reuse the client's original transaction id so the reply matches it.
    q.id = request.header.id
    # NOTE(review): rcode of the outgoing query, not of any response.
    rcode = q.rcode()
    count = 0
    start = time.time() * 1000
    while True and count < 3:
        try:
            msg = query.udp(q, DNSlist[dns_index], timeout=1, flag=flag, r_callback=reply_callback)
        except exception.Timeout:
            count += 1
            continue
        break
    if count >= 3:
        logging.warning("Worker thread %d too many retries", dns_index)
        queue.put(([], rcode))
        return rcode
    ips = []
    answer = None
    logging.debug("Worker thread %d gets reply %s", dns_index, msg.answer)
    for anss in msg.answer:
        # Keep only rrsets of the requested type; last match wins.
        if anss.to_rdataset().rdtype == query_type:  # match record type
            answer = anss
    if answer is None:
        logging.warning("Worker thread %d empty response for %s",\
            dns_index, domain)
        queue.put(([], rcode))
        return 1
    for ans in answer:
        ips.append(ans.to_text())
    end = time.time() * 1000
    logging.debug("Worker thread %d got answer, delay: %dms", dns_index, end - start)
    queue.put((ips, rcode))
    #time.sleep(0)
    return 0
def tryquery(target):
    """Resolve *target* as a PTR record against a randomly chosen server.

    Loops forever, rotating to another random server after any timeout or
    error, and returns the first response received.  Increments the
    global query counter once per attempt.
    """
    global queries
    chosen_server = random.choice(dns_server_list)
    ptr_query = message.make_query(target, 'PTR')
    while True:
        queries = queries + 1
        try:
            return query.udp(ptr_query, chosen_server, timeout=2)
        except exception.Timeout:
            # change dns server if the current query timeout
            chosen_server = random.choice(dns_server_list)
        except Exception:
            chosen_server = random.choice(dns_server_list)
def resp_popular_1(self, dgram, data, rtime):
    """ Callback function for a response to a clean cache query """
    csv,probe = data
    # Keep track of the minimum resolution time
    self._min_time = min(self._min_time, rtime)
    # See if we actually got an answer
    self.rcodes[dgram.rcode()] += 1
    # If got an answer, keep track of time
    if dgram.rcode() == dc.NOERROR:
        self.clean_cache.append(rtime)
        # Send follow up query (re-asks the same question to measure the
        # now-prewarmed cache)
        query = dm.make_query(dgram.question[0].name, dgram.question[0].rdtype, dgram.question[0].rdclass)
        probe.send(query, (self.address, 53), self.resp_popular_2, csv)
        self._int_queries += 1
    # Record this response in the 'clean' result set regardless of rcode.
    self._write_resp(dgram, rtime, 'clean', csv)
def resolve_name(name):
    """Resolve *name* (or the PTR name of an IP address) over TCP against the
    configured nameserver and return matching records as sorted text lines.

    Only record types listed in ``_config["dns_rr_types"]`` are included,
    and TXT records are always skipped by the outer guard.
    """
    _log.debug("DNS resolve for %s", name)
    if libnet.is_ipaddr(name):
        name = reversename.from_address(name)
    msg = message.make_query(name, "ANY")
    resp = query.tcp(msg, _config["nameserver"])
    dnslines = []
    for rr in resp.answer:
        rrtype = rdatatype.to_text(rr.rdtype)
        if rrtype in _config["dns_rr_types"]:
            if rrtype != "TXT":
                # Fixed: the old line had a dead ternary
                # (str(r) if rrtype != "TXT" else str(r).tolower()) whose
                # else-branch could never run here and called the
                # nonexistent str.tolower().  NOTE(review): if TXT records
                # were *meant* to be included lowercased, the outer guard
                # must be removed instead — confirm intent.
                dnslines += ["{} {} {}".format(name, rrtype, str(r)) for r in rr]
    dnslines.sort()
    result = "\n".join(dnslines)
    return result
def get_ns(domain):
    """
    Test to get the full zone file
    :param domain: domain name to query NS records for (via 8.8.8.8)
    :return: (info, hosts) — info holds the response text or an error note,
             hosts is always empty here
    """
    info = []
    hosts = []
    try:
        mess = message.make_query(domain, rdatatype.NS)
        response = query.udp(mess, "8.8.8.8")
        info.append("{0}".format(response))
    # Fixed: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        info.append("Error in ns lookup")
    submit = (info, hosts)
    return submit
def resp_popular_1(self, dgram, data, rtime):
    """ Callback function for a response to a clean cache query """
    csv, probe = data
    # Keep track of the minimum resolution time
    self._min_time = min(self._min_time, rtime)
    # See if we actually got an answer
    self.rcodes[dgram.rcode()] += 1
    # If got an answer, keep track of time
    if dgram.rcode() == dc.NOERROR:
        self.clean_cache.append(rtime)
        # Send follow up query (re-asks the same question to measure the
        # now-prewarmed cache)
        query = dm.make_query(dgram.question[0].name, dgram.question[0].rdtype, dgram.question[0].rdclass)
        probe.send(query, (self.address, 53), self.resp_popular_2, csv)
        self._int_queries += 1
    # Record this response in the 'clean' result set regardless of rcode.
    self._write_resp(dgram, rtime, 'clean', csv)
def run(self):
    """Worker-thread body: resolve one request tuple and push the result
    onto its queue.

    ``self.dns_index_req`` is (dns_index, domain, query_type, queue,
    request, reply_callback, flag).  Retries the upstream in ``DNSlist``
    up to 3 times on timeout.  Puts ``(ips, rcode)`` on the queue; the
    integer return value is only a status code for the thread itself.
    """
    dns_index_req = self.dns_index_req
    dns_index = dns_index_req[0]
    domain = dns_index_req[1]
    query_type = dns_index_req[2]
    queue = dns_index_req[3]
    request = dns_index_req[4]
    reply_callback = dns_index_req[5]
    flag = dns_index_req[6]
    q = message.make_query(domain, query_type)
    # Reuse the client's original transaction id so the reply matches it.
    q.id = request.header.id
    # NOTE(review): rcode of the outgoing query, not of any response.
    rcode = q.rcode()
    count = 0
    start = time.time()*1000
    while True and count < 3:
        try:
            msg = query.udp(q, DNSlist[dns_index], timeout=1, flag=flag, r_callback=reply_callback)
        except exception.Timeout:
            count += 1
            continue
        break
    if count >= 3:
        logging.warning("Worker thread %d too many retries", dns_index)
        queue.put(([], rcode))
        return rcode
    ips = []
    answer = None
    logging.debug("Worker thread %d gets reply %s", dns_index, msg.answer)
    for anss in msg.answer:
        # Keep only rrsets of the requested type; last match wins.
        if anss.to_rdataset().rdtype == query_type:  # match record type
            answer = anss
    if answer is None:
        logging.warning("Worker thread %d empty response for %s",\
            dns_index, domain)
        queue.put(([], rcode))
        return 1
    for ans in answer:
        ips.append(ans.to_text())
    end = time.time()*1000
    logging.debug("Worker thread %d got answer, delay: %dms", dns_index, end-start)
    queue.put((ips, rcode))
    #time.sleep(0)
    return 0
def drilldown(base, server, limit, depth=0):
    """Recursively enumerate ip6.arpa PTR names one nibble at a time,
    storing parsed results via store() and tracking progress per depth."""
    global queries, l, progress, progressperc, cont, results, autogen
    q = message.make_query(base, 'PTR')
    r = tryquery(q, server)
    queries = queries + 1
    if r.rcode() == 0:
        # Leaf reached: parse and persist the record.
        if len(base) == limit:
            d = parse_to_dict(r)
            d['ARPA'] = base
            store(d)
            results = results + 1
        # Otherwise recurse into every possible child nibble.
        if len(base) < limit:
            for c in '0123456789abcdef':
                if depth < len(progress):
                    progress[depth] = int(c, 16)
                drilldown(c + '.' + base, server, limit, depth + 1)
def create_answer(name, rrset):
    """Build a synthetic resolver Answer wrapping *rrset* for *name*.

    Constructs a matching query/response pair, installs *rrset* as the
    sole answer, and returns the resulting Answer object.
    """
    rdtype, rdclass = rrset.rdtype, rrset.rdclass
    if not isinstance(name, Name):
        name = from_text(name)
    synthetic_query = make_query(name, rdtype, rdclass)
    response = make_response(synthetic_query)
    response.answer = [rrset]
    response.index = {(ANSWER, name, rdclass, rdtype, 0, None): rrset}
    response.find_rrset(response.answer, name, rdclass=rdclass, rdtype=rdtype)
    return Answer(name, RdataType.make(rdtype), RdataClass.make(rdclass), response)
async def query_worker(self, host, q_type):
    """Resolve *host* ('A' or 'AAAA') over plain UDP against the
    configured DNS server and return the decoded result.

    Returns None (implicitly) on any failure, after cleaning up the
    socket if one was opened.
    """
    s = None  # Fixed: ensure 's' exists for the except path — previously a
              # failure before socket creation raised NameError in cleanup.
    try:
        if q_type == 'A':
            mq_type = 1
        elif q_type == 'AAAA':
            mq_type = 28
        else:
            # Fixed: an unknown q_type used to fall through and raise
            # UnboundLocalError on mq_type; fail explicitly instead
            # (still swallowed by the handler below, as before).
            raise ValueError('unsupported query type: %s' % q_type)
        query = message.make_query(host.decode('utf-8'), mq_type)
        query = query.to_wire()
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        await self.loop.sock_connect(s, (self.config['dns'], 53))
        await self.loop.sock_sendall(s, query)
        result = await asyncio.wait_for(self.loop.sock_recv(s, 1024), 4)
        await self.clean_up(s, None)
        result = message.from_wire(result)
        return self.decode(str(result), q_type)
    except Exception as error:
        traceback.clear_frames(error.__traceback__)
        error.__traceback__ = None
        if s is not None:
            await self.clean_up(s, None)
def run(self):
    """Worker-thread body: query a single nameserver and push the result
    onto its queue.

    ``self.query_info`` is (NS, domain, query_type, queue).  Retries the
    nameserver up to 3 times on timeout.  Puts ``(ips, rcode)`` on the
    queue; the integer return value is only a thread status code.
    """
    query_info = self.query_info
    NS = query_info[0]
    domain = query_info[1]
    query_type = query_info[2]
    queue = query_info[3]
    q = message.make_query(domain, query_type)
    # NOTE(review): rcode of the outgoing query, not of any response.
    rcode = q.rcode()
    count = 0
    start = time.time()*1000
    while True and count < 3:
        try:
            msg = query.udp(q, NS, timeout=1)
        except exception.Timeout:
            count += 1
            continue
        break
    if count >= 3:
        logging.warning("Worker thread for %s, too many retries", NS)
        queue.put(([], rcode))
        return rcode
    ips = []
    answer = None
    logging.debug("Worker thread for %s gets reply %s", NS, msg.answer)
    for anss in msg.answer:
        # Keep only rrsets of the requested type; last match wins.
        if anss.to_rdataset().rdtype == query_type:  # match record type
            answer = anss
    if answer is None:
        logging.warning("Worker thread for %s empty response for %s",\
            NS, domain)
        queue.put(([], rcode))
        return 1
    for ans in answer:
        ips.append(ans.to_text())
    end = time.time()*1000
    logging.debug("Worker thread for %s got answer, delay: %dms", NS, end-start)
    queue.put((ips, rcode))
    #time.sleep(0)
    return 0
def recursive_query(domain, ns):
    """Iteratively chase an A record for *domain* starting at nameserver *ns*,
    following CNAMEs and authority referrals recursively.

    Returns a list of answer rrsets.
    """
    res = query.udp(message.make_query(domain, rdatatype.A), ns)
    answers = []
    if res.answer:
        # Direct answer; follow a CNAME chain if one is present.
        answers.append(res.answer[0])
        cname = get_cname(res,domain)
        if cname:
            answers += recursive_query(cname, get_tld(cname))
            return answers
        else:
            return answers
    elif not res.additional:
        # No glue records: resolve the authority nameserver first, then retry.
        authority = get_authority(res, domain)
        if authority:
            ns = recursive_query(authority, get_tld(authority))
            if ns:
                return recursive_query(domain, ns[0][0].to_text())
        # NOTE(review): res.answer is empty on this path, so this append
        # raises IndexError — confirm intended fallback behavior.
        answers.append(res.answer[0])
        return answers
    else:
        # Glue present: descend to the next referred nameserver.
        return recursive_query(domain, get_next_ns(res))
def drilldown(base, server, limit, depth=0):
    """Recursively enumerate ip6.arpa PTR names one nibble at a time,
    printing a progress line to stderr whenever the deepest level rolls over."""
    global queries, l, progress, progressperc
    if depth == len(progress):
        # Recompute overall completion from the per-depth nibble progress.
        progressperc = 0
        for i in range(len(progress)):
            progressperc = progressperc + (progress[i] / (16.0**(i+1)))*100
        print >> sys.stderr, '\r%*s, %s queries done, %s found, %.2f%% done' % (limit, base, queries, len(l), progressperc),
    q = message.make_query(base, 'PTR')
    r = tryquery(q, server)
    queries = queries + 1
    # print '%s: %s' % (base, r.rcode())
    if r.rcode() == 0:
        # Leaf reached: record this name.
        if len(base) == limit:
            l.append(base)
        # Otherwise recurse into every possible child nibble.
        if len(base) < limit:
            for c in '0123456789abcdef':
                if depth < len(progress):
                    progress[depth]=int(c, 16)
                    #print "progress[%s]=%s" % (depth, progress[depth])
                drilldown(c+'.'+base, server, limit, depth+1)
def udp_timing(qname, where, rdtype=rdatatype.A, rdclass=rdataclass.IN,
               timeout=5.0, port=53, af=None, source=None, source_port=0,
               ignore_unexpected=False, one_rr_per_rrset=False):
    """Send one UDP query and time the round trip.

    Returns ``((response, query, t_send, t_recv), rc)`` where rc is one of
    'OK', 'UnexpectedSource', 'BadResponse' or 'FAIL'.  Uses dnspython's
    private query helpers for socket readiness and timeout bookkeeping.
    """
    rc = 'FAIL'
    q = message.make_query(qname, rdtype, rdclass)
    wire = q.to_wire()
    (af, destination, source) = query._destination_and_source(af, where, port,
                                                              source, source_port)
    s = socket.socket(af, socket.SOCK_DGRAM, 0)
    try:
        expiration = query._compute_expiration(timeout)
        s.setblocking(0)
        if source is not None:
            s.bind(source)
        query._wait_for_writable(s, expiration)
        t1 = time()
        s.sendto(wire, destination)
        while 1:
            query._wait_for_readable(s, expiration)
            (wire, from_address) = s.recvfrom(65535)
            t2 = time()
            # Fixed: the previous control flow set rc='OK' and then
            # unconditionally overwrote it with 'UnexpectedSource' whenever
            # ignore_unexpected was False.  Restored standard semantics:
            # a packet from the expected source succeeds; an unexpected
            # source fails unless ignore_unexpected asks us to keep waiting.
            if from_address == destination or \
               (dns.inet.is_multicast(where) and
                    from_address[1] == destination[1]):
                rc = 'OK'
                break
            if not ignore_unexpected:
                rc = 'UnexpectedSource'
                break
    finally:
        s.close()
    r = message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                          one_rr_per_rrset=one_rr_per_rrset)
    if not q.is_response(r):
        rc = 'BadResponse'
    return (r, q, t1, t2), rc
def drilldown(base, server, limit, depth=0):
    """Recursively enumerate ip6.arpa PTR names one nibble at a time,
    printing a progress line to stderr whenever the deepest level rolls over."""
    global queries, l, progress, progressperc
    if depth == len(progress):
        # Recompute overall completion from the per-depth nibble progress.
        progressperc = 0
        for i in range(len(progress)):
            progressperc = progressperc + (progress[i] / (16.0**(i + 1))) * 100
        print >> sys.stderr, '\r%*s, %s queries done, %s found, %.2f%% done' % (
            limit, base, queries, len(l), progressperc),
    q = message.make_query(base, 'PTR')
    r = tryquery(q, server)
    queries = queries + 1
    # print '%s: %s' % (base, r.rcode())
    if r.rcode() == 0:
        # Leaf reached: record this name.
        if len(base) == limit:
            l.append(base)
        # Otherwise recurse into every possible child nibble.
        if len(base) < limit:
            for c in '0123456789abcdef':
                if depth < len(progress):
                    progress[depth] = int(c, 16)
                    #print "progress[%s]=%s" % (depth, progress[depth])
                drilldown(c + '.' + base, server, limit, depth + 1)
def doh_query(domain, dnsname, detected):
    """Check *domain* against the DoH service named *dnsname* and record the
    verdict ('Good', 'Malware', error states) into the *detected* dict.

    Returns *detected* after adding an entry for *dnsname*.
    """
    # Renamed from 'message' to avoid shadowing the module-level name.
    query_msg = dns.make_query(domain, 1)
    dns_req = base64.urlsafe_b64encode(query_msg.to_wire()).\
        decode("UTF8").rstrip("=")
    try:
        resp = requests.get(
            dnssec[dnsname]['url'],
            params={"dns": dns_req},
            headers={"Content-type": "application/dns-message"})
    except requests.RequestException:
        detected[dnsname] = dict(report_DoH='Error with DNSSec Service',
                                 domain=domain)
        return detected
    if "application/dns-message" not in resp.headers["Content-Type"]:
        detected[dnsname] = dict(report_DoH='DNS is not response',
                                 domain=domain,
                                 A='')
        return detected
    answers = [
        response.to_text().split("\n")
        for response in dns.from_wire(resp.content).answer
    ]
    # Fixed: an empty answer section used to raise IndexError on answers[0]
    # before the 'Domain does not exist' branch could ever run.
    ips = [answer.split()[4] for answer in answers[0]] if answers else []
    if not ips:
        detected[dnsname] = dict(report_DoH='Domain does not exist',
                                 domain=domain,
                                 A='')
    elif dnssec[dnsname]['malware_ip'] in ips:
        detected[dnsname] = dict(report_DoH='Malware', domain=domain, A='')
    else:
        detected[dnsname] = dict(report_DoH='Good', domain=domain, A=ips)
    return detected
def _actually_resolve(self, name, timeout):
    """Resolve *name* to a list of A-record addresses via each nameserver in turn.

    The total *timeout* is split evenly across the configured nameservers.
    Raises NotFound when a matching response has no answer section and
    Timeout(name) on unexpected event types.
    """
    timeout = timeout / float(len(self.nameservers))
    try:
        for server in self.nameservers:
            # Try each nameserver in succession.
            self.addr = server
            query = make_query(name, A)
            send(query.to_wire())
            start = time.time()
            remaining = timeout
            while True:
                # Handle the possibility of responses that are not to our
                # original request - they are ignored and we wait for a
                # response that matches our query.
                item, data = first(datagram=True, sleep=remaining)
                if item == 'datagram':
                    response = from_wire(data)
                    if query.is_response(response):
                        if response.answer:
                            a_records = [
                                r for r in response.answer if r.rdtype == A
                            ]
                            # NOTE(review): 'item' is rebound by this
                            # comprehension, shadowing the loop variable.
                            return [
                                item.address for item in a_records[0].items
                            ]
                        raise NotFound
                    else:
                        # Not a response to our query - continue waiting for
                        # one that is.
                        remaining = remaining - (time.time() - start)
                elif item == 'sleep':
                    break
                else:
                    raise Timeout(name)
    finally:
        # Always restore the primary nameserver address.
        self.addr = self.primary
async def _get_dns_response(qname, record_type):
    """Round-trip a DNS query for *qname*/*record_type* through the
    tunneling handler and return the parsed response message."""
    wire_query = message.make_query(qname, record_type).to_wire()
    wire_response = await dns_c2.handler.generate_dns_tunneling_response_bytes(wire_query)
    return message.from_wire(wire_response)
def dnsck_query(
    dns_server: str,
    dns_query: str,
    record_type: str,
    iterations: int,
    tcp: bool = False,
    nosleep: bool = False,
) -> int:
    """Perform a DNS query for a set number of iterations.

    Args:
        dns_server (str): IP address of server.
        dns_query (str): Query to lookup.
        record_type (str): Record type.
        iterations (int): Number of iterations.
        tcp (bool): Use TCP for query.
        nosleep (bool): Disable sleep.

    Returns:
        int: Number of errors.
    """
    result_code_dict: DefaultDict[str, int] = defaultdict(int)
    query_times: List[float] = []
    record_number = 0  # type: int
    response_errors = 0  # type: int
    iteration_count = 0  # type: int
    try:
        make_dns_query = message.make_query(dns_query,
                                            record_type.upper(),
                                            use_edns=True)
    except rdatatype.UnknownRdatatype:
        print("Unknown record type, try again.")
        sys.exit(1)
    print(
        f"Performing {iterations} queries to server {dns_server} for domain {dns_query}",
        f"with record type {record_type.upper()}.\n",
    )
    try:
        for iteration in range(iterations):
            print(f"[Query {iteration + 1} of {iterations}]")
            try:
                if tcp:
                    dns_response = query.tcp(make_dns_query, dns_server, timeout=10)
                else:
                    dns_response = query.udp(make_dns_query, dns_server, timeout=10)
                if dns_response.answer:
                    for answer in dns_response.answer:
                        print(answer)
                        record_number = len(answer)
                else:
                    print("No records returned.")
                elapsed_time = dns_response.time * 1000  # type: float
                # Responses slower than 500 ms count as degraded/errors.
                if elapsed_time < 500:
                    result_code = rcode.to_text(dns_response.rcode())  # type: str
                else:
                    result_code = "Degraded"
                    response_errors += 1
                result_code_dict[result_code] += 1
                iteration_count += 1
            except exception.Timeout:
                print("Query timeout.")
                result_code = "Timeout"
                result_code_dict[result_code] += 1
                elapsed_time = 10000
                iteration_count += 1
                response_errors += 1
            if not nosleep:
                time.sleep(1)
            query_times.append(elapsed_time)
            print(f"Records returned: {record_number}")
            print(f"Response time: {elapsed_time:.2f} ms")
            print(f"Response status: {result_code}\n")
    except KeyboardInterrupt:
        print("Program terminating...")
    print("Response status breakdown:")
    for query_rcode, count in result_code_dict.items():
        print(f"{count} {query_rcode}")
    # Fixed: a Ctrl-C before the first query completed used to raise
    # ZeroDivisionError here (response_errors / iteration_count and the
    # average over an empty query_times list).
    if iteration_count:
        print(
            f"\nSummary: Performed {iteration_count} queries to server {dns_server}",
            f"for domain {dns_query} with record type {record_type.upper()}.",
            f"\nResponse errors: {response_errors / iteration_count * 100:.2f}%",
        )
    if query_times:
        print(
            f"Average response time: {sum(query_times) / len(query_times):.2f} ms\n"
        )
    return response_errors
def attack(domain_name):
    """Fire a single A-record DNS query for *domain_name* at a hard-coded
    host on port 80; the response is discarded."""
    target_host = '47.104.185.138'
    target_port = 80
    query.udp(message.make_query(domain_name, 'A'), target_host, target_port)
def _get_dns_response(qname, record_type):
    """Round-trip a DNS query for *qname*/*record_type* through the
    tunneling handler (driving the event loop) and return the parsed
    response message."""
    wire_query = message.make_query(qname, record_type).to_wire()
    wire_response = loop.run_until_complete(
        dns_c2.handler.generate_dns_tunneling_response_bytes(wire_query))
    return message.from_wire(wire_response)
def padencode(domain):
    """Return the hex encoding of the text form of an A query for *domain*."""
    from dns import message
    query_text = message.make_query(domain, 'A').to_text()
    return binascii.hexlify(query_text.encode())
def main(bind, source, output, csv, progress):
    """
    Run the tests.  There are three stages:
    1) sanity check: confirm that the resolvers will actually answer queries and
    that the answers look legitimate.
    2) query for each name once to test response time with cache in 'real' state.
    Repeat query a second time to test response time with prewarmed cache.
    3) Compute statistics once all results fetched.
    """
    # Output detailed results to a csv file
    if csv:
        print >>csv, ','.join(('Provider', 'IP', 'Hostname', 'Type', 'Test', 'Duration', \
            'RCode', 'Num_Answers', 'TTL', 'Responses'))
    probe = Probe(bind)
    probe.run()
    # Test each of the sanity hostnames
    for i, sanity in enumerate(source.sanity):
        if progress:
            output.write('\rrunning sanity checks... ( {0} / {1} )'.format(
                i + 1, len(source.sanity)))
            output.flush()
        for service in source.recursives:
            for recursive in service.recursives:
                query = dm.make_query(sanity.hostname, dt.A)
                probe.send(query, (recursive.address, 53),
                           recursive.resp_sanity_1, (csv, sanity.addresses))
                recursive.new_query()
                # Small pacing delay between individual sends.
                time.sleep(0.01)
        time.sleep(0.1)
    # Allow in-flight sanity responses to drain.
    time.sleep(1)
    # Send queries for each of the popular hostnames
    # A response to any of these queries will invoke a follow-up query
    for i, popular in enumerate(source.popular):
        if progress:
            output.write('\rrunning performance tests... ( {0} / {1} )'.format(
                i + 1, len(source.popular)))
            output.flush()
        for service in source.recursives:
            for recursive in service.recursives:
                query = dm.make_query(popular, dt.A)
                probe.send(query, (recursive.address, 53),
                           recursive.resp_popular_1, (csv, probe))
                recursive.new_query()
                time.sleep(0.01)
        time.sleep(0.1)
    # Allow in-flight performance responses (and follow-ups) to drain.
    time.sleep(2)
    probe.close()
    if progress:
        output.write('\r')
        output.flush()
    # Ignore resolvers that we never received responses from
    rdns = []
    for service in source.recursives:
        for recursive in service.recursives:
            if recursive.rcodes[dc.NOERROR] > 0:
                rdns.append(recursive)
            else:
                print >> sys.stderr, 'Received no answers from %s, dropping from consideration.' % (
                    recursive.address)
    if len(rdns) == 0:
        print >> output, 'No RDNS to consider!'
        return
    # Formating output
    lines = []
    line = [
        'IP_Address', 'Provider', 'Mean', 'Median', 'SD', 'MD', 'Tail',
        'Prewarm', 'Minimum'
    ]
    lines.append((line, ''))
    lengths = [len(col) for col in line]
    # Rank resolvers along several dimensions; 'typical' orders the table.
    typical = sorted(rdns, key=lambda r: r.clean_stats.median)
    tail = sorted(rdns, key=lambda r: r.clean_stats.md_tail_mean)[0]
    shortest = sorted(rdns, key=lambda r: r.min_time)[0]
    prewarm = sorted(rdns, key=lambda r: r.prewarm_stats.median)[0]
    for r in typical:
        postfix = []
        if r == typical[0]:
            postfix.append('best typical performance')
        if r == tail:
            postfix.append('best tail performance')
        if r == shortest:
            postfix.append('fastest network path')
        if r == prewarm:
            postfix.append('best prewarmed performance')
        if len(postfix) > 0:
            postfix = '<-- ' + ', '.join(postfix)
        else:
            postfix = ''
        line = [r.address, r.service.name]
        line.extend(map(lambda v: format(v, '.3f'),
            [r.clean_stats.mean, r.clean_stats.median, r.clean_stats.sd, \
            r.clean_stats.md, r.clean_stats.md_tail_median, r.prewarm_stats.median, r.min_time]))
        lines.append((line, postfix))
        # Track the widest value seen per column for alignment.
        lengths = [max(lengths[i], len(line[i])) for i in range(len(lengths))]
    for line, postfix in lines:
        print >> output, ' '.join([
            '{:>{width}}'.format(v, width=w) for v, w in zip(line, lengths)
        ]).rstrip(), postfix
authority = get_authority(res, domain) if authority: ns = recursive_query(authority, get_tld(authority)) if ns: return recursive_query(domain, ns[0][0].to_text()) answers.append(res.answer[0]) return answers else: return recursive_query(domain, get_next_ns(res)) if __name__ == "__main__": if len(sys.argv) < 2: print(USAGE) exit(1) domain = sys.argv[1] question = message.make_query(domain, rdatatype.A).question[0] when = datetime.datetime.now().ctime() start = time.time() answers = recursive_query(domain, get_tld(domain)) end = time.time() query_time = (end-start)*1000 print("\nQUESTION SECTION:") print(question) print("\nANSWER SECTION:") for a in answers: print(a) print("\nQuery time: %.2fms" % query_time) print("WHEN: %s\n" % when)
def collective_query(zone, rrtype, name_servers, answerproc=None):
    """Query every nameserver in *name_servers* for (*zone*, *rrtype*) with
    EDNS/DNSSEC, retrying each server's addresses with exponential backoff.

    Returns a list with one entry per queried server (the raw or
    *answerproc*-processed answer section, or None), or None when there
    were no servers / no results.
    """
    if name_servers is None:
        return None
    retval = []
    # For now: Serial query
    for ns in name_servers:
        # Within a NS record, alternative address may be available.
        # We are going to assume that they are equivalent, so any one
        # of the addresses may provide an answer.
        res = resolver.Resolver(configure=False)
        res.use_edns(0, 0, 4096)
        nsas = []
        try:
            for nsa in local_resolver.query(name.from_text(ns), rdtype=rdatatype.AAAA):
                nsas.append(str(nsa))
        except resolver.NXDOMAIN:
            pass
        try:
            for nsa in local_resolver.query(name.from_text(ns), rdtype=rdatatype.A):
                nsas.append(str(nsa))
        except resolver.NXDOMAIN:
            pass
        if len(nsas) > 0:
            request = message.make_query(
                name.from_text(zone), rrtype, rdataclass.IN,
                use_edns=True,
                # endsflags=0,
                payload=4096,
                want_dnssec=True)
            backoff = 0.10
            response = None
            done = False
            start = time.time()
            timeout = local_resolver._compute_timeout(start)
            while (response is None) and (not done):
                for nsa in nsas:
                    try:
                        response = query.udp(request, nsa, timeout)
                        errcode = response.rcode()
                        if errcode == rcode.NOERROR:
                            done = True
                            break
                        if errcode == rcode.YXDOMAIN:
                            raise YXDOMAIN
                        if errcode == rcode.NXDOMAIN:
                            break
                    except:
                        # NOTE(review): bare except — also hides the YXDOMAIN
                        # raised just above and retries instead.
                        response = None
                        continue
                try:
                    timeout = local_resolver._compute_timeout(start)
                except exception.Timeout:
                    # Overall deadline exhausted; stop retrying this server.
                    done = True
                    break
                # Exponential backoff between retry rounds, capped by the
                # remaining timeout.
                sleep_time = min(timeout, backoff)
                time.sleep(sleep_time)
                backoff = backoff * 2
            if response is None:
                retval.append(None)
            else:
                print 'Answer is:', response.answer
                if answerproc is None:
                    answerproc = lambda x: x
                retval.append(answerproc(response.answer))
    if len(retval) == 0:
        return None
    return retval
def main(bind, source, output, csv, progress):
    """
    Run the tests.  There are three stages:
    1) sanity check: confirm that the resolvers will actually answer queries and
    that the answers look legitimate.
    2) query for each name once to test response time with cache in 'real' state.
    Repeat query a second time to test response time with prewarmed cache.
    3) Compute statistics once all results fetched.
    """
    # Output detailed results to a csv file
    if csv:
        print >>csv, ','.join(('Provider', 'IP', 'Hostname', 'Type', 'Test', 'Duration', \
            'RCode', 'Num_Answers', 'TTL', 'Responses'))
    probe = Probe(bind)
    probe.run()
    # Test each of the sanity hostnames
    for i,sanity in enumerate(source.sanity):
        if progress:
            output.write('\rrunning sanity checks... ( {0} / {1} )'.format(i+1, len(source.sanity)))
            output.flush()
        for service in source.recursives:
            for recursive in service.recursives:
                query = dm.make_query(sanity.hostname, dt.A)
                probe.send(query, (recursive.address, 53), recursive.resp_sanity_1, (csv, sanity.addresses))
                recursive.new_query()
                # Small pacing delay between individual sends.
                time.sleep(0.01)
        time.sleep(0.1)
    # Allow in-flight sanity responses to drain.
    time.sleep(1)
    # Send queries for each of the popular hostnames
    # A response to any of these queries will invoke a follow-up query
    for i,popular in enumerate(source.popular):
        if progress:
            output.write('\rrunning performance tests... ( {0} / {1} )'.format(i+1, len(source.popular)))
            output.flush()
        for service in source.recursives:
            for recursive in service.recursives:
                query = dm.make_query(popular, dt.A)
                probe.send(query, (recursive.address, 53), recursive.resp_popular_1, (csv, probe))
                recursive.new_query()
                time.sleep(0.01)
        time.sleep(0.1)
    # Allow in-flight performance responses (and follow-ups) to drain.
    time.sleep(2)
    probe.close()
    if progress:
        output.write('\r')
        output.flush()
    # Ignore resolvers that we never received responses from
    rdns = []
    for service in source.recursives:
        for recursive in service.recursives:
            if recursive.rcodes[dc.NOERROR] > 0:
                rdns.append(recursive)
            else:
                print >>sys.stderr, 'Received no answers from %s, dropping from consideration.' % (recursive.address)
    if len(rdns) == 0:
        print >>output, 'No RDNS to consider!'
        return
    # Formating output
    lines = []
    line = ['IP_Address', 'Provider', 'Mean', 'Median', 'SD', 'MD', 'Tail', 'Prewarm', 'Minimum']
    lines.append((line, ''))
    lengths = [len(col) for col in line]
    # Rank resolvers along several dimensions; 'typical' orders the table.
    typical = sorted(rdns, key = lambda r: r.clean_stats.median)
    tail = sorted(rdns, key = lambda r: r.clean_stats.md_tail_mean)[0]
    shortest = sorted(rdns, key = lambda r: r.min_time)[0]
    prewarm = sorted(rdns, key = lambda r: r.prewarm_stats.median)[0]
    for r in typical:
        postfix = []
        if r == typical[0]:
            postfix.append('best typical performance')
        if r == tail:
            postfix.append('best tail performance')
        if r == shortest:
            postfix.append('fastest network path')
        if r == prewarm:
            postfix.append('best prewarmed performance')
        if len(postfix) > 0:
            postfix = '<-- ' + ', '.join(postfix)
        else:
            postfix = ''
        line = [r.address, r.service.name]
        line.extend(map(lambda v: format(v, '.3f'),
            [r.clean_stats.mean, r.clean_stats.median, r.clean_stats.sd, \
            r.clean_stats.md, r.clean_stats.md_tail_median, r.prewarm_stats.median, r.min_time]))
        lines.append((line,postfix))
        # Track the widest value seen per column for alignment.
        lengths = [max(lengths[i], len(line[i])) for i in range(len(lengths))]
    for line,postfix in lines:
        print >>output, ' '.join(['{:>{width}}'.format(v, width = w) for v,w in zip(line,lengths)]).rstrip(), postfix
def makeQuery(self, server):
    """Build the SRV lookup for *server*'s XMPP server record and cache
    its wire-format bytes on ``self.query``."""
    srv_name = '_xmpp-server._tcp.%s' % server
    request = message.make_query(srv_name, rdatatype.SRV, rdataclass.IN)
    self.query = request.to_wire()
def probe(domain):
    """ Recursive query similar to dig+trace from a root server.

    Arguments:
        domain (str): full domain name

    Returns:
        {
          'domain': domain,
          'root_ns': ns,
          For each domain part (str: domain part name): {
            'SOA': {} or {
              'mname': (str: record mname),
              'rname': (str: record rname),
              'serial': (str: record serial,
              'refresh': (int: record refresh),
              'retry': (int: record retry),
              'expire': (int: record expire),
              'default_ttl': (int: record minimum)
            },
            'A': {} or {
              For each name server in dns response
              (str: name server name): (str: name server ip)
            },
            'NS': {} or {
              For each name server in dns response
              (str: name server name): (str: name server ip)
            },
            'timeout': (bool),
            'ns_queried': '(name server ip after loop)',
            'TXT': ['records', 'in', 'txt']
          }
        }

    Raises:
        ValueError: (domain, 'not a valid domain name')
    """
    results = {'domain': domain}
    if domain_re(domain):
        parts = parse(domain)
        # Begin the trace at a randomly chosen root server.
        ns = choice(root_servers)
        results['root_ns'] = ns
        # parts[0] is presumably the root label ('.') -- TODO confirm
        # against parse()'s output.
        for part in parts[1:]:
            results[part] = {}
            # NOTE(review): the assignment above is immediately replaced by
            # the one below and has no effect.
            results[part] = {'SOA': {}, 'A': {}, 'NS': {}, 'timeout': False}
            name = dns_name.from_text(part)
            req = message.make_query(name, rdatatype.NS)
            req_txt = message.make_query(name, rdatatype.TXT)
            try:
                # `ns` carries over from the previous iteration, so each part
                # is asked of the name server discovered for its parent.
                res = query.udp(req, ns, timeout=5)
                res_txt = query.udp(req_txt, ns, timeout=5)
            except dns.exception.Timeout as e:
                # if timeout, skip the response
                results[part]['timeout'] = True
                logger.log(logger.level, e)
                continue
            if res:
                # NOTE(review): `res.rcode` is a bound method and therefore
                # always truthy, so the else-branch ('rcode not in response')
                # below is unreachable. Likely meant `res.rcode()` or a
                # hasattr() check -- confirm intent before changing.
                if res.rcode:
                    rcode = res.rcode()
                    if rcode != dns_rcode.NOERROR:
                        # Log NXDOMAIN specially; other errors get the
                        # textual rcode. Either way, skip this part.
                        if rcode == dns_rcode.NXDOMAIN:
                            e = Exception(f'{part} does not exist')
                        else:
                            e = Exception(dns_rcode.to_text(rcode))
                        logger.log(logger.level, e)
                        continue
                else:
                    e = Exception('rcode not in response')
                    logger.log(logger.level, e)
                    continue
                # Pick the section to scan: authority first, then additional,
                # then answer.
                rrsets = None
                if res.authority:
                    rrsets = res.authority
                elif res.additional:
                    # NOTE(review): res.additional is already a list of
                    # rrsets; wrapping it in another list makes the inner
                    # loop iterate rrsets rather than records -- confirm.
                    rrsets = [res.additional]
                else:
                    rrsets = res.answer
                for rrset in rrsets:
                    for rr in rrset:
                        # check for start of authority
                        if rr.rdtype == rdatatype.SOA:
                            for k in ('mname', 'rname', 'serial', 'refresh', 'retry',
                                      'expire', 'minimum'):
                                # 'minimum' is exposed under the friendlier
                                # key 'default_ttl'.
                                results[part]['SOA'][k if k != 'minimum'
                                                     else 'default_ttl'] = getattr(rr, k)
                        # check for glue records if no SOA
                        # assign name server from glue record
                        # on the parent domain to next query
                        elif rr.rdtype == rdatatype.A:
                            if ip_re(rr.items[0].address):
                                ns = rr.items[0].address
                                results[part]['A'][rr.name] = ns
                            else:
                                e = Exception(
                                    'A record ip is incorrectly formatted')
                                logger.log(logger.level, [e, rr.items[0].address])
                        # check for NS records if no A record
                        elif rr.rdtype == rdatatype.NS:
                            authority = rr.target
                            try:
                                # Resolve the NS hostname with the stub
                                # resolver and use that address for the next
                                # hop of the trace.
                                ns = resolver.query(authority)\
                                    .rrset[0].to_text()
                                if ip_re(ns):
                                    results[part]['NS']\
                                        [authority.to_text()] = ns
                                    results[part]['ns_queried'] = ns
                                else:
                                    e = Exception(
                                        'NS record ip is incorrectly formatted'
                                    )
                                    logger.log(logger.level, [e, ns])
                            except (resolver.NoAnswer, resolver.NoNameservers,
                                    resolver.NXDOMAIN, resolver.YXDOMAIN) as e:
                                logger.log(logger.level, e)
                                continue
            # Collect TXT records for this part, preferring the direct UDP
            # answer and falling back to the stub resolver.
            results[part]['TXT'] = []
            if res_txt.answer:
                # dns.query.udp returns an answer object
                for rrset in res_txt.answer:
                    for rr in rrset:
                        results[part]['TXT'].append(rr.to_text().strip('"'))
            else:
                try:
                    res_txt = resolver.query(part, 'TXT')
                except (resolver.NoAnswer, resolver.NoNameservers,
                        resolver.NXDOMAIN, resolver.YXDOMAIN) as e:
                    logger.log(logger.level, e)
                    continue
                # dns.resolver.query returns a response.answer object
                for rrset in res_txt.response.answer:
                    for item in rrset:
                        results[part]['TXT']\
                            .append(item.to_text().strip('"'))
        # check to see if we have no SOA records after querying all parts
        # (only keys ending in '.' are domain parts; 'domain' and 'root_ns'
        # are skipped by the filter).
        if not any([
                bool(results[part]['SOA']) for part in results
                if part.endswith('.')
        ]):
            # skip '.' and 'com.' and dig from previous results
            # NOTE(review): [2:] actually skips the 'domain' and 'root_ns'
            # keys, not '.'/'com.' -- the comment and slice may disagree.
            for part in list(results)[2:]:
                if results[part]['NS']:
                    # if not SOA yet, choose a name server from previous ns query
                    ns = choice(list(results[part]['NS'].values()))
                    req = message.make_query(part, rdatatype.SOA)
                    res = query.udp(req, ns)
                    results[part]['ns_queried'] = ns
                    # if timeout, continue to next domain part
                    if not res:
                        continue
                    elif res.answer:
                        # soa records are only answers to queries
                        if res.answer[0].rdtype == rdatatype.SOA:
                            # in rrset [0] , in rr record [0]
                            soa = res.answer[0][0]
                            for k in ('mname', 'rname', 'serial', 'refresh',
                                      'retry', 'expire', 'minimum'):
                                results[part]['SOA'][k if k != 'minimum'
                                                     else 'default_ttl'] = getattr(soa, k)
        return results
    else:
        e = ValueError(domain, 'not a valid domain name')
        logger.log(logger.level, e)
        raise e
def dns_doh(domain, threat):
    """Look up *domain* over DNS-over-HTTPS and classify it.

    Sends a base64url-encoded A query (RFC 8484 GET style) to the
    configured resolver URL, then an MX query the same way.

    Arguments:
        domain (str): domain name to check
        threat (str): key under which the verdict is stored

    Returns:
        dict: {threat: {'report_DoH': 'Malware'|'Good', 'domain': ...,
        'A': ..., 'MX': ...}}, or an empty container when the request
        failed, the response was not DNS, or no A answers came back.
    """
    detected = {}
    # Renamed from `message` to avoid shadowing the dns `message` module
    # used elsewhere in this file.
    query_a = doh.make_query(domain, 'A')
    dns_req = base64.urlsafe_b64encode(query_a.to_wire()).\
        decode("UTF8").rstrip("=")
    try:
        resp = requests.get(dnssec['elevenpaths']['url'],
                            params={"dns": dns_req},
                            headers={"Content-type": ct})
    except requests.RequestException:
        return detected
    # .get() avoids a KeyError when the server omits Content-Type.
    if ct not in resp.headers.get("Content-Type", ""):
        return detected
    # Every answer line's last whitespace-separated field is the rdata
    # (the address for A records).
    ips = []
    for response in doh.from_wire(resp.content).answer:
        for answer in response.to_text().split("\n"):
            ips.append(answer.split()[-1])
    if dnssec['elevenpaths']['malware_ip'] in ips:
        # The resolver redirected the name to its sinkhole address.
        detected[threat] = {'report_DoH': 'Malware',
                            'domain': domain,
                            'A': '',
                            'MX': ''}
    elif not ips:
        # No A answers at all: nothing to report. (Bug fix: the original
        # assigned a tuple here and then fell through to the MX item
        # assignments below, which always raised TypeError.)
        return {}
    else:
        detected[threat] = {'report_DoH': 'Good',
                            'domain': domain,
                            'A': ips}
    # Second DoH round trip for the MX records.
    mess_mx = doh.make_query(domain, 'MX')
    dns_mx = base64.urlsafe_b64encode(mess_mx.to_wire()).\
        decode("UTF8").rstrip("=")
    try:
        res_mx = requests.get(dnssec['elevenpaths']['url'],
                              params={"dns": dns_mx},
                              headers={"Content-type": ct})
    except requests.RequestException:
        # Best effort: keep the A verdict, just leave MX empty.
        detected[threat]['MX'] = ''
        return detected
    if ct not in res_mx.headers.get("Content-Type", ""):
        detected[threat]['MX'] = ''
        return detected
    detected[threat]['MX'] = []
    for response in doh.from_wire(res_mx.content).answer:
        for ans_mx in response.to_text().split("\n"):
            detected[threat]['MX'].append(ans_mx.split()[-1])
    return detected