def parse_network_data(this):
    """Unpack the raw packet into a header, a question entry and the three RR sections."""
    net_data = this.raw_packet
    offset = 0
    header = Header.fromData(net_data, offset)
    offset += len(header)
    query = QE.fromData(net_data, offset)
    offset += len(query)

    a_rr_list = []
    ns_rr_list = []
    ar_rr_list = []

    # Answer section
    for _ in range(header._ancount):
        if offset < len(net_data):
            (a_rr, a_rr_len) = RR.fromData(net_data, offset)
            offset += a_rr_len
            a_rr_list.append(a_rr)

    # Authority section
    for _ in range(header._nscount):
        if offset < len(net_data):
            (ns_rr, ns_rr_len) = RR.fromData(net_data, offset)
            offset += ns_rr_len
            ns_rr_list.append(ns_rr)

    # Additional section (skip records of unknown type)
    for _ in range(header._arcount):
        if offset < len(net_data):
            (ar_rr, ar_rr_len) = RR.fromData(net_data, offset)
            offset += ar_rr_len
            if ar_rr._type != -1:
                ar_rr_list.append(ar_rr)

    this.header = header
    this.query = query
    this.a_rr_list = a_rr_list
    this.ns_rr_list = ns_rr_list
    this.ar_rr_list = ar_rr_list
def printDeconstructOutgoing(data):
    """ Utility function for printing a binary-packed query """
    header = Header.fromData(data)
    question_entry = QE.fromData(data, offset=len(header))
    resource_records = []
    offset = len(header) + len(question_entry)
    # Walk the remaining resource records until the buffer is exhausted
    while True:
        try:
            resource_record = RR.fromData(data, offset=offset)
            offset += resource_record[1]
            resource_records.append(resource_record)
        except struct.error:
            break
    print(header, "\n")
    print(question_entry, "\n")
    for resource_record in resource_records:
        if resource_record[0]._type == RR.TYPE_A:
            print("address record\n", resource_record[0])
        elif resource_record[0]._type == RR.TYPE_NS:
            print("name server record\n", resource_record[0])
        elif resource_record[0]._type == RR.TYPE_CNAME:
            print("canonical name record\n", resource_record[0])
        elif resource_record[0]._type == RR.TYPE_AAAA:
            print("IPv6 address record\n", resource_record[0])
def getEmptyRecord(data):
    """ Returns an empty record with type = RR.TYPE_UNKNOWN """
    header = Header.fromData(data)
    question_entry = QE.fromData(data, offset=len(header))
    rr = RR.fromData(data, offset=len(header) + len(question_entry))
    # rr is a (record, length) tuple; return the trailing rr[1] bytes of the packet
    return data[-(rr[1]):]
def parse_header(data):
    '''
    Parse binary data to construct a Header object for the packet, then collect
    all header fields into a dictionary.

    :param data: binary data received from a server as the answer to a request
    :return: dictionary containing the header parameters
    '''
    header = Header.fromData(data)
    n_qe = header._qdcount
    n_ans = header._ancount
    n_auth = header._nscount
    n_add = header._arcount
    header_id = header._id
    return {'qdcount': n_qe, 'ancount': n_ans, 'nscount': n_auth,
            'arcount': n_add, 'id': header_id}
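# Illustrative usage sketch (not part of the original module): how the dictionary returned by
# parse_header() might be consumed when deciding whether a reply carries answers or only a
# referral; the helper name below is hypothetical.
def _reply_is_referral_only(reply):
    counts = parse_header(reply)
    return counts['ancount'] == 0 and (counts['nscount'] > 0 or counts['arcount'] > 0)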
def deconstructData(data):
    """ Deconstructs/unpacks the given binary data into a Header, a QE object and a list of RR objects """
    header = Header.fromData(data)
    question_entry = QE.fromData(data, offset=len(header))
    resource_records = []
    offset = len(header) + len(question_entry)
    while True:
        try:
            resource_record = RR.fromData(data, offset=offset)
            offset += resource_record[1]
            resource_records.append(resource_record[0])
        except struct.error:
            break
    return (header, question_entry, resource_records)
def printDeconstructIncoming(data):
    """ Another utility function for printing incoming data with no resource records """
    header = Header.fromData(data)
    question_entry = QE.fromData(data, offset=len(header))
    resource_record = RR.fromData(data, offset=(len(header) + len(question_entry)))
    # resource_record2 = RR.fromData(data, offset=(len(header) + len(question_entry) + resource_record[1]))
    print("Header is ##################################")
    print(header)
    print("Header len", len(header))
    print("Question entry is ############################")
    print(question_entry)
    print("Question len", len(question_entry))
    print("Resource record is #########################")
    print(resource_record[0])
    print("###################")
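# Illustrative debugging sketch (assumed usage, not original code): wrapping a send/receive pair
# with the two print helpers above to inspect both sides of an exchange. `cs` is the UDP client
# socket created further down in this file; the helper name is hypothetical.
def _debug_exchange(query, server_ip):
    printDeconstructOutgoing(query)
    cs.sendto(query, (server_ip, 53))
    reply, _ = cs.recvfrom(512)
    printDeconstructIncoming(reply)
    return reply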
def resolve_query(header_id, initial_domain_name, domain_name, current_ip, initial_ip, is_cname):
    '''
    This is the main "powerhouse" function that serves a client request. Upon receiving a domain
    name, it first checks whether it is contained in the cache. If the cache contains information
    about the requested domain name, the function returns it. Otherwise the function recursively
    searches for the requested domain name as specified in the lecture notes. The cache also helps
    reduce queries by providing sub-domains for a domain name.

    :param header_id: ID of the initial client query
    :param initial_domain_name: the domain name the client was requesting
    :param domain_name: the domain name this function is currently trying to resolve
    :param current_ip: the current IP address we want to send a follow-up query to
    :param initial_ip: the first IP address that our LNS sends a query to (always the Root Server in our case)
    :param is_cname:
    :return: a dictionary of lists containing RRs categorized by the section they belong to in the packet
    '''
    print("iteration")
    global acache
    global cnamecache
    global nscache
    global global_cname

    rr_return = {'answers': [], 'authorities': [], 'additional': [], 'additional_A': []}

    # Only for the very first iteration (where domain_name == initial_domain_name) check if the
    # requested domain name is inside the cache
    if acache.contains(domain_name) and domain_name == initial_domain_name and not global_cname:
        print("containsx")
        # Get the IP and ttl from the cache and construct an RR_A object for the answer section
        ip = acache.getIpAddresses(domain_name)
        ttl = acache.getExpiration(domain_name, ip[0])
        rr_answer = RR_A(domain_name, ttl, ip[0])  # ip[0].toNetwork() if ip[0] is not in byte form
        # Append the RR_A to the dictionary's answer section
        rr_return['answers'].append(rr_answer)

        # Construct the authority section by looping through all possible sub-domains of domain_name
        # and taking the highest-qualified one that exists in the nscache
        lowest_domain = get_subdomain_order(domain_name)
        for i in range(len(lowest_domain)):
            this_domain = DomainName(lowest_domain[i])
            if nscache.contains(this_domain):
                # Get the list of name servers associated with this domain name
                something = nscache.get(this_domain)
                for j in range(len(something)):
                    # For each name server associated with this domain name, retrieve the ttl,
                    # construct an RR_NS object and add it to the authority section of the dictionary
                    ttl = something[j][1]
                    nsdn = nscache.get_nsdn(this_domain)
                    rr_authorities = RR_NS(this_domain, ttl, nsdn[j])
                    rr_return['authorities'].append(rr_authorities)
                    # Check the IP address of each name server in the authority section, create an
                    # RR_A object and add it to the additional section of the dictionary
                    if acache.contains(nsdn[j]):
                        this_ip = acache.getIpAddresses(nsdn[j])
                        this_ttl = acache.getExpiration(nsdn[j], this_ip[0])
                        rr_additional = RR_A(nsdn[j], this_ttl, this_ip[0])
                        rr_return['additional'].append(rr_additional)
                # If we found an existing sub-domain, look no further
                break
        return rr_return

    # During the first iteration, check for the highest-qualified sub-domain that exists in the cache.
    # If we find such a sub-domain, jump straight to that sub-domain's IP address instead of doing
    # more queries and causing traffic
    if current_ip == initial_ip and not is_cname and not global_cname:
        lowest_domain = get_subdomain_order(domain_name)
        for i in range(len(lowest_domain)):
            this_domain = DomainName(lowest_domain[i])
            if nscache.contains(this_domain):
                # Once we find that sub-domain, retrieve its IP address from the cache and recursively
                # call resolve_query() using the new IP address as the target
                nsdn = nscache.get_nsdn(this_domain)
                next_ip = acache.getIpAddresses(nsdn[0])
                return resolve_query(header_id, initial_domain_name, domain_name, next_ip, initial_ip, is_cname)

    # Construct the packet
    send_packet_header = Header(header_id, 0, 0, 1)
    send_packet_question = QE(1, domain_name)
    send_packet = send_packet_header.pack() + send_packet_question.pack()

    # Send the packet to the target IP address and wait for an answer
    cs.sendto(send_packet, (current_ip, 53))
    reply, a = cs.recvfrom(512)

    # Parse the reply and construct the RRs
    header_record = parse_header(reply)
    rr = get_records(reply, header_record['ancount'], header_record['nscount'])

    # Cache all the RRs; this stage is only reached when these RRs are not duplicates, otherwise we
    # would have reached them before, without needing to cause traffic
    if header_record['ancount'] != 0:  # len(rr['answers']) != 0:
        for i in rr['answers']:
            if i._type == 1:
                acache.put(domain_name, i._addr, i._ttl)
            elif i._type == 5:
                cnamecache.put(domain_name, i._cname, i._ttl)
    if header_record['nscount'] != 0:
        for i in rr['authorities']:
            if i._type == 2:
                nscache.put(i._dn, i._nsdn, i._ttl, True)
    if len(rr['additional']) != 0:
        for i in rr['additional']:
            if i._type == 1:
                acache.put(i._dn, i._addr, i._ttl)

    '''print("acache is:")
    print(acache.__str__())
    print("nscache is:")
    print(nscache.__str__())
    print("cnamecache is:")
    print(cnamecache.__str__())'''

    # Main recursive part
    try:
        # If there is no answer section and there is an additional section, make the same query
        # targeting the first RR_A in the additional section
        if len(rr['additional']) != 0:
            next_ip = bin_to_str(rr['additional_A'].pop(0)._addr)
            return resolve_query(header_id, initial_domain_name, domain_name, next_ip, initial_ip, is_cname)
        # If we have an answer section
        elif header_record['ancount'] != 0:
            # If we found the domain name the client originally requested, return the records
            if initial_domain_name == domain_name:
                return rr
            # If we found a domain name from an authority section, we target this new domain's IP
            # address with our query
            else:
                next_ip = bin_to_str(rr['answers'][0]._addr)
                return resolve_query(header_id, initial_domain_name, initial_domain_name, next_ip, initial_ip, is_cname)
        # If there is no answer and no RR_A additional section, we need to recursively search for the
        # domain name of the first RR_NS from the authority section
        else:
            next_domain_name = rr['authorities'][0]._nsdn
            return resolve_query(header_id, initial_domain_name, next_domain_name, ROOTNS_IN_ADDR, initial_ip, is_cname)
    # If the server we send a question to does not respond, go ask the next server in the additional section
    except timeout:
        next_ip = bin_to_str(rr['additional_A'].pop(0)._addr)
        return resolve_query(header_id, initial_domain_name, initial_domain_name, next_ip, initial_ip, is_cname)
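# Illustrative sketches of the contracts assumed for two helpers used above (get_subdomain_order
# and bin_to_str); they are defined elsewhere in this project, so the bodies here are guesses
# about their behaviour, not the original implementations.
def _example_get_subdomain_order(domain_name):
    # "www.example.com." -> ['www.example.com', 'example.com', 'com'] (most specific first)
    labels = str(domain_name).strip('.').split('.')
    return ['.'.join(labels[i:]) for i in range(len(labels))]

def _example_bin_to_str(addr_bytes):
    # Packed 4-byte address -> dotted-quad string, using the lib's InetAddr helper
    return str(InetAddr.fromNetwork(addr_bytes))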
# servers:
setdefaulttimeout(TIMEOUT)
cs = socket(AF_INET, SOCK_DGRAM)

# This is a simple, single-threaded server that takes successive
# connections with each iteration of the following loop:
while 1:
    (data, client_address,) = ss.recvfrom(512)  # DNS limits UDP msgs to 512 bytes
    if not data:
        logger.error("client provided no data")
        continue

    # Parse the header and question section of the client query, retrieving the domain name
    lengthData = len(data)
    query_header = Header.fromData(data)
    header_id = query_header._id
    question = QE.fromData(data, 12)
    initial_qe_dn = question._dn
    global_cname = False

    # Use a function to ask for the requested domain name
    packet = resolve_query(header_id, initial_qe_dn, initial_qe_dn, ROOTNS_IN_ADDR, ROOTNS_IN_ADDR, True)
    new_packet = packet

    # If the answer section contains a CNAME, take that domain name and dig until there is no CNAME
    # in the answer
    while packet['answers'][0]._type == 5:
        # Look for the new domain name and call the resolve_query function on it
        global_cname = True
def recursiveLookup(data, address):
    """ Main recursive function - does everything """
    answers = []
    addresses = []
    nameservers = []
    cnames = []
    originalHeader, originalQuery, originalRecords = deconstructData(data)
    print("ORIGINAL are", originalRecords)

    # If the query took more than 50 seconds, return server failure
    if int(time()) - newQueryTime > 50:
        """ 50 seconds on the safer side for timeout, returning SRVFAIL RCODE in Header """
        print("TOOK A LOT OF TIME")
        originalHeader._rcode = Header.RCODE_SRVFAIL
        originalHeader._ra = 1
        originalHeader._aa = 0
        originalHeader._qr = 1
        return originalHeader.pack() + originalQuery.pack() + packRecords(originalRecords)

    # If the answer is already in the cache, extract it from the cache
    if acache.contains(originalQuery._dn):
        finalRecords = b''
        answers = getAddressFromCache(acache, originalQuery._dn)
        # If the length of answers is not zero, serve from the cache, else resolve manually
        if len(answers) > 0:
            nameservers, additionals = getNameserversFromCache(nscache, acache, originalQuery._dn)
            """ Adjust the header answer count for sending the client back an answer """
            originalHeader._ancount = len(answers)
            if len(nameservers) > 0:
                answers.extend(nameservers)
            if len(additionals) > 0:
                answers.extend(additionals)
            for record in answers:
                """ Pack records into binary """
                try:
                    # print(record)
                    finalRecords += record.pack()
                except (struct.error, Exception) as e:
                    pp.pprint(e)
                    continue
            """ Adjust header attributes accordingly for sending the client back an answer """
            originalHeader._nscount = len(nameservers)
            originalHeader._arcount = len(additionals)
            originalHeader._qr = 1
            originalHeader._aa = 0
            originalHeader._ra = 1
            return originalHeader.pack() + originalQuery.pack() + finalRecords

    # Manual resolving code
    cs.sendto(data, (address, 53))
    try:
        # Send to the new address and get data back
        (serverData, serverAddress) = cs.recvfrom(512)
    except (timeout, error):
        # If timeout (greater than dig's default timeout of 5 seconds), return None
        return None

    header, question_entry, resource_records = deconstructData(serverData)

    if header._rcode == Header.RCODE_NAMEERR or header._rcode == Header.RCODE_SRVFAIL:
        originalHeader._rcode = Header.RCODE_SRVFAIL
        originalHeader._ra = 1
        originalHeader._aa = 0
        originalHeader._qr = 1
        print(originalRecords)
        return originalHeader.pack() + originalQuery.pack() + EMPTY_RESOURCE_RECORD

    if header._qdcount > 1:
        print("CAN'T RESOLVE MULTIPLE QUERIES, EXITING...")
        """ Can't resolve multiple queries, as mentioned in the negative requirements """
        return data

    if header._ancount > 0:
        """ Answer(s) exist already, no need to go further """
        for i in range(header._ancount):
            answers.append(resource_records[i])
        for i in range(header._ancount, len(resource_records)):
            if resource_records[i]._type == RR.TYPE_NS:
                nameservers.append(resource_records[i])
                putInCache(resource_records[i])
            elif resource_records[i]._type == RR.TYPE_A:
                addresses.append(resource_records[i])
                putInCache(resource_records[i])

        answer_record = resource_records[0]
        if answer_record._type == RR.TYPE_A:
            """ If it is an address, return the binary-packed server data """
            return serverData
        elif answer_record._type == RR.TYPE_CNAME:
            """ If the type is CNAME, resolve the cname again from the root DNS """
            if cnamecache.contains(answer_record._dn) and \
                    cnamecache.getCanonicalNameExpiration(answer_record._dn) >= int(time()) - now:
                # pass
                cname = cnamecache.getCanonicalName(answer_record._dn)
                furtherCnames = findMoreCnames(cname)
                cname_addresses = []
                if len(furtherCnames) == 0:
                    """ Only a single CNAME, so just find addresses of the CNAME """
                    cname_addresses.extend(getAddressFromCache(acache, cname))
                    if len(cname_addresses) > 0:
                        cname_addresses.insert(0, answer_record)
                        cname_nameservers, cname_additionals = getNameserversFromCache(
                            nscache, acache, cname)
                        cname_cache_bin = getRecords(cname_addresses, cname_additionals,
                                                     cname_nameservers, data)
                        """ Set appropriate header attributes w.r.t. the number of records of each
                        type (ancount, nscount and arcount) """
                        originalHeader._ancount = len(cname_addresses)
                        originalHeader._nscount = len(cname_nameservers)
                        originalHeader._arcount = len(cname_additionals)
                        originalHeader._qr = 1
                        originalHeader._aa = 0
                        originalHeader._ra = 1
                        originalHeader._rd = 0
                        return originalHeader.pack() + originalQuery.pack() + cname_cache_bin
                    else:
                        """ If we could not find any addresses for the cnames, resolve manually """
                        pass
                else:
                    """ The CNAME itself has cname(s), so get all CNAMEs and use those """
                    try:
                        cflag = 0
                        cname_addresses.extend(furtherCnames)
                        """ Get answers, nameservers and additionals """
                        cname_addresses.extend(getAddressFromCache(acache, cname))
                        cname_nameservers, cname_additionals = getNameserversFromCache(
                            nscache, acache, cname)
                        for furtherCname in furtherCnames:
                            # print("LOOK FOR THE CNAME", cname, "FOR ADDRESSES")
                            if len(getAddressFromCache(acache, furtherCname._cname)) > 0:
                                """ Extra condition to make sure addresses of cnames haven't expired
                                yet. If the list of addresses of the cname is empty, resolve the
                                cnames again """
                                cflag = 1
                            # else:
                            #     qec = furtherCnames[-1]._dn
                            #     query = constructQuery(originalHeader, qec, QE.TYPE_A) + EMPTY_RESOURCE_RECORD
                            #     cnameCacheData = recursiveLookup(query, ROOTNS_IN_ADDR)
                            #     ch, cq, crr = deconstructData(cnameCacheData)
                            #     ch._ancount += len(furtherCnames)
                            #     crr_bin = b''
                            #     for furtherCname in furtherCnames:
                            #         crr_bin += furtherCname.pack()
                            #     for cr in crr:
                            #         crr_bin += cr.pack()
                            #     return ch.pack() + cq.pack() + crr_bin
                            """ Add all additional and glue records into pre-existing records to
                            report all possible high-level authorities for each CNAME """
                            cname_addresses.extend(getAddressFromCache(acache, furtherCname._cname))
                            cname_nameservers_temp, cname_additionals_temp = getNameserversFromCache(
                                nscache, acache, furtherCname._cname)
                            cname_additionals.extend(cname_additionals_temp)
                            cname_nameservers.extend(cname_nameservers_temp)
                        cname_addresses.insert(0, answer_record)
                        # print("FINAL CNAME ADDRS ARE", cname_addresses)
                        cname_cache_bin = getRecords(cname_addresses, cname_additionals,
                                                     cname_nameservers, data)
                        """ Adjust header attributes accordingly for returning the answer """
                        originalHeader._ancount = len(cname_addresses)
                        originalHeader._nscount = len(cname_nameservers)
                        originalHeader._arcount = len(cname_additionals)
                        originalHeader._qr = 1
                        originalHeader._aa = 0
                        originalHeader._ra = 1
                        originalHeader._rd = 0
                        if cflag == 1:
                            return originalHeader.pack() + originalQuery.pack() + cname_cache_bin
                        else:
                            """ If the addresses expired, we need to find the address of the CNAME manually """
                            # print("NEED TO SEARCH SOME CNAME")
                            pass
                    except (struct.error, error, Exception) as e:
                        pass
            else:
                """ CNAME not in cache, resolve the CNAME manually """
                putInCache(answer_record)
                # print("Canonical record detected:", answer_record)
                address_to_search = str(answer_record._cname)
                # print(address_to_search)
                cnameData = constructQuery(
                    header=Header.fromData(data),
                    domain_name=DomainName(address_to_search),
                    type=QE.TYPE_A) + EMPTY_RESOURCE_RECORD
                """ Recursive call for the CNAME """
                cnameServerData = recursiveLookup(cnameData, ROOTNS_IN_ADDR)
                if cnameServerData is not None:
                    """ The data we got is not empty; form the answer by packing the data and return it """
                    anscount = arcount = nscount = 0
                    cnameHeader, cnameQuestionEntry, cnameResourceRecords = deconstructData(
                        cnameServerData)
                    cnameResourceRecordsBin = b''
                    try:
                        cnameResourceRecordsBin += answer_record.pack()
                    except struct.error:
                        pass
                    anscount += 1
                    cnt = 0
                    for cnameRecord in cnameResourceRecords:
                        """ Pack cnames into binary """
                        try:
                            if cnameRecord._type == RR.TYPE_CNAME:
                                cnameResourceRecordsBin = cnameResourceRecordsBin + cnameRecord.pack()
                                anscount += 1
                                putInCache(cnameRecord)
                            elif cnameRecord._type == RR.TYPE_A:
                                putInCache(cnameRecord)
                                cnameResourceRecordsBin = cnameResourceRecordsBin + cnameRecord.pack()
                                anscount += 1
                            else:
                                break
                            cnt += 1
                        except struct.error:
                            continue
                    """ Pack authorities and glue records into binary """
                    rem = cnameResourceRecords[cnt:]
                    # print("REMAINING RECORDS PLEASE", rem)
                    for cnameRecord in rem:
                        try:
                            if cnameRecord._type == RR.TYPE_NS:
                                cnameResourceRecordsBin = cnameResourceRecordsBin + cnameRecord.pack()
                                nscount += 1
                                putInCache(cnameRecord)
                            elif cnameRecord._type == RR.TYPE_A:
                                cnameResourceRecordsBin = cnameResourceRecordsBin + cnameRecord.pack()
                                arcount += 1
                                putInCache(cnameRecord)
                        except struct.error:
                            continue
                    oldQuestionDomain = question_entry._dn
                    oldQuestionString = oldQuestionDomain.__str__()
                    cnameQuestionEntry = QE(type=QE.TYPE_A, dn=oldQuestionDomain)
                    cnameHeader._aa = 0
                    cnameHeader._ra = 1
                    cnameHeader._qr = 1
                    cnameHeader._ancount = anscount
                    cnameHeader._nscount = nscount
                    cnameHeader._arcount = arcount
                    cnameHeader._rd = 0
                    newCnameServerData = cnameHeader.pack() + cnameQuestionEntry.pack() + cnameResourceRecordsBin
                    # print("New data returned should be:")
                    # printDeconstructOutgoing(newCnameServerData)
                    return newCnameServerData
                else:
                    print("ERROR - NO CANONICAL RECORD FOUND, RETURNING...")
                    return None
    else:
        """ No answers found, start searching glue records """
        flag = 0
        ns = []
        add = []
        """ Utility for-loops for grouping records into the ns (nameservers) list and the add (addresses) list """
        for resource_record in resource_records:
            if resource_record._type == RR.TYPE_NS:
                ns.append(resource_record)
                putInCache(resource_record)
            elif resource_record._type == RR.TYPE_A:
                add.append(resource_record)
                putInCache(resource_record)
        for record in resource_records:
            if record._type == RR.TYPE_A:
                putInCache(record)
                new_address = str(InetAddr.fromNetwork(record._addr))
                """---------------------------RECURSIVE CALL-------------------------------"""
                newServerData = recursiveLookup(data, new_address)
                if newServerData is not None:
                    """ Get the header (in h), question entry (in q) and records (in rr) for sending
                    the resolved answer back to the client """
                    h, q, rr = deconstructData(newServerData)
                    for i in range(h._ancount):
                        answers.append(rr[i])
                        putInCache(rr[i])
                    for i in range(h._ancount, len(rr)):
                        if rr[i]._type == RR.TYPE_NS:
                            nameservers.append(rr[i])
                            putInCache(rr[i])
                        elif rr[i]._type == RR.TYPE_A:
                            addresses.append(rr[i])
                            putInCache(rr[i])
                    """ Utility code for additional glue records and authorities """
                    addresses.append(record)
                    for nameserver in ns:
                        putInCache(nameserver)
                        if str(record._dn) == str(nameserver._nsdn):
                            if nameserver not in nameservers:
                                nameservers.append(nameserver)
                            break
                    print("Success! Sending response to Client")
Sending response to Client") """ Set the header appropriately w.r.t its attributes for returning final answer """ flag = 1 h._ancount = len(answers) h._nscount = len(nameservers) h._arcount = len(addresses) h._aa = 0 h._ra = 1 h._qr = 1 h._rd = 1 rr_bin = getRecords(answers, addresses, nameservers, newServerData) # pp.pprint(answers) # printDeconstructOutgoing(h.pack() + question_entry.pack() + rr_bin) return h.pack() + question_entry.pack() + rr_bin # return newServerData else: print("DIDNT GET ANYTHING BACK, continue") continue if flag == 1: break if flag == 0: """ NO glue records as well, resolve authority(ies) and then resolve original question entry """ for record in resource_records: """ Check all authorities and start resolving them to resolve original query """ if (record._type == RR.TYPE_NS): """ record found is nameserver authority """ putInCache(record) # nameservers.append(record) new_address = str(record._nsdn) # print("SEARCH FOR", new_address) """ Construct query """ nsData = constructQuery( header=Header.fromData(data), domain_name=DomainName(new_address), type=QE.TYPE_A) + EMPTY_RESOURCE_RECORD """-----------------------NS RECURSIVE CALL----------------)""" nsServerData = recursiveLookup(nsData, ROOTNS_IN_ADDR) if nsServerData is not None: # print("Got from root") # printDeconstructOutgoing(nsServerData) nsHeader, nsQuestionEntry, nsResourceRecords = deconstructData( nsServerData) if (len(nsResourceRecords) == 0): """ Didnt find any answers for the authority, continue resolving another authority """ continue if (nsResourceRecords[0]._type == RR.TYPE_SOA): """ Not supposed to have SOA, so return None """ # print("SOA detected") return None nsIpAddress = ROOTNS_IN_ADDR if nsHeader._ancount == 0: """ Didnt find any answers for the authority, continue resolving another authority """ continue else: """ Extract ip address of answer record got in resolving authority """ nsIpAddress = str( InetAddr.fromNetwork( nsResourceRecords[0]._addr)) # print("Success! Sending response to Client") # nsIpAddress = str(InetAddr.fromNetwork(nsResourceRecords[0]._addr)) # print("Data to resolve") # printDeconstructOutgoing(data) # print("Resolved NS IP, now resolve:",question_entry,"using",nsIpAddress) """----------------RECURSIVE CALL TO RESOLVE ORIGINAL QUESTION----------------""" newServerData = recursiveLookup(data, nsIpAddress) if (newServerData is not None): """ Get header, question entry and resource records Put in cache as well """ h, q, rr = deconstructData(newServerData) for i in range(h._ancount): answers.append(rr[i]) putInCache(rr[i]) for i in range(h._ancount, len(rr)): if rr[i]._type == RR.TYPE_NS: nameservers.append(rr[i]) putInCache(rr[i]) elif rr[i]._type == RR.TYPE_A: addresses.append(rr[i]) putInCache(rr[i]) # print("Success! Sending response to Client") flag = 1 """ Set the header appropriately w.r.t its attributes for returning final answer """ h._ancount = len(answers) h._nscount = len(nameservers) h._arcount = len(addresses) h._aa = False h._ra = True h._qr = 1 h._rd = 1 rr_bin = getRecords(answers, addresses, nameservers, newServerData) # pp.pprint(answers) # printDeconstructOutgoing(h.pack() + question_entry.pack() + rr_bin) return h.pack() + question_entry.pack() + rr_bin # return newServerData # print("Success! 
Sending response to Client") # printDeconstructOutgoing(newServerData) # return newServerData else: """ If we couldt do anything for this authority, then try the next one """ continue elif (record._type == RR.TYPE_SOA): """ Handling SOA by returning just the original data """ # print("SOA detected, returning...") return serverData else: continue print("Literally nothing can be done") """ Server tried everything but failed to resolve, return SERV_FAIL RCODE in Header """ """ EMPTY RECORD just for returning in failures """ returnRecord = EMPTY_RESOURCE_RECORD header._rcode = Header.RCODE_SRVFAIL return header.pack() + question_entry.pack() + returnRecord
def recursiveLookup(dn: DomainName) -> str:
    if type(dn) == RR_SOA:
        return dn
    print("Lookup", dn)
    cacheStatus = isInCache(dn)
    if cacheStatus == 1:
        print("Found IP for", dn)
        return getIpsFromCache(dn)[0]
    elif cacheStatus == 2:
        print("Found NS for", dn)
        toLookup = getGlueNamesFromCache(dn)[count]
    elif cacheStatus == 3:
        print("Found CN for", dn)
        toLookup = cnamecache.getCanonicalName(dn)
        stack.append(dn)
    elif cacheStatus == 0:
        # No data is cached
        print("No cached data for", dn)
        payload = Header(
            50000,
            opcode=Header.OPCODE_QUERY,
            rcode=Header.RCODE_NOERR,
            qr=0,        # 1 = response, 0 = query
            qdcount=1,   # Questions
        ).pack()
        payload += QE(dn=dn, type=QE.TYPE_A).pack()
        parentIp = recursiveLookup(dn.parent())
        if type(parentIp) == RR_SOA:
            return parentIp
        cs.sendto(payload, (parentIp, 53))
        data = cs.recvfrom(512)[0]

        # Unpack client response data
        headerLen = len(Header.fromData(data))
        questionLen = len(QE.fromData(data, offset=headerLen))
        offset = headerLen + questionLen
        while offset < len(data):
            # Iterate by frame size
            curFrame = RR.fromData(data, offset=offset)
            rr = curFrame[0]
            if type(rr) == RR_NS:
                print("Caching NS", rr.__str__())
                nscache.put(rr._dn, rr._nsdn, (time() + rr._ttl), True)
            elif type(rr) == RR_A:
                print("Caching A", rr.__str__())
                acache.put(
                    rr._dn,
                    InetAddr.fromNetwork(rr._addr),
                    (time() + rr._ttl),
                    authoritative=False,
                )
            elif type(rr) == RR_AAAA:
                pass
            elif type(rr) == RR_CNAME:
                print("Caching CNAME", rr.__str__())
                cnamecache.put(rr._dn, rr._cname, (time() + rr._ttl))
                stack.append(rr._dn)
            elif type(rr) == RR_SOA:
                print("Exiting: Found SOA", rr.__str__())
                return rr
            else:
                logger.log(DEBUG2, "Unknown RR: {}".format(rr))
            offset += curFrame[1]
        toLookup = dn

    print("Looking again for", toLookup)
    cacheStatus = isInCache(toLookup)
    if cacheStatus in [2, 3]:
        # No glues exist for this dn
        reloadGlues = recursiveLookup(getGlueNamesFromCache(toLookup)[0])
        if type(reloadGlues) == RR_SOA:
            return reloadGlues
        resIps = getIpsFromCache(getGlueNamesFromCache(toLookup)[0])
    elif cacheStatus == 3:
        resIps = recursiveLookup(getGlueNamesFromCache(toLookup)[0])
    else:
        resIps = getIpsFromCache(toLookup)
    return resIps[0]
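# Illustrative sketch of the isInCache() status contract that recursiveLookup() above depends on;
# the real helper is defined elsewhere in this project, so the body below is an assumption about
# its behaviour (1 = address cached, 2 = nameserver cached, 3 = CNAME cached, 0 = nothing cached).
def _example_isInCache(dn):
    if acache.contains(dn):
        return 1
    if nscache.contains(dn):
        return 2
    if cnamecache.contains(dn):
        return 3
    return 0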
    resIps = []
    if nscache.contains(dn):
        resIps = [ns[0] for ns in nscache.get(dn)]
    else:
        resIps = [cnamecache.getCanonicalName(dn)]
    return resIps


# This is a simple, single-threaded server that takes successive
# connections with each iteration of the following loop:
while 1:
    ss.settimeout(None)
    (data, client_address) = ss.recvfrom(512)  # DNS limits UDP msgs to 512 bytes
    if not data or Header.fromData(data)._qdcount != 1:
        logger.log(DEBUG2, "client provided no data")
        continue
    ss.settimeout(60)
    queryHeader = Header.fromData(data)
    queryQuestion = QE.fromData(data, offset=12)
    queryDomain = queryQuestion._dn
    print("Query received from client is:\n%s" % (hexdump(data)))
    count = 2
    stack = []
    try:
        def recursiveLookup(dn: DomainName) -> str:
            if type(dn) == RR_SOA:
                return dn
def recurser(question, ipQuerried):
    queryHeader = Header.fromData(question)
    queryQE = QE.fromData(question, queryHeader.__len__())

    if acache.contains(queryQE._dn):
        ips = acache.getIpAddresses(queryQE._dn)
        foundRRA = 0
        for ip in ips:
            ttl = acache.getExpiration(queryQE._dn, ip) - int(time())
            if ttl < 0:
                # too late for this record
                acache.cache.pop(queryQE._dn)
            else:
                answers.append(RR_A(queryQE._dn, ttl, inet_aton(ip)))
                foundRRA = 1
        if foundRRA == 1:
            newHeader = Header(queryHeader._id, 0, 0, 1, ancount=len(answers))
            newQE = QE(dn=queryQE._dn)
            return newHeader.pack() + newQE.pack()
    elif cnamecache.contains(queryQE._dn):
        cn = cnamecache.getCanonicalName(queryQE._dn)
        ttl = cnamecache.getCanonicalNameExpiration(queryQE._dn) - int(time())
        if ttl < 0:
            cnamecache.cache.pop(queryQE._dn)
        else:
            newHeader = Header(queryHeader._id, 0, 0, 1)
            newQE = QE(dn=cn)
            reply = recurser(newHeader.pack() + newQE.pack(), ROOTNS_IN_ADDR)
            if reply is not None:
                answers.append(RR_CNAME(queryQE._dn, ttl, cn))
                return reply

    try:
        cs.sendto(question, (ipQuerried, 53))
        (
            nsreply,
            server_address,
        ) = cs.recvfrom(2048)  # some queries require more space
    except timeout:
        return None

    if len(nsreply) < 43:
        return None  # handle the case where there is an empty response

    # Store these for later use when we want to solve CNAMEs or NSs
    queryHeader = Header.fromData(nsreply)
    queryQE = QE.fromData(nsreply, queryHeader.__len__())
    originalQ = str(queryQE).split("IN")[0].strip()
    offset = queryHeader.__len__() + queryQE.__len__()

    # We'll need these for parsing, trust me
    minRRLineLen = len(nsreply) - offset - 1
    rrCounter = 0
    nsAuthorities = []
    rra = []
    cnames = []
    queryRRTuples = []

    # Parse all returned RRs
    while minRRLineLen < len(nsreply) - offset:
        # Get the next glue line
        auxRRline = RR.fromData(nsreply, offset)
        # Append to the RR list, update the offset
        queryRRTuples.append(auxRRline)
        offset += queryRRTuples[rrCounter][1]
        queryRR = queryRRTuples[rrCounter][0]
        if queryRR.__class__ == RR_NS:
            nsAuthorities.append(queryRR)
        elif queryRR.__class__ == RR_A:
            rra.append(queryRR)
        elif queryRR.__class__ == RR_CNAME:
            cnames.append(queryRR)
        # Update the minimum line length for the safety stop
        if minRRLineLen > auxRRline[1]:
            minRRLineLen = auxRRline[1]
        rrCounter += 1

    # Start the handling of RRs
    # Case where we only got NS back
    if len(rra) == 0 and len(cnames) == 0:
        for auth in nsAuthorities:
            reply = recurser(question, str(auth._nsdn))
            return reply

    # Cache NS for later
    if len(nsAuthorities) > 0:
        for ns in nsAuthorities:
            nscache.put(ns._dn, ns._nsdn, ns._ttl + int(time()), authoritative=True)

    # Cache CNAMEs for later and query them
    if len(cnames) > 0:
        for queryRR in cnames:
            if cnamecache.contains(queryRR._dn) == False:
                cnamecache.put(queryRR._dn, queryRR._cname, queryRR._ttl + int(time()))
            answers.append(queryRR)
            newHeader = Header(randint(1, 65000), Header.OPCODE_QUERY, Header.RCODE_NOERR, qdcount=1)
            newQE = QE(dn=queryRR._cname)
            newQuery = newHeader.pack() + newQE.pack()
            reply = recurser(newQuery, ROOTNS_IN_ADDR)
            return reply

    # Cache all RR_As for later, and check if we got the one we are looking for
    if len(rra) > 0:
        for queryRR in rra:
            if acache.contains(queryRR._dn) == False:
                acache.put(queryRR._dn, inet_ntoa(queryRR._addr), queryRR._ttl + int(time()),
                           authoritative=True)
            parts = queryRR.__str__().split("A")
            ip = parts[len(parts) - 1].strip()
            # Found the required answer
            if queryRR._dn == originalQ:
                # Add all answers
                counter = 0
                while counter < len(rra):
                    if rra[counter]._dn == originalQ:
                        answers.append(rra[counter])
                        rra.pop(counter)
                    counter += 1
                return nsreply
            else:
                reply = recurser(question, ip)
                return reply
    ) = ss.recvfrom(512)  # DNS limits UDP msgs to 512 bytes

    # Making sure those globals are ready to be used
    answers = []
    auths = []
    additionals = []

    if not data:
        log.error("client provided no data")
        continue
    else:
        # Saving all this data for response reconstruction later
        question = data
        ip = ROOTNS_IN_ADDR
        initialHeader = Header.fromData(data)
        initialQE = QE.fromData(data, initialHeader.__len__())
        initialId = initialHeader._id

        # The call to solve the main query
        nsreply = recurser(question, ip)
        if nsreply is None:
            # Respond with a name error
            newHeader = Header(initialId, 0, Header.RCODE_NAMEERR, 1)
            response = newHeader.pack() + initialQE.pack()
        else:
            # Save these because we were using globals (never a good practice - too late now, sue me)
            finalAns = []
            finalAns.extend(answers)