def test_sendReplyLoggingWithAnswers(self):
    """
    L{server.DNSServerFactory.sendReply} logs the answers, authority and
    additional sections when the supplied message has records in those
    sections, followed by the elapsed processing time.
    """
    # Freeze "now" one second after the message's receipt timestamp.
    self.patch(server.time, "time", lambda: 86402)
    message = dns.Message()
    for section in (message.answers, message.authority, message.additional):
        section.append(dns.RRHeader(payload=dns.Record_A("127.0.0.1")))
    message.timeReceived = 86401
    factory = server.DNSServerFactory(verbose=2)
    assertLogMessage(
        self,
        [
            "Answers are <A address=127.0.0.1 ttl=None>",
            "Authority is <A address=127.0.0.1 ttl=None>",
            "Additional is <A address=127.0.0.1 ttl=None>",
            "Processed query in 1.000 seconds",
        ],
        factory.sendReply,
        protocol=NoopProtocol(),
        message=message,
        address=None,
    )
def test_expiredTTLLookup(self):
    """
    When the cache is queried exactly as the cached entry should expire but
    before it has actually been cleared, the cache does not return the
    expired entry.
    """
    results = (
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                      dns.Record_A("127.0.0.1", 60))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                      dns.Record_A("127.0.0.1", 50))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                      dns.Record_A("127.0.0.1", 40))],
    )
    clock = task.Clock()
    # Suppress expiry callbacks so the stale entry is never actually cleared.
    clock.callLater = lambda *args, **kwargs: None
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    resolver = cache.CacheResolver(
        {query: (clock.seconds(), results)}, reactor=clock)
    clock.advance(60.1)
    return self.assertFailure(
        resolver.lookupAddress(b"example.com"), dns.DomainError)
def test_min_ttl():
    """A cached entry survives for at least ``minTTL`` seconds, regardless of
    the record TTLs, and is evicted once ``minTTL`` has elapsed."""
    results = (
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                      dns.Record_A("127.0.0.1", 60))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                      dns.Record_A("127.0.0.1", 50))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                      dns.Record_A("127.0.0.1", 40))],
    )
    clock = task.Clock()
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    resolver = ExtendCacheResolver(reactor=clock, minTTL=100)
    resolver.cacheResult(query, results)
    # Record TTLs (40-60s) have passed, but minTTL=100 keeps the entry alive.
    clock.advance(70)
    assert query in resolver.cache
    # 100.1 seconds total: now past minTTL, so the entry is gone.
    clock.advance(30.1)
    assert query not in resolver.cache
def test_addressRecord2(self):
    """A DNS 'A' query may return multiple answer records."""
    expected = [
        dns.Record_A('123.242.1.5', ttl=19283784),
        dns.Record_A('0.255.0.255', ttl=19283784),
    ]
    return self.namesTest(
        self.resolver.lookupAddress('host.test-domain.com'), expected)
def test_constructorExpires(self):
    """
    Cache entries passed into L{cache.CacheResolver.__init__} get cancelled
    just like entries added with cacheResult.
    """
    results = (
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                      dns.Record_A("127.0.0.1", 60))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                      dns.Record_A("127.0.0.1", 50))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                      dns.Record_A("127.0.0.1", 40))],
    )
    clock = task.Clock()
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    resolver = cache.CacheResolver(
        {query: (clock.seconds(), results)}, reactor=clock)
    # Expiration is based on the minimum TTL across the result sections,
    # so 40 seconds is enough to expire the entry.
    clock.advance(40)
    self.assertNotIn(query, resolver.cache)
    return self.assertFailure(
        resolver.lookupAddress(b"example.com"), dns.DomainError)
def test_normalLookup(self):
    """
    When a cache lookup finds a cached entry from 1 second ago, it is
    returned with a TTL of original TTL minus the elapsed 1 second.
    """
    results = (
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                      dns.Record_A("127.0.0.1", 60))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                      dns.Record_A("127.0.0.1", 50))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                      dns.Record_A("127.0.0.1", 40))],
    )
    clock = task.Clock()
    resolver = cache.CacheResolver(reactor=clock)
    resolver.cacheResult(
        dns.Query(name=b"example.com", type=dns.A, cls=dns.IN), results)
    clock.advance(1)

    def checkResult(result):
        # Each section's TTL is reduced by the elapsed second.
        self.assertEqual(result[0][0].ttl, 59)
        self.assertEqual(result[1][0].ttl, 49)
        self.assertEqual(result[2][0].ttl, 39)
        self.assertEqual(result[0][0].name.name, b"example.com")

    return resolver.lookupAddress(b"example.com").addCallback(checkResult)
def test_addressRecord3(self):
    """A DNS 'A' query handles edge-case addresses (all-ones, all-zeros)."""
    expected = [
        dns.Record_A('255.255.255.254', ttl=19283784),
        dns.Record_A('0.0.0.0', ttl=19283784),
    ]
    return self.namesTest(
        self.resolver.lookupAddress('host-two.test-domain.com'), expected)
def test_cachedResultExpires(self):
    """
    Once the TTL has been exceeded, the result is removed from the cache.
    """
    results = (
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                      dns.Record_A("127.0.0.1", 60))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                      dns.Record_A("127.0.0.1", 50))],
        [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                      dns.Record_A("127.0.0.1", 40))],
    )
    clock = task.Clock()
    resolver = cache.CacheResolver(reactor=clock)
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    resolver.cacheResult(query, results)
    # 40s is the minimum TTL across the sections, so the entry expires now.
    clock.advance(40)
    self.assertNotIn(query, resolver.cache)
    return self.assertFailure(
        resolver.lookupAddress(b"example.com"), dns.DomainError)
def test_mailExchangeRecord(self):
    """
    The DNS client can issue an MX query and receive a response including
    an MX record as well as any A record hints.
    """
    expected = [
        dns.Record_MX(10, b"host.test-domain.com", ttl=19283784),
        dns.Record_A(b"123.242.1.5", ttl=19283784),
        dns.Record_A(b"0.255.0.255", ttl=19283784),
    ]
    return self.namesTest(
        self.resolver.lookupMailExchange(b"test-domain.com"), expected)
def lookupAddress(self, name, timeout=None):
    """
    Look up *name* in the hosts file and return a Deferred firing with a
    single A record, or failing with L{dns.DomainError} if absent.
    """
    address = searchFileFor(self.file, name)
    if not address:
        return defer.fail(failure.Failure(dns.DomainError(name)))
    header = dns.RRHeader(
        name, dns.A, dns.IN, self.ttl, dns.Record_A(address, self.ttl))
    return defer.succeed([(header,), (), ()])
def _doDynamicResponse(self, query):
    """
    Calculate the response to a query, choosing a reply IP based on the
    GeoIP country of the querying peer.
    """
    reply_ip = self._ns_ip
    query_ip = self._geoip.ip_2_city(self.peer_address.host)
    print(json.dumps(query_ip, indent=4, sort_keys=True))
    print_blue("Receiving >>>>>> ....")
    print_green(str(query_ip))
    print_blue("Replying with <<<<<< ....")
    labels = query.name.name.split(b'.')
    if labels[0].lower().startswith(self._ns_pattern):
        # NS-style query: answer with the nameserver's own IP.
        print_red('NS: ' + str(reply_ip))
    else:
        # Pick (and consume) the first candidate from a different country.
        for candidate in self._IPs:
            if query_ip['country_name'] != candidate['country_name']:
                reply_ip = candidate['ip']
                self._IPs.remove(candidate)
                break
        print_red(reply_ip)
    answer = dns.RRHeader(
        name=query.name.name, payload=dns.Record_A(address=reply_ip))
    return [answer], [], []
def query(self, query, timeout=None):
    """
    Answer A/SOA queries for names whose label encodes an IP address.

    Returns either a (answers, authorities, additional) tuple or a failed
    Deferred for unsupported types / internal errors.
    """
    # Fixed: `not x in y` -> idiomatic `x not in y`.
    if query.type not in (dns.A, dns.SOA):
        log.debug('Received incompatible query for type %s record' % query.type)
        return defer.fail(error.DomainError())
    name = query.name.name.decode("utf8")
    if name in self.ns_records:
        return [self.ns_records[name]], [self.authority], []
    match = self._matcher.match(name)
    try:
        answers = []
        if match:
            answer = dns.RRHeader(
                name=name,
                payload=dns.Record_A(
                    address=str(netaddr.IPAddress(int(match.group(1))))))
            answers.append(answer)
        authorities = [self.authority]
        additional = []
        return answers, authorities, additional
    # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        log.failure("Failure in serving address for query %s" % name)
        return defer.fail(error.DomainError())
def get(self, name, default=tuple()):
    """
    Map a DNS name under ``self.base`` to an A/AAAA record whose address is
    encoded in the leading label; return *default* when no address parses.

    NS queries for the base itself return the NS record plus extras.
    """
    log.info("Original '{name}'", name=name)
    if name == self.base:
        return (dns.Record_NS(self.base, ttl=604800),) + self.extra_records
    if not name.endswith(self.base):
        return default
    try:
        # Remove base name and trailing dot.
        local_name = name[:-len(self.base) - 1]
        # ip_address handles bytes as a big integer, need str.
        _name = local_name.decode('utf-8')
        # Try to handle other representations for IPv6.
        if "-" in _name or _name.count(".") > 3:
            _name = _name.replace("-", ":").replace(".", ":")
        ip = ip_address(_name)
    # Fixed: bare `except:` narrowed. UnicodeDecodeError subclasses
    # ValueError, so this covers both decode and address-parse failures.
    except ValueError:
        return default
    try:
        if ip.version == 6:
            record = dns.Record_AAAA(address=ip.exploded, ttl=604800)
        elif ip.version == 4:
            record = dns.Record_A(address=ip.exploded, ttl=604800)
        else:
            raise NotImplementedError("What's dis? v8?")
    # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        return default
    return (record,)
def _lookup(self, name, cls, type, timeout):
    """
    Build RRHeaders for every record of every type stored for *name* and
    return them as a fired Deferred of (results, authority, additional).
    """
    log.msg("Looking up type %s records for hostname: %s" % (type, name))
    all_types = self.names.lookup(name, type)
    results = []
    authority = []
    additional = []
    if all_types:
        log.msg("Got results.")
    else:
        log.msg("No results.")
    for record_type, records in all_types.items():
        for data, metadata in records.items():
            if record_type == A:
                payload = dns.Record_A(data)
            elif record_type == CNAME:
                # TODO: Add proper CNAME support that sends corresponding "A" records.
                payload = dns.Record_CNAME(data)
            elif record_type == MX:
                payload = dns.Record_MX(metadata["preference"], data)
            elif record_type == NS:
                payload = dns.Record_NS(data)
            results.append(dns.RRHeader(
                name, type=record_type, payload=payload,
                ttl=metadata["ttl"], auth=True))
    return defer.succeed((results, authority, additional))
def __parseLine(self, line):
    """
    Parse one zone-file line of the form ``name type value`` into an
    L{dns.RRHeader}.

    @raise RuntimeError: if the line has the wrong number of tokens or an
        unknown record type.
    """
    tokens = line.split(None, 2)
    # Reject if incorrectly formatted.
    if len(tokens) != 3:
        raise RuntimeError(
            "line '%s': wrong # of tokens %d." % (line, len(tokens)))
    rname, rtype, rvalue = tokens
    # If rvalue is a list, make sure to store it as one!
    if rvalue.startswith("["):
        rvalue = json.loads(rvalue)
    # Create the correct payload for the record type.
    payload = None
    if rtype == "A":
        payload = dns.Record_A(address=rvalue)
    elif rtype == "CNAME":
        payload = dns.Record_CNAME(name=rvalue)
    elif rtype == "MX":
        payload = dns.Record_MX(name=rvalue[0], preference=int(rvalue[1]))
    elif rtype == "NS":
        payload = dns.Record_NS(name=rvalue)
    elif rtype == "SOA":
        payload = dns.Record_SOA(mname=rvalue[0], rname=rvalue[1])
    elif rtype == "TXT":
        payload = dns.Record_TXT(data=[rvalue])
    else:
        # Fixed: `raise "cannot parse line!"` raised a str, which is a
        # TypeError in Python 3; raise a real exception instead.
        raise RuntimeError("cannot parse line!")
    return dns.RRHeader(name=rname, type=self.__query_types[rtype],
                        payload=payload, ttl=0)  # set TTL to 0 for now so that we can
def _do_A_response(self, name=None):
    """Build a one-answer A response pointing at the configured public IP."""
    answer = dns.RRHeader(
        name=name,
        type=dns.A,
        payload=dns.Record_A(ttl=10, address=config.PUBLIC_IP),
    )
    return [answer], [], []
def _doDynamicResponse(self, query):
    """Answer every query with a single A record pointing at localhost."""
    loopback = dns.Record_A(address=b"127.0.0.1")
    answer = dns.RRHeader(name=query.name.name, payload=loopback)
    return [answer], [], []
def test_non_srv_answer(self):
    """
    Test the behaviour when the dns server gives us a spurious non-SRV
    response: the A record is ignored and only the SRV answer is used.
    """
    service_name = b"test_service.example.com"
    lookup_deferred = Deferred()
    dns_client_mock = Mock()
    dns_client_mock.lookupService.return_value = lookup_deferred
    srv_cache = {}
    resolver = SrvResolver(dns_client=dns_client_mock, cache=srv_cache)

    resolve_d = resolver.resolve_service(service_name)
    self.assertNoResult(resolve_d)

    # Fire the lookup with a bogus A record mixed in with the SRV answer.
    answers = [
        dns.RRHeader(type=dns.A, payload=dns.Record_A()),
        dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"host")),
    ]
    lookup_deferred.callback((answers, None, None))

    servers = self.successResultOf(resolve_d)
    self.assertEquals(len(servers), 1)
    self.assertEquals(servers, srv_cache[service_name])
    self.assertEquals(servers[0].host, b"host")
def host(localname):
    """
    Query mDNS for *localname* and return (answers, [], []) with one DNS A
    record per IPv4 address collected before ``timeout`` expires.
    """
    class _Collector(RecordUpdateListener):
        def __init__(self):
            self.addrs = []
            self.time = time.time()

        def update_record(self, zc, now, record):
            # Only collect IPv4 A records (4 raw address bytes).
            if record.type == _TYPE_A and len(record.address) == 4:
                self.addrs.append(
                    socket.inet_ntop(socket.AF_INET, record.address))

    collector = _Collector()
    question = DNSQuestion(localname, _TYPE_A, _CLASS_IN)
    self.zeroconf.add_listener(collector, question)
    outgoing = DNSOutgoing(_FLAGS_QR_QUERY)
    outgoing.add_question(question)
    self.zeroconf.send(outgoing)
    # Poll until at least one address arrives or the deadline passes.
    while not collector.addrs and time.time() - collector.time < timeout:
        time.sleep(0.1)
    self.zeroconf.remove_listener(collector)
    answers = [
        dns.RRHeader(name=query.name.name, ttl=ttl, type=dns.A,
                     payload=dns.Record_A(addr))
        for addr in collector.addrs
    ]
    return answers, [], []
def lookupNameservers(self, name, timeout=None):
    """ Answer NS record requests """
    name_is_self = name in [self.wildcard_domain, self.ns_domain]
    # Fail for domains this server is not responsible for.
    if not (name.endswith('.' + self.wildcard_domain) or name_is_self):
        return defer.fail(
            failure.Failure(dns.AuthoritativeDomainError(name)))
    # Authoritative NS answer for our domain.
    answer = dns.RRHeader(
        name=name,
        type=dns.NS,
        payload=dns.Record_NS(name=self.ns_domain),
        auth=True,
        ttl=TTL,
    )
    # Additional section: the NS host's A record (glue).
    glue = dns.RRHeader(
        name=name,
        payload=dns.Record_A(address=self.my_ip),
        ttl=TTL,
    )
    return defer.succeed(([answer], [], [glue]))
def testCNAMEAdditional(self):
    """Test additional processing for CNAME records"""
    expected = [
        dns.Record_CNAME('test-domain.com', ttl=19283784),
        dns.Record_A('127.0.0.1', ttl=19283784),
    ]
    return self.namesTest(
        self.resolver.lookupAddress('cname.test-domain.com'), expected)
def lookupAddress(self, name, timeout=None):
    """
    Resolve *name* via the local cache, then via the p2p server over ZeroMQ;
    fall back to a normal DNS lookup when the name is unknown or the p2p
    server is unreachable.
    """
    # Find out if this is a .kad. request with a still-valid cache entry.
    if name in self.cache and self.cache[name].is_valid():
        ip = self.cache[name].get_ip()  # get the cached result
        if ip == "0.0.0.0":
            # Sentinel: the p2p network says the name doesn't exist.
            return self._lookup(name, dns.IN, dns.A, timeout)
        return defer.succeed([(dns.RRHeader(name, dns.A, dns.IN, self.ttl,
                                            dns.Record_A(ip, self.ttl)), ),
                              (), ()])
    else:
        try:
            self.socket.send(name)
            # Our p2p server should answer within 5 ms.
            socks = dict(self.poller.poll(timeout=5))
            if self.socket in socks and socks[self.socket] == zmq.POLLIN:
                # Reply format is "IP TTL".
                msg = self.socket.recv().split(' ')
                self.cache[name] = CacheEntry(msg[0], msg[1])
                if msg[0] == "0.0.0.0":
                    # Entry doesn't exist; fall through to plain DNS.
                    return self._lookup(name, dns.IN, dns.A, timeout)
                # Re-enter with the freshly cached entry.
                return self.lookupAddress(name)
        # Fixed: `zmq._zmq.ZMQError` reaches into a private module; the
        # public, documented name is `zmq.ZMQError`.
        except zmq.ZMQError:
            log.msg("please start p2p-dns server")
    return self._lookup(name, dns.IN, dns.A, timeout)
def setUp(self):
    """Prepare an AXFRController plus the zone transfer record sequence
    (SOA, NS, MX, A, then the closing SOA)."""
    self.results = None
    self.d = defer.Deferred()
    self.d.addCallback(self._gotResults)
    self.controller = client.AXFRController('fooby.com', self.d)

    soa_payload = dns.Record_SOA(
        mname='fooby.com', rname='hooj.fooby.com', serial=100,
        refresh=200, retry=300, expire=400, minimum=500, ttl=600)
    self.soa = dns.RRHeader(
        name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400,
        auth=False, payload=soa_payload)

    ns_header = dns.RRHeader(
        name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
        payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700))
    mx_header = dns.RRHeader(
        name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
        payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com',
                              ttl=700))
    a_header = dns.RRHeader(
        name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
        payload=dns.Record_A(address='64.123.27.105', ttl=700))

    # A zone transfer is bracketed by the SOA record on both ends.
    self.records = [self.soa, ns_header, mx_header, a_header, self.soa]
def get(self, name, default=tuple()):
    """
    Map a name under ``self.base`` to an A/AAAA record whose address is
    derived from the word-encoded leading label; return *default* when the
    name does not belong to us or fails to decode.
    """
    # Return NS entries for the base name itself.
    if name == self.base:
        return (dns.Record_NS(self.base, ttl=TTL),) + self.extra_records
    if not name.endswith(self.base):
        return default
    # Remove base name.
    local_name = name[: -len(self.base)]
    # Ensure trailing dot.
    if local_name[-1:] != b".":
        return default
    # Remove trailing dot.
    local_name = local_name[:-1]
    try:
        address = WordsController.words_to_IP(self.base, local_name)
        log.debug(
            "Got {address} for {name}",
            address=address,
            name=name,
        )
        if address.version == 6:
            record = dns.Record_AAAA(address=address.compressed, ttl=TTL)
        elif address.version == 4:
            record = dns.Record_A(address=address.compressed, ttl=TTL)
        else:
            raise NotImplementedError("Unknown version {}".format(address.version))
    # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # treat any ordinary failure (bad words, odd IP version) as NX.
    except Exception:
        return default
    return (record,)
def _aRecords(self, name):
    """Return a tuple of A RRHeaders for every IP address mapped to *name*
    in the hosts file."""
    headers = []
    for addr in search_file_for_all(
            hosts_module.FilePath(self.file), name):
        if hosts_module.isIPAddress(addr):
            headers.append(dns.RRHeader(
                name, dns.A, dns.IN, self.ttl,
                dns.Record_A(addr, self.ttl)))
    return tuple(headers)
def test_resolve(self):
    """SRV resolution follows the SRV target with an A lookup, caches the
    result, and yields the resolved host."""
    service_name = "test_service.examle.com"
    host_name = "example.com"
    ip_address = "127.0.0.1"

    srv_answer = dns.RRHeader(
        type=dns.SRV, payload=dns.Record_SRV(target=host_name, ))
    a_answer = dns.RRHeader(
        type=dns.A, payload=dns.Record_A(address=ip_address, ))

    dns_client_mock = Mock()
    dns_client_mock.lookupService.return_value = ([srv_answer], None, None)
    dns_client_mock.lookupAddress.return_value = ([a_answer], None, None)

    srv_cache = {}
    servers = yield resolve_service(
        service_name, dns_client=dns_client_mock, cache=srv_cache)

    dns_client_mock.lookupService.assert_called_once_with(service_name)
    dns_client_mock.lookupAddress.assert_called_once_with(host_name)
    self.assertEquals(len(servers), 1)
    self.assertEquals(servers, srv_cache[service_name])
    self.assertEquals(servers[0].host, ip_address)
def _do_dynamic_response(self, query):
    """
    Treat the first two labels of the query as (x, y) map coordinates and
    answer with TARGET for non-space cells, or a deterministic decoy from
    OTHERS for blank cells; out-of-range or non-numeric queries get no
    answer. Addresses are emitted as dotted char codes.
    """
    labels = query.name.name.split(b'.')
    try:
        x = int(labels[0])
        y = int(labels[1])
    except ValueError:
        return [], [], []
    # Bounds-check the coordinates against the map.
    if not (0 <= y < len(the_map)) or not (0 <= x < len(the_map[y])):
        return [], [], []
    cell = the_map[y][x]
    if cell.isspace():
        # Deterministic decoy selection from the coordinates.
        address = OTHERS[(y * 9973 + x * 433) % len(OTHERS)]
    else:
        address = TARGET
    dotted = '.'.join(str(ord(ch)) for ch in address)
    answer = dns.RRHeader(
        name=query.name.name,
        payload=dns.Record_A(address=dotted),
    )
    return [answer], [], []
def _Record_A(self, query):
    """Build an authoritative single-A-record answer from the stored
    lookup result."""
    payload = dns.Record_A(address=self.lookup_result['ip'], ttl=5)
    header = dns.RRHeader(
        name=query.name.name,
        type=query.type,
        payload=payload,
        auth=True,
    )
    return [header], [], []
def test_represent_answer_a(self):
    """representAnswer renders an A record as its type plus IPv4 address."""
    record = dns.RRHeader(
        payload=dns.Record_A(address="1.1.1.1"), type=dns.A)
    expected = {'ipv4': '1.1.1.1', 'answer_type': 'A'}
    self.assertEqual(dnst.representAnswer(record), expected)
def test_cache_size():
    """ExtendCacheResolver evicts the oldest entry once cacheSize is
    exceeded (LRU behaviour with cacheSize=2)."""
    def make_result(host, addr):
        # One (answers, authority, additional) triple with TTLs 60/50/40.
        return tuple(
            [dns.RRHeader(host, dns.A, dns.IN, ttl, dns.Record_A(addr, ttl))]
            for ttl in (60, 50, 40)
        )

    r1 = make_result(b"example1.com", "127.0.0.1")
    r2 = make_result(b"example2.com", "127.0.0.2")
    r3 = make_result(b"example3.com", "127.0.0.3")
    query1 = dns.Query(name=b"example1.com", type=dns.A, cls=dns.IN)
    query2 = dns.Query(name=b"example2.com", type=dns.A, cls=dns.IN)
    query3 = dns.Query(name=b"example3.com", type=dns.A, cls=dns.IN)

    clock = task.Clock()
    resolver = ExtendCacheResolver(reactor=clock, cacheSize=2)
    resolver.cacheResult(query1, r1)
    assert query1 in resolver.cache
    resolver.cacheResult(query2, r2)
    assert query2 in resolver.cache
    resolver.cacheResult(query3, r3)
    assert query3 in resolver.cache
    # query1 is out due to the cache size limit.
    assert query1 not in resolver.cache