def test_multipleConcurrentRequests(self):
    """
    L{client.Resolver.query} issues a request for each different
    concurrent query.
    """
    stub = StubDNSDatagramProtocol()
    resolver = client.Resolver(servers=[("example.com", 53)])
    resolver._connectedProtocol = lambda: stub
    issued = stub.queries

    # The first query is passed straight to the underlying protocol.
    resolver.query(dns.Query(b"foo.example.com", dns.A))
    self.assertEqual(len(issued), 1)

    # A query for a different name also reaches the protocol.
    resolver.query(dns.Query(b"bar.example.com", dns.A))
    self.assertEqual(len(issued), 2)

    # A query for a different record type also reaches the protocol.
    resolver.query(dns.Query(b"foo.example.com", dns.A6))
    self.assertEqual(len(issued), 3)
def test_query_svc(client_mock: mock.Mock, resolver: LocalResolver):
    """.svc host must be completed with .cluster.local."""
    resolver.query(dns.Query("some-pod.my-service.ns.svc"))

    expected = dns.Query("some-pod.my-service.ns.svc.cluster.local", dns.A, mock.ANY)
    client_mock.Resolver.return_value.query.assert_called_with(
        expected, timeout=mock.ANY
    )
def test_query_service_ns(client_mock: mock.Mock, resolver: LocalResolver):
    """
    Service name + namespace must be completed with .svc.cluster.local.
    """
    resolver.query(dns.Query("service.given-ns"))

    expected = dns.Query("service.given-ns.svc.cluster.local", dns.A, mock.ANY)
    client_mock.Resolver.return_value.query.assert_called_with(
        expected, timeout=mock.ANY
    )
def test_cache_size():
    """Entries beyond ``cacheSize`` evict the oldest cached query (LRU)."""

    def rrsets(name, ip):
        # One (answers, authority, additional) triple with TTLs 60/50/40,
        # matching the structure cacheResult expects.
        return tuple(
            [dns.RRHeader(name, dns.A, dns.IN, ttl, dns.Record_A(ip, ttl))]
            for ttl in (60, 50, 40)
        )

    r1 = rrsets(b"example1.com", "127.0.0.1")
    r2 = rrsets(b"example2.com", "127.0.0.2")
    r3 = rrsets(b"example3.com", "127.0.0.3")

    query1 = dns.Query(name=b"example1.com", type=dns.A, cls=dns.IN)
    query2 = dns.Query(name=b"example2.com", type=dns.A, cls=dns.IN)
    query3 = dns.Query(name=b"example3.com", type=dns.A, cls=dns.IN)

    clock = task.Clock()
    e = ExtendCacheResolver(reactor=clock, cacheSize=2)

    e.cacheResult(query1, r1)
    assert query1 in e.cache
    e.cacheResult(query2, r2)
    assert query2 in e.cache
    e.cacheResult(query3, r3)
    assert query3 in e.cache
    # query1 is out due to cache size limit
    assert query1 not in e.cache
def testQuery(self):
    """
    A L{dns.Query} for every (name, type, class) combination survives an
    encode/decode round trip unchanged.
    """
    for n in self.names:
        for dnstype in range(1, 17):
            for dnscls in range(1, 5):
                # encode the query
                f = StringIO()
                dns.Query(n, dnstype, dnscls).encode(f)

                # decode the result
                f.seek(0, 0)
                result = dns.Query()
                result.decode(f)
                # assertEquals is a deprecated alias; use assertEqual.
                self.assertEqual(result.name.name, n)
                self.assertEqual(result.type, dnstype)
                self.assertEqual(result.cls, dnscls)
def test_min_ttl():
    """A cached entry lives for at least ``minTTL`` seconds, even when the
    record TTLs are smaller."""
    def header(ttl):
        return dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                            dns.Record_A("127.0.0.1", ttl))

    r = ([header(60)], [header(50)], [header(40)])

    clock = task.Clock()
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    e = ExtendCacheResolver(reactor=clock, minTTL=100)
    e.cacheResult(query, r)

    clock.advance(70)
    # minTTL is 100 seconds so it won't expire
    assert query in e.cache

    # Now it is expired
    clock.advance(30.1)
    assert query not in e.cache
def discoveredAuthority(self, auth, name, cls, type, timeout):
    """Query the freshly discovered authority ``auth`` over UDP for
    ``name``/``type``/``cls`` and return the filtered answer Deferred."""
    from twisted.names import client

    query = dns.Query(name, type, cls)
    resolver = client.Resolver(servers=[(auth, dns.PORT)])
    return resolver.queryUDP([query], timeout).addCallback(
        resolver.filterAnswers
    )
def ipToHostname(self, ad):
    # Try to determine the hostname of the provided address.
    # Returns a deferred, which will callback but never errback.
    # If successful, the callback argument is a hostname string,
    # None otherwise.
    #
    # NOTE(review): getRawIP() is assumed to return a 4-byte (Python 2
    # str) packed IPv4 address — ord() over its chars implies Python 2.
    revip = '.'.join(str(ord(o)) for o in ad.getRawIP()[::-1])
    host = "%s.in-addr.arpa" % revip

    def cb(result):
        # Pull the PTR target out of the first answer record; any shape
        # mismatch (no answers, wrong payload) means "unknown".
        try:
            hostname = result[0][0].payload.name.name
            if not hostname:
                return None
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort intent.
            return None
        return hostname

    def eb(failure):
        # Lookup failures are deliberately mapped to None (never errback).
        return None

    d = self.resolver.query(dns.Query(host, type=dns.PTR))
    d.addCallbacks(cb, eb)
    return d
def _lookup(self, name, cls, type, timeout, addr=None, edns=None):
    """
    Dispatch a query for ``name`` through ``self.resolvers[1:]`` in order,
    each acting as an errback fallback for the previous one.

    The chain is seeded with an immediate L{dns.DomainError} failure so
    that the first fallback resolver always runs; ``self.resolvers[0]``
    is deliberately never queried here.  (Removed a commented-out line
    that used to query it directly.)
    """
    q = dns.Query(name, type, cls)
    d = defer.fail(failure.Failure(dns.DomainError(name)))
    for r in self.resolvers[1:]:
        d = d.addErrback(FailureHandler(r.query, q, timeout, addr, edns))
    return d
def test_expiredTTLLookup(self):
    """
    When the cache is queried exactly as the cached entry should expire but
    before it has actually been cleared, the cache does not return the
    expired entry.
    """
    def header(ttl):
        return dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                            dns.Record_A("127.0.0.1", ttl))

    entry = ([header(60)], [header(50)], [header(40)])

    clock = task.Clock()
    # Make sure timeouts never happen, so entries won't get cleared:
    clock.callLater = lambda *args, **kwargs: None

    key = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c = cache.CacheResolver({key: (clock.seconds(), entry)}, reactor=clock)

    clock.advance(60.1)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError
    )
def test_pendingEmptiedInPlaceOnError(self):
    """
    When the TCP connection attempt fails, the L{client.Resolver.pending}
    list is emptied in place. It is not replaced with a new empty list.
    """
    reactor = proto_helpers.MemoryReactor()
    resolver = client.Resolver(
        servers=[("192.0.2.100", 53)], reactor=reactor
    )
    d = resolver.queryTCP(dns.Query("example.com"))

    host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
    prePending = resolver.pending
    self.assertEqual(len(prePending), 1)

    class SentinelException(Exception):
        pass

    factory.clientConnectionFailed(
        reactor.connectors[0], failure.Failure(SentinelException())
    )

    self.failureResultOf(d, SentinelException)
    # Same list object, now drained — not a fresh empty list.
    self.assertIs(resolver.pending, prePending)
    self.assertEqual(len(prePending), 0)
def pump(self):
    # Drive one cycle of the transport: flush pending data, retire
    # acknowledged messages, then poll the remote side via a DNS CNAME
    # query built from the score board state.
    self.requested -= 1
    # Back off while too many requests are still outstanding.
    if self.requested > REQS:
        return
    last_seen = self.systems.last_seen_remote_mid
    # Every 'ackperiod'-th message id triggers an explicit ack push,
    # but only once per id (guarded by self.last_ack).
    if last_seen % bromine.CONFIG[
            'ackperiod'] == 0 and self.last_ack != last_seen:
        self.last_ack = last_seen
        self.score_board.push_ack(last_seen)
    else:
        self.score_board.last_seen_remote_mid = last_seen
    # Flush buffered outbound payloads to the transport.
    for d in self.systems.data:
        self.transport.write(d)
    # Retire everything the remote side has confirmed seeing.
    # NOTE(review): ack[0] presumably carries the remote's
    # last-seen message id — confirm against the ack producer.
    for ack in self.systems.acks:
        remote_last_seen_remote_mid = ack[0]
        self.score_board.retire(remote_last_seen_remote_mid)
    self.systems.commit()
    # Encode outbound state into a hostname and "send" it as a DNS
    # CNAME lookup (DNS-tunneling style transport).
    host = self.score_board.transmit()
    query = dns.Query(host, dns.CNAME, dns.IN)
    task = self.resolver.queryUDP([query], [20 * SLOW])
    task.addCallback(self.ok_)
    task.addErrback(self.error_)
def lookupAddress(self, name, timeout=None, addr=None, edns=None):
    """
    Resolve ``name`` to an A record.

    The local hosts table (``self.config.hosts``) is consulted first; on a
    hit a synthesized answer triple is returned immediately.  Otherwise the
    query is forwarded to the configured default upstream server over TCP
    or UDP.

    Fixes: the original compared against a ``"None"`` string sentinel with
    ``is not`` (identity comparison with a literal — an interning accident,
    not a reliable check).  A real ``None`` sentinel and ``is not None`` are
    used instead.  A leftover Python-2 ``print addr`` debug statement was
    removed.
    """
    # Look up the local hosts table first.
    result = self.config.hosts.get(name)
    if result is not None:
        # Build the (answers, authority, additional) triple directly.
        return [
            (dns.RRHeader(name, dns.A, dns.IN, self.config.hosts_ttl,
                          dns.Record_A(result, self.config.hosts_ttl)),),
            (),
            (),
        ]

    # No local entry: forward to the default upstream server.
    server = self.config.servers[self.config.default_server]
    q = dns.Query(name, dns.A, dns.IN)
    r = client.Resolver(servers=[(server["host"], server["port"])])
    if server["tcp"]:
        if timeout is None:
            timeout = 10
        d = r.queryTCP([q], timeout)
    else:
        d = r.queryUDP([q], timeout)
    d.addCallback(r.filterAnswers)
    return d
def test_multipleConcurrentFailure(self):
    """
    If the result of a request is an error response, the Deferreds for all
    concurrently issued requests associated with that result fire with the
    L{Failure}.
    """
    stub = StubDNSDatagramProtocol()
    resolver = client.Resolver(servers=[("example.com", 53)])
    resolver._connectedProtocol = lambda: stub
    issued = stub.queries

    query = dns.Query(b"foo.example.com", dns.A)
    firstResult = resolver.query(query)
    secondResult = resolver.query(query)

    class ExpectedException(Exception):
        pass

    # Fail the single underlying request; both callers must see it.
    issued.pop()[-1].errback(failure.Failure(ExpectedException()))

    checks = [
        self.assertFailure(firstResult, ExpectedException),
        self.assertFailure(secondResult, ExpectedException),
    ]
    return defer.gatherResults(checks)
def test_protocolShutDownAfterTimeout(self):
    """
    The L{DNSDatagramProtocol} created when an interim timeout occurs is
    also disconnected from its transport after the Deferred returned by
    its query method completes.
    """
    resolver = client.Resolver(servers=[("example.com", 53)])
    protocols = []
    result = defer.Deferred()
    # First attempt times out; the retry gets the pending Deferred.
    results = [
        defer.fail(failure.Failure(DNSQueryTimeoutError(None))),
        result,
    ]

    class FakeProtocol:
        def __init__(self):
            self.transport = StubPort()

        def query(self, address, query, timeout=10, id=None):
            protocols.append(self)
            return results.pop(0)

    resolver._connectedProtocol = FakeProtocol
    resolver.query(dns.Query(b"foo.example.com"))

    self.assertFalse(protocols[1].transport.disconnected)
    result.callback(dns.Message())
    self.assertTrue(protocols[1].transport.disconnected)
def _lookup(self, name, cls, type, timeout):
    """
    Build a L{dns.Query} for the given parameters and dispatch it via UDP.

    If this query is already outstanding, it will not be re-issued.
    Instead, when the outstanding query receives a response, that response
    will be re-used for this query as well.

    @type name: C{str}
    @type type: C{int}
    @type cls: C{int}

    @return: A L{Deferred} which fires with a three-tuple giving the
        answer, authority, and additional sections of the response or with
        a L{Failure} if the response code is anything other than C{dns.OK}.
    """
    key = (name, type, cls)
    waiting = self._waiting.get(key)

    # Already outstanding: just queue up for the in-flight result.
    if waiting is not None:
        d = defer.Deferred()
        waiting.append(d)
        return d

    # First request for this key: issue it and remember we're in flight.
    self._waiting[key] = []
    d = self.queryUDP([dns.Query(name, type, cls)], timeout)

    def cbResult(result):
        # Fan the (possibly failed) result out to every queued waiter.
        for pending in self._waiting.pop(key):
            pending.callback(result)
        return result

    d.addCallback(self.filterAnswers)
    d.addBoth(cbResult)
    return d
def testBindAndTNamesStyle(self):
    # Bind style = One big single message
    message = self._makeMessage()
    message.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
    message.answers = self.records

    self.controller.messageReceived(message, None)
    self.assertEqual(self.results, self.records)
def test_transfer(self):
    """
    An attempt is made to transfer the zone for the domain the
    L{SecondaryAuthority} was constructed with from the server address it
    was constructed with when L{SecondaryAuthority.transfer} is called.
    """
    secondary = SecondaryAuthority.fromServerAddressAndDomain(
        ('192.168.1.2', 1234), 'example.com')
    secondary._reactor = reactor = MemoryReactorClock()

    secondary.transfer()

    # Verify a connection attempt to the server address above
    host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
    self.assertEqual(host, '192.168.1.2')
    self.assertEqual(port, 1234)

    # See if a zone transfer query is issued.
    proto = factory.buildProtocol((host, port))
    transport = StringTransport()
    proto.makeConnection(transport)

    msg = Message()
    # DNSProtocol.writeMessage length encodes the message by prepending a
    # 2 byte message length to the buffered value.
    msg.decode(StringIO(transport.value()[2:]))

    expected = [dns.Query('example.com', dns.AXFR, dns.IN)]
    self.assertEqual(expected, msg.queries)
def _lookup(self, name, cls, type, timeout):
    """
    Serve the query from the local cache, aging each record's TTL by the
    time elapsed since it was cached.  A miss, or a record whose aged TTL
    is invalid, fails with L{dns.DomainError}.
    """
    now = self._reactor.seconds()
    q = dns.Query(name, type, cls)
    try:
        when, (ans, auth, add) = self.cache[q]
    except KeyError:
        if self.verbose > 1:
            log.msg('Cache miss for ' + repr(name))
        return defer.fail(failure.Failure(dns.DomainError(name)))

    if self.verbose:
        log.msg('Cache hit for ' + repr(name))
    diff = now - when

    def aged(records):
        # Rebuild each header with the remaining TTL.
        return [
            dns.RRHeader(r.name.name, r.type, r.cls, r.ttl - diff, r.payload)
            for r in records
        ]

    try:
        result = (aged(ans), aged(auth), aged(add))
    except ValueError:
        # A negative remaining TTL is rejected by RRHeader.
        return defer.fail(failure.Failure(dns.DomainError(name)))
    return defer.succeed(result)
def _lookup(self, name, cls, type, timeout):
    """
    Build a L{dns.Query} for the given parameters and dispatch it to each
    L{IResolver} in C{self.resolvers} until an answer or
    L{error.AuthoritativeDomainError} is returned.

    @type name: C{str}
    @param name: DNS name to resolve.

    @type type: C{int}
    @param type: DNS record type.

    @type cls: C{int}
    @param cls: DNS record class.

    @type timeout: Sequence of C{int}
    @param timeout: Number of seconds after which to reissue the query.
        When the last timeout expires, the query is considered failed.

    @rtype: L{Deferred}
    @return: A L{Deferred} which fires with a three-tuple of lists of
        L{twisted.names.dns.RRHeader} instances.  The first element of the
        tuple gives answers.  The second element of the tuple gives
        authorities.  The third element of the tuple gives additional
        information.  The L{Deferred} may instead fail with one of the
        exceptions defined in L{twisted.names.error} or with
        C{NotImplementedError}.
    """
    if not self.resolvers:
        return defer.fail(error.DomainError())

    query = dns.Query(name, type, cls)
    first, rest = self.resolvers[0], self.resolvers[1:]
    d = first.query(query, timeout)
    # Each remaining resolver only runs if its predecessor failed.
    for fallback in rest:
        d = d.addErrback(FailureHandler(fallback.query, query, timeout))
    return d
def test_normalLookup(self):
    """
    When a cache lookup finds a cached entry from 1 second ago, it is
    returned with a TTL of original TTL minus the elapsed 1 second.
    """
    r = ([
        dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                     dns.Record_A("127.0.0.1", 60))
    ], [
        dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                     dns.Record_A("127.0.0.1", 50))
    ], [
        dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                     dns.Record_A("127.0.0.1", 40))
    ])

    clock = task.Clock()
    c = cache.CacheResolver(reactor=clock)
    c.cacheResult(dns.Query(name=b"example.com", type=dns.A, cls=dns.IN), r)
    clock.advance(1)

    def cbLookup(result):
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(result[0][0].ttl, 59)
        self.assertEqual(result[1][0].ttl, 49)
        self.assertEqual(result[2][0].ttl, 39)
        self.assertEqual(result[0][0].name.name, b"example.com")

    return c.lookupAddress(b"example.com").addCallback(cbLookup)
def test_cachedResultExpires(self):
    """
    Once the TTL has been exceeded, the result is removed from the cache.
    """
    def header(ttl):
        return dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                            dns.Record_A("127.0.0.1", ttl))

    entry = ([header(60)], [header(50)], [header(40)])

    clock = task.Clock()
    c = cache.CacheResolver(reactor=clock)
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c.cacheResult(query, entry)

    # Expiration is driven by the minimum TTL across the entry (40).
    clock.advance(40)
    self.assertNotIn(query, c.cache)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError
    )
def test_constructorExpires(self):
    """
    Cache entries passed into L{cache.CacheResolver.__init__} get cancelled
    just like entries added with cacheResult
    """
    def header(ttl):
        return dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                            dns.Record_A("127.0.0.1", ttl))

    entry = ([header(60)], [header(50)], [header(40)])

    clock = task.Clock()
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c = cache.CacheResolver({query: (clock.seconds(), entry)}, reactor=clock)

    # 40 seconds is enough to expire the entry because expiration is based
    # on the minimum TTL.
    clock.advance(40)

    self.assertNotIn(query, c.cache)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError
    )
def test_lookup(self):
    """A pre-seeded MX cache entry is served by lookupMailExchange."""
    key = dns.Query(name=b'example.com', type=dns.MX, cls=dns.IN)
    c = cache.CacheResolver({key: (time.time(), ([], [], []))})
    d = c.lookupMailExchange(b'example.com')
    return d.addCallback(self.assertEqual, ([], [], []))
def main(reactor):
    """
    Issue a raw AAAA query for www.google.com against 8.8.8.8 and print
    the result.

    Fixes a misplaced parenthesis: the original evaluated
    ``proto.query(('8.8.8.8', 53)), [dns.Query(...)]`` — a two-element
    tuple — so the query list was never passed to C{query} and
    ``d.addCallback`` would raise AttributeError on the tuple.
    """
    proto = dns.DNSDatagramProtocol(controller=None)
    reactor.listenUDP(0, proto)

    d = proto.query(('8.8.8.8', 53), [dns.Query('www.google.com', dns.AAAA)])
    d.addCallback(printResult)
    return d
def test_protocolShutDownAfterFailure(self):
    """
    If the L{Deferred} returned by L{DNSDatagramProtocol.query} fires with
    a failure, the L{DNSDatagramProtocol} is still disconnected from its
    transport.
    """
    class ExpectedException(Exception):
        pass

    resolver = client.Resolver(servers=[("example.com", 53)])
    protocols = []
    result = defer.Deferred()

    class FakeProtocol:
        def __init__(self):
            self.transport = StubPort()

        def query(self, address, query, timeout=10, id=None):
            protocols.append(self)
            return result

    resolver._connectedProtocol = FakeProtocol
    queryResult = resolver.query(dns.Query(b"foo.example.com"))

    self.assertFalse(protocols[0].transport.disconnected)
    result.errback(failure.Failure(ExpectedException()))
    self.assertTrue(protocols[0].transport.disconnected)

    return self.assertFailure(queryResult, ExpectedException)
def test_singleConcurrentRequest(self):
    """
    L{client.Resolver.query} only issues one request at a time per query.
    Subsequent requests made before responses to prior ones are received
    are queued and given the same response as is given to the first one.
    """
    stub = StubDNSDatagramProtocol()
    resolver = client.Resolver(servers=[('example.com', 53)])
    resolver._connectedProtocol = lambda: stub
    issued = stub.queries
    query = dns.Query(b'foo.example.com', dns.A, dns.IN)

    # The first query should be passed to the underlying protocol.
    firstResult = resolver.query(query)
    self.assertEqual(len(issued), 1)

    # The same query again should not be passed to the underlying protocol.
    secondResult = resolver.query(query)
    self.assertEqual(len(issued), 1)

    # The response to the first query should be sent in response to both
    # queries.
    answer = object()
    response = dns.Message()
    response.answers.append(answer)
    issued.pop()[-1].callback(response)

    def cbFinished(responses):
        firstResponse, secondResponse = responses
        self.assertEqual(firstResponse, ([answer], [], []))
        self.assertEqual(secondResponse, ([answer], [], []))

    return defer.gatherResults([firstResult, secondResult]).addCallback(
        cbFinished
    )
def test_config_with_correct_server_config_of_only_root_domain_and_only_address(tmpdir):
    """A `server=/com/127.0.0.1` line routes `com` queries to that address
    with the default port 53."""
    conf = tmpdir.mkdir('conf').join('dispatch.conf')
    conf.write('server=/com/127.0.0.1')

    resolver = DispatchResolver(
        str(conf.realpath()), servers=[("127.0.0.2", 53)]
    )
    picked = resolver.pickServer([dns.Query(b'com', dns.A, dns.IN)])
    assert picked == ("127.0.0.1", 53)
def _lookup(self, name, cls, type, timeout, addr=None):
    """Try each resolver in order (with ``addr`` forwarded), falling
    through to the next on failure; fail fast when none are configured."""
    if not self.resolvers:
        return defer.fail(dns.DomainError())

    query = dns.Query(name, type, cls)
    d = self.resolvers[0].query(query, timeout, addr)
    for fallback in self.resolvers[1:]:
        d = d.addErrback(FailureHandler(fallback.query, query, timeout, addr))
    return d
def _lookup(self, name, cls, type, timeout):
    """Dispatch the query to the first resolver, chaining each remaining
    resolver as an errback fallback."""
    query = dns.Query(name, type, cls)
    d = self.resolvers[0].query(query, timeout)
    for fallback in self.resolvers[1:]:
        d = d.addErrback(FailureHandler(fallback.query, query, timeout))
    return d