def test_expiredTTLLookup(self):
    """
    When the cache is queried exactly as the cached entry should expire
    but before it has actually been cleared, the cache does not return
    the expired entry.
    """
    # Build an (answers, authority, additional) result whose minimum
    # TTL is 40 seconds.
    sections = []
    for ttl in (60, 50, 40):
        header = dns.RRHeader(
            b"example.com", dns.A, dns.IN, ttl,
            dns.Record_A("127.0.0.1", ttl))
        sections.append([header])
    r = tuple(sections)

    clock = task.Clock()
    # Neutralize callLater so scheduled expirations never fire and the
    # entry is never proactively removed from the cache.
    clock.callLater = lambda *args, **kwargs: None

    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c = cache.CacheResolver({query: (clock.seconds(), r)}, reactor=clock)
    clock.advance(60.1)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError)
def createResolver(servers=None, resolvconf=None, hosts=None):
    """
    Create a resolver chain: hosts file first, then a cache, then a
    client resolver driven by ``servers`` / ``resolvconf``.

    @param servers: optional list of (address, port) DNS server tuples.
    @param resolvconf: optional path to a resolv.conf file; defaults to
        /etc/resolv.conf on posix (ignored elsewhere).
    @param hosts: optional path to a hosts file; defaults to the
        platform's conventional location.
    @return: an IResolver chaining hosts, cache and client lookups.
    """
    if platform.getType() == 'posix':
        # Without this default the Resolver would never consult
        # resolv.conf at all (and can raise if no servers were given
        # either) — matches twisted.names.client.createResolver.
        if resolvconf is None:
            resolvconf = b'/etc/resolv.conf'
        if hosts is None:
            hosts = b'/etc/hosts'
    elif hosts is None:
        hosts = r'c:\windows\hosts'
    theResolver = Resolver(resolvconf, servers)
    hostResolver = hostsModule.Resolver(hosts)
    chain = [hostResolver, cache.CacheResolver(), theResolver]
    return resolve.ResolverChain(chain)
def test_normalLookup(self):
    """
    When a cache lookup finds a cached entry from 1 second ago, it is
    returned with a TTL of original TTL minus the elapsed 1 second.
    """
    r = ([dns.RRHeader(b"example.com", dns.A, dns.IN, 60,
                       dns.Record_A("127.0.0.1", 60))],
         [dns.RRHeader(b"example.com", dns.A, dns.IN, 50,
                       dns.Record_A("127.0.0.1", 50))],
         [dns.RRHeader(b"example.com", dns.A, dns.IN, 40,
                       dns.Record_A("127.0.0.1", 40))])
    clock = task.Clock()
    c = cache.CacheResolver(reactor=clock)
    c.cacheResult(dns.Query(name=b"example.com", type=dns.A, cls=dns.IN), r)
    clock.advance(1)

    def cbLookup(result):
        # assertEquals is deprecated; use assertEqual.
        self.assertEqual(result[0][0].ttl, 59)
        self.assertEqual(result[1][0].ttl, 49)
        self.assertEqual(result[2][0].ttl, 39)
        self.assertEqual(result[0][0].name.name, b"example.com")

    return c.lookupAddress(b"example.com").addCallback(cbLookup)
def prepare_run(run_env):
    """
    Load configuration and record mappings, then populate ``run_env``
    with the IP-pool finder and one (factory, ip) / (protocol, ip) pair
    per local interface for TCP and UDP service.
    """
    # Load main config.
    logger.info('start to load conf/sdns.yaml ......')
    conf = loadconfig('../conf/sdns.yaml')

    # Load the DNS record config files.
    logger.info('start to init IP pool ......')
    finder = ippool.IPPool(conf['IPDATA'], conf['AFILE'])
    run_env['finder'] = finder

    logger.info('start to load A,SOA,NS record ......')
    a_map = loadconfig(conf['AFILE'])
    ns_map = loadconfig(conf['NSFILE'])
    soa_map = loadconfig(conf['SOAFILE'])
    cname_map = loadconfig(conf['CNAMEFILE'])

    # Resolver uses the mappings or falls back to secondary nameservers.
    forwarders = [(addr, conf['dnsforward_port'])
                  for addr in conf['dnsforward_ip']]

    for ifc, ip in get_local_ip():
        # Create the protocols for this interface.
        resolver = dnsserver.MapResolver(
            finder, a_map, ns_map, soa_map, cname_map, servers=forwarders)
        factory = dnsserver.SmartDNSFactory(
            caches=[cache.CacheResolver()], clients=[resolver])
        proto = dns.DNSDatagramProtocol(factory)
        factory.noisy = proto.noisy = False
        run_env['tcp'].append([factory, ip])
        run_env['udp'].append([proto, ip])
def main():
    """
    Start the DNS service over both TCP and UDP, expose the HTTP API
    endpoint when at least one zone is registered, then run the reactor.
    """
    from twisted.names import cache

    caches = [cache.CacheResolver(verbose=config["verbose"])]
    clients = []
    factory = server.DNSServerFactory(
        config["zones"], caches, clients, config["verbose"])
    datagram = dns.DNSDatagramProtocol(factory)

    tcp_service = internet.TCPServer(
        config["dnsport"], factory, interface=config["dnsinterface"])
    udp_service = internet.UDPServer(
        config["dnsport"], datagram, interface=config["dnsinterface"])

    # Start DNS services.
    tcp_service.startService()
    udp_service.startService()

    if WordsController.data:
        # Create HTTP server endpoint only if at least a zone is
        # registered.
        endpoint = endpoints.serverFromString(reactor, config["apiendpoint"])
        endpoint.listen(Site(WordsController.app.resource()))

    print("Starting reactor")
    reactor.run()
def test_cachedResultExpires(self):
    """
    Once the TTL has been exceeded, the result is removed from the cache.
    """
    sections = []
    for ttl in (60, 50, 40):
        sections.append([dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                                      dns.Record_A("127.0.0.1", ttl))])
    r = tuple(sections)

    clock = task.Clock()
    c = cache.CacheResolver(reactor=clock)
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c.cacheResult(query, r)

    # Expiry is keyed on the smallest TTL in the result (40 seconds).
    clock.advance(40)
    self.assertNotIn(query, c.cache)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError)
def __init__(self):
    """
    Initialise the query counter, its protecting lock, and a resolver
    chain that consults the in-memory cache before /etc/resolv.conf.
    """
    self._query = 0
    self._lock = Lock()
    self._cache = cache.CacheResolver()
    self._resolve = resolve.ResolverChain(
        [self._cache, client.Resolver('/etc/resolv.conf')])
def test_constructorExpires(self):
    """
    Cache entries passed into L{cache.CacheResolver.__init__} get
    cancelled just like entries added with cacheResult.
    """
    sections = []
    for ttl in (60, 50, 40):
        sections.append([dns.RRHeader(b"example.com", dns.A, dns.IN, ttl,
                                      dns.Record_A("127.0.0.1", ttl))])
    r = tuple(sections)

    clock = task.Clock()
    query = dns.Query(name=b"example.com", type=dns.A, cls=dns.IN)
    c = cache.CacheResolver({query: (clock.seconds(), r)}, reactor=clock)

    # 40 seconds is enough to expire the entry because expiration is
    # based on the minimum TTL.
    clock.advance(40)
    self.assertNotIn(query, c.cache)
    return self.assertFailure(
        c.lookupAddress(b"example.com"), dns.DomainError)
def test_lookup(self):
    """
    A cached empty (answers, authority, additional) result is returned
    by the matching lookup method.
    """
    query = dns.Query(name=b'example.com', type=dns.MX, cls=dns.IN)
    c = cache.CacheResolver({query: (time.time(), ([], [], []))})
    d = c.lookupMailExchange(b'example.com')
    return d.addCallback(self.assertEqual, ([], [], []))
def prepare_run(run_env):
    """
    Attach one caching DNS (factory, ip) and (protocol, ip) pair to
    ``run_env`` for every local interface address.
    """
    for ifc, ip in get_local_ip():
        # Create the protocols for this interface.
        resolver = dnsserver.MapResolver(config)
        factory = dnsserver.SmartDNSFactory(
            caches=[cache.CacheResolver()], clients=[resolver])
        proto = dns.DNSDatagramProtocol(factory)
        factory.noisy = proto.noisy = False
        run_env['tcp'].append([factory, ip])
        run_env['udp'].append([proto, ip])
def create_application():
    """
    Build the txdnsredis twistd Application: a Redis-backed resolver
    fronted by an in-memory cache, served on TCP and UDP port 53.
    """
    redis_conn = txredisapi.lazyRedisConnectionPool()
    backend = RedisResolverBackend(redis_conn, servers=[('8.8.8.8', 53)])

    application = service.Application("txdnsredis")
    services = service.IServiceCollection(application)

    dnsFactory = server.DNSServerFactory(
        caches=[cache.CacheResolver()], clients=[backend])
    internet.TCPServer(53, dnsFactory).setServiceParent(services)
    internet.UDPServer(
        53, dns.DNSDatagramProtocol(dnsFactory)).setServiceParent(services)
    return application
def create_resolver(servers=None, resolvconf=None, hosts=None):
    """
    Assemble a hosts-file → cache → client resolver chain, with
    platform-appropriate defaults for the hosts and resolv.conf paths.
    """
    if platform.getType() == 'posix':
        resolvconf = b'/etc/resolv.conf' if resolvconf is None else resolvconf
        hosts = b'/etc/hosts' if hosts is None else hosts
        the_resolver = client.Resolver(resolvconf, servers)
        host_resolver = Resolver(hosts)
    else:
        hosts = r'c:\windows\hosts' if hosts is None else hosts
        from twisted.internet import reactor
        # On non-posix platforms bootstrap from the threaded resolver
        # instead of reading resolv.conf.
        bootstrap = client._ThreadedResolverImpl(reactor)
        host_resolver = Resolver(hosts)
        the_resolver = root.bootstrap(
            bootstrap, resolverFactory=client.Resolver)
    return resolve.ResolverChain(
        [host_resolver, cache.CacheResolver(), the_resolver])
def createResolver(servers=None, resolvconf=None, hosts=None):
    """
    Build a hosts-file → cache → client resolver chain around
    HardcoreResolver, with platform-appropriate path defaults.
    """
    from twisted.names import resolve, cache, root, hosts as hostsModule

    if platform.getType() == 'posix':
        resolvconf = '/etc/resolv.conf' if resolvconf is None else resolvconf
        hosts = '/etc/hosts' if hosts is None else hosts
        theResolver = HardcoreResolver(resolvconf, servers)
        hostResolver = hostsModule.Resolver(hosts)
    else:
        hosts = r'c:\windows\hosts' if hosts is None else hosts
        from twisted.internet import reactor
        # Non-posix: bootstrap from the reactor's threaded resolver.
        bootstrap = _ThreadedResolverImpl(reactor)
        hostResolver = hostsModule.Resolver(hosts)
        theResolver = root.bootstrap(bootstrap)
    return resolve.ResolverChain(
        [hostResolver, cache.CacheResolver(), theResolver])
def _buildResolvers(config):
    """
    Build DNS resolver instances in an order which leaves recursive
    resolving as a last resort.

    @type config: L{Options} instance
    @param config: Parsed command-line configuration

    @return: Two-item tuple of a list of cache resolvers and a list of
        client resolvers
    """
    from twisted.names import client, cache, hosts

    caches = []
    clients = []
    if config["cache"]:
        caches.append(cache.CacheResolver(verbose=config["verbose"]))
    if config["hosts-file"]:
        clients.append(hosts.Resolver(file=config["hosts-file"]))
    if config["recursive"]:
        clients.append(client.createResolver(resolvconf=config["resolv-conf"]))
    return caches, clients
def createResolver(configuration):
    """
    Build a resolver from the DNS_RESOLVER section of ``configuration``.

    Chains a hosts-file resolver (when a file is configured), then a
    cache plus client resolver (when any servers are configured); falls
    back to a BlockingResolver when nothing is configured.

    @param configuration: mapping with a "DNS_RESOLVER" section holding
        "HOSTS"/"FILE" and a "SERVERS" list of ADDRESS/PORT entries.
    @return: an IResolver implementation.
    """
    resolver_config = configuration["DNS_RESOLVER"]
    resolverFile = resolver_config["HOSTS"]["FILE"]
    # Idiomatic iteration instead of the original index-based while loop.
    resolverServers = [(entry["ADDRESS"], entry["PORT"])
                       for entry in resolver_config["SERVERS"]]

    resolvers = []
    if resolverFile != "":
        resolvers.append(HostsResolver(file=resolverFile))
    if resolverServers:
        # Only add a cache when there is an upstream client behind it.
        resolvers.append(cache.CacheResolver())
        resolvers.append(ClientResolver(servers=resolverServers))

    if resolvers:
        return resolve.ResolverChain(resolvers)
    return base.BlockingResolver()
def createResolver(servers=None, resolvconf=None, hosts=None):
    r"""
    Create and return a Resolver.

    @type servers: C{list} of C{(str, int)} or L{None}
    @param servers: If not L{None}, a list of domain name servers to
        attempt to use; each server is an (address, port) tuple with the
        address in dotted-quad C{str} form.

    @type resolvconf: C{str} or L{None}
    @param resolvconf: If not L{None}, on posix systems interpreted as an
        alternate resolv.conf to use; does nothing on windows. If
        L{None}, /etc/resolv.conf will be used.

    @type hosts: C{str} or L{None}
    @param hosts: If not L{None}, an alternate hosts file to use. If
        L{None}, /etc/hosts is used on posix and C:\windows\hosts on
        windows.

    @rtype: C{IResolver}
    """
    if platform.getType() == "posix":
        resolvconf = b"/etc/resolv.conf" if resolvconf is None else resolvconf
        hosts = b"/etc/hosts" if hosts is None else hosts
        theResolver = Resolver(resolvconf, servers)
        hostResolver = hostsModule.Resolver(hosts)
    else:
        hosts = r"c:\windows\hosts" if hosts is None else hosts
        from twisted.internet import reactor
        # Non-posix platforms bootstrap via the threaded resolver.
        bootstrap = _ThreadedResolverImpl(reactor)
        hostResolver = hostsModule.Resolver(hosts)
        theResolver = root.bootstrap(bootstrap, resolverFactory=Resolver)
    return resolve.ResolverChain(
        [hostResolver, cache.CacheResolver(), theResolver])
def createResolver(servers=None, resolvconf=None, hosts=None):
    # Raw docstring: the literal C:\windows\hosts below would otherwise
    # be an invalid \w escape sequence.
    r"""
    Create and return a Resolver.

    @type servers: C{list} of C{(str, int)} or C{None}
    @param servers: If not C{None}, interpreted as a list of addresses of
    domain name servers to attempt to use. Addresses should be in
    dotted-quad form.

    @type resolvconf: C{str} or C{None}
    @param resolvconf: If not C{None}, on posix systems will be
    interpreted as an alternate resolv.conf to use. Will do nothing on
    windows systems. If C{None}, /etc/resolv.conf will be used.

    @type hosts: C{str} or C{None}
    @param hosts: If not C{None}, an alternate hosts file to use. If
    C{None} on posix systems, /etc/hosts will be used. On windows,
    C:\windows\hosts will be used.

    @rtype: C{IResolver}
    """
    from twisted.names import resolve, cache, root, hosts as hostsModule
    if platform.getType() == 'posix':
        if resolvconf is None:
            resolvconf = '/etc/resolv.conf'
        if hosts is None:
            hosts = '/etc/hosts'
        theResolver = Resolver(resolvconf, servers)
        hostResolver = hostsModule.Resolver(hosts)
    else:
        if hosts is None:
            hosts = r'c:\windows\hosts'
        from twisted.internet import reactor
        bootstrap = _ThreadedResolverImpl(reactor)
        hostResolver = hostsModule.Resolver(hosts)
        theResolver = root.bootstrap(bootstrap)
    L = [hostResolver, cache.CacheResolver(), theResolver]
    return resolve.ResolverChain(L)
def main(opts):
    """
    Run a DNS server that answers from a tag-driven hostmap and falls
    back to the system resolver, on both UDP and TCP.

    @param opts: parsed options providing aws_credentials_path, tag,
        update_interval, ttl and bind ("host:port").
    """
    logging.basicConfig(
        handlers=[logging.StreamHandler()],
        level=logging.INFO,
        format="%(asctime)s %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S%z",
    )
    hostmap = {}
    # Background hostmap refresh; daemon thread dies with the process.
    update_thread = Thread(
        target=hostmap_updater,
        args=(opts.aws_credentials_path, opts.tag, opts.update_interval,
              hostmap),
        daemon=True,
    )
    update_thread.start()
    factory = server.DNSServerFactory(
        caches=[
            cache.CacheResolver(),
        ],
        clients=[
            DynamicResolver(hostmap=hostmap, ttl=opts.ttl),
            client.Resolver(resolv="/etc/resolv.conf"),
        ],
    )
    protocol = dns.DNSDatagramProtocol(controller=factory)
    bind_host, bind_port = opts.bind.split(":")
    # Lazy %-style logging args instead of f-strings: formatting only
    # happens when the record is actually emitted.
    logging.info("Binding to %s:%s/udp", bind_host, bind_port)
    reactor.listenUDP(int(bind_port), protocol, interface=bind_host)
    logging.info("Binding to %s:%s/tcp", bind_host, bind_port)
    reactor.listenTCP(int(bind_port), factory, interface=bind_host)
    logging.info("Starting server...")
    reactor.run()
def test_negativeTTLLookup(self):
    """
    When the cache is queried exactly as the cached entry should expire
    but before it has actually been cleared, the TTL will be 0, not
    negative.
    """
    r = ([dns.RRHeader("example.com", dns.A, dns.IN, 60,
                       dns.Record_A("127.0.0.1", 60))],
         [dns.RRHeader("example.com", dns.A, dns.IN, 50,
                       dns.Record_A("127.0.0.1", 50))],
         [dns.RRHeader("example.com", dns.A, dns.IN, 40,
                       dns.Record_A("127.0.0.1", 40))])
    clock = task.Clock()
    # Make sure timeouts never happen, so entries won't get cleared:
    clock.callLater = lambda *args, **kwargs: None
    c = cache.CacheResolver(
        {dns.Query(name="example.com", type=dns.A, cls=dns.IN):
             (clock.seconds(), r)},
        reactor=clock)
    clock.advance(60.1)

    def cbLookup(result):
        # Fixed copy-paste bug: the original asserted result[0][0].ttl
        # three times; each section must be checked. assertEquals is
        # deprecated in favour of assertEqual.
        self.assertEqual(result[0][0].ttl, 0)
        self.assertEqual(result[1][0].ttl, 0)
        self.assertEqual(result[2][0].ttl, 0)
        self.assertEqual(result[0][0].name.name, "example.com")

    return c.lookupAddress("example.com").addCallback(cbLookup)
def main():
    """
    Run the server.

    Parses command-line arguments, loads the YAML configuration, opens a
    database connection pool, and serves DNS over both UDP and TCP via a
    cache-fronted DynamicResolver, polling the database periodically.
    """
    parser = argparse.ArgumentParser(
        description='Resolve DNS queries from Database')
    parser.add_argument('-c', '--config',
                        dest='config_file',
                        type=str,
                        action='store',
                        default='./config.yml',
                        help='Path to the configuration file'
                        )
    parser.add_argument('--port', '-p',
                        dest='port',
                        type=int,
                        action='store',
                        default=10053,
                        help='Port number for the service'
                        )
    parser.add_argument('--dry-run', '-d',
                        dest='dry_run',
                        action='store_true',
                        help='Dry run, just check the config file'
                        )
    #parser.add_argument('--verbose', '-v',
    #                    dest='verbose',
    #                    action='store_true',
    #                    help='Be verbose'
    #)
    params = parser.parse_args()

    # Log to stdout, as this is intended to run in docker
    log.startLogging(sys.stdout)

    # Make new logging style compatible to traditional one:
    # bridge twisted.logger events onto the legacy twisted.python.log.
    def observer(event, log=log):
        log.msg(event['log_format'].format(**event))
        if 'log_failure' in event:
            log.err(event['log_failure'])
    logger = Logger(namespace='default', observer=observer)

    # Read config file
    config = Config(params.config_file, logger)
    logger.debug("Running with the following parameters:\n{data}",
                 data=config)

    # Dry run: exit after the config has been parsed successfully.
    if params.dry_run:
        sys.exit(0)

    # Build a connection lasting the lifetime of the service.
    # cp_reconnect makes the pool recover from dropped DB connections.
    connection = adbapi.ConnectionPool(
        config.db_driver,
        host=config.db_host,
        port=config.db_port,
        user=config.db_user,
        passwd=config.db_passwd,
        db=config.db_name,
        cp_reconnect=True
    )

    # Build a global Resolver lasting the lifetime of the service
    resolver = client.createResolver()
    customResolver = DynamicResolver(config, connection, resolver, logger)

    # Factory and protocol services
    factory = server.DNSServerFactory(
        caches=[
            cache.CacheResolver(),
        ],
        # Use "clients" instead of "authorities", so caching works
        clients=[
            hosts.Resolver(file=config.dns_hosts, ttl=config.dns_ttl),
            customResolver,
        ]
    )
    protocol = dns.DNSDatagramProtocol(controller=factory)

    # Start polling loop, to avoid timeouts
    poller = LoopingCall(customResolver.poll)
    poller.start(config.poll_time)

    # Listen TCP and UDP
    reactor.listenUDP(params.port, protocol)
    reactor.listenTCP(params.port, factory)
    reactor.run()
# NOTE(review): this chunk starts mid-function — the `if` matching the
# `else:` below (and the definitions of r, q and the surrounding def)
# are outside this view. Indentation is reconstructed; confirm against
# the full file.
        timeout = 10
        d = r.queryTCP([q], timeout)
    else:
        d = r.queryUDP([q], timeout)
    d.addCallback(r.filterAnswers)
    return d


# Create the Twisted application framework.
application = service.Application('dnsserver', 1, 1)

# Build the Resolver.
resolver = CustomResolver(config)

# Initialize the protocols.
f = server.DNSServerFactory(caches=[cache.CacheResolver()],
                            clients=[resolver])
p = dns.DNSDatagramProtocol(f)
f.noisy = p.noisy = False

# Register both TCP and UDP ports.
ret = service.MultiService()
PORT = 53
for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]:
    s = klass(PORT, arg)
    s.setServiceParent(ret)

# Run all of the above with Twisted (twistd).
ret.setServiceParent(service.IServiceCollection(application))

# Usage hint for empty arguments.
def __init__(self, redis_client, **kwargs):
    """
    Wire an in-memory caching resolver and a Redis-backed
    DynamicResolver into the parent DNSServerFactory.

    @param redis_client: client handed to DynamicResolver.
    @param kwargs: forwarded to DNSServerFactory; the 'caches' and
        'clients' entries are overwritten here.
    """
    resolver_caches = [cache.CacheResolver()]
    resolver_clients = [DynamicResolver(redis_client)]
    kwargs['caches'] = resolver_caches
    kwargs['clients'] = resolver_clients
    DNSServerFactory.__init__(self, **kwargs)
# NOTE(review): this chunk starts mid-method — the enclosing def and the
# `try:` matching the `except` below are outside this view; indentation
# is reconstructed, confirm against the full file.
# NOTE(review): the second `return` below is unreachable as written —
# looks like one of the two was meant to be removed; verify intent.
            return self._lookup(name, dns.IN, dns.A, timeout)
            return self.lookupAddress(name)
        except zmq._zmq.ZMQError:
            log.msg("please start p2p-dns server")
            return self._lookup(name, dns.IN, dns.A, timeout)


## this sets up the application
application = service.Application('dnsserver', 1, 1)

# set up a resolver that uses the mapping or a secondary nameserver
p2presolver = MapResolver(servers=[('8.8.8.8', 53)])

# create the protocols
f = server.DNSServerFactory(caches=[cache.CacheResolver()],
                            clients=[p2presolver])
p = dns.DNSDatagramProtocol(f)
f.noisy = p.noisy = False

# register as tcp and udp
ret = service.MultiService()
PORT = 53
for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]:
    s = klass(PORT, arg)
    s.setServiceParent(ret)

# run all of the above as a twistd application
ret.setServiceParent(service.IServiceCollection(application))
# NOTE(review): this chunk starts mid-function — the enclosing def and
# the loop containing the append are outside this view; indentation is
# reconstructed, confirm against the full file.
# NOTE(review): `print "..."` below means this file targets Python 2.
        domains.append(parts.pop())
    # Rebuild the domain in original order and test it against the
    # blocklist.
    tmp = [part for part in domains]
    tmp.reverse()
    if '.'.join(tmp) in blockDomains:
        return True
    return False

#print blockDomains

application = service.Application('dnsserver', 1, 1)

chinaResolver = client.Resolver(servers=chinaServers)
freeResolver = client.Resolver(servers=freeServers)

# Cache-fronted factory answering through the BFW resolver.
f = server.DNSServerFactory(caches=[cache.CacheResolver()],
                            clients=[BFWResolver(servers=freeServers)])
p = dns.DNSDatagramProtocol(f)
f.noisy = p.noisy = False

ret = service.MultiService()
s = internet.UDPServer(53, p)
s.setServiceParent(ret)
ret.setServiceParent(service.IServiceCollection(application))

if __name__ == '__main__':
    # Meant to be run under twistd; direct invocation prints usage.
    import sys
    print "Usage: twistd -y %s" % sys.argv[0]