def skipIfLocalhostV4(self):
    """Skip the running test unless "localhost" resolves to an IPv6 address."""
    # The port used here doesn't matter, but some systems require it
    # to be non-zero if we do not also pass AI_PASSIVE.
    addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve("localhost", 80))
    if not any(entry[0] == socket.AF_INET6 for entry in addrinfo):
        self.skipTest("localhost does not resolve to ipv6")
def initialize(self, io_loop, max_clients=10,
               hostname_mapping=None, max_buffer_size=104857600,
               resolver=None, defaults=None):
    """Creates an AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop in order to
    provide limitations on the number of pending connections.
    force_instance=True may be used to suppress this behavior.

    max_clients is the number of concurrent requests that can be in
    progress.  Note that these arguments are only used when the client is
    first created, and will be ignored when an existing client is reused.

    hostname_mapping is a dictionary mapping hostnames to IP addresses.
    It can be used to make local DNS changes when modifying system-wide
    settings like /etc/hosts is not possible or desirable (e.g. in
    unittests).

    max_buffer_size is the number of bytes that can be read by IOStream.
    It defaults to 100mb.
    """
    super(SimpleAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
    self.max_clients = max_clients
    self.queue = collections.deque()    # requests waiting for a free slot
    self.active = {}                    # requests currently in flight
    self.max_buffer_size = max_buffer_size
    # A caller-supplied resolver takes precedence; otherwise build one
    # bound to this client's io_loop.
    self.resolver = resolver or Resolver(io_loop=io_loop)
    if hostname_mapping is not None:
        # Wrap the resolver so mapped hostnames bypass real DNS lookups.
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
class DockerAuthenticator(LocalAuthenticator):
    """A version that performs local system user creation from
    within a docker container.
    """

    # Route HTTP requests for the restuser service through a unix domain
    # socket; configuring with None sets this resolver for the *default*
    # AsyncHTTPClient implementation process-wide.
    resolver = UnixResolver(resolver=Resolver(), socket_path='/restuser.sock')
    AsyncHTTPClient.configure(None, resolver=resolver)
    client = AsyncHTTPClient()

    def system_user_exists(self, user):
        # user_id is stored in state after looking it up
        return user.state and 'user_id' in user.state

    @gen.coroutine
    def add_system_user(self, user):
        """Add a new user.

        This adds the user to the whitelist, and creates a system user by
        accessing a simple REST api.
        """
        try:
            resp = yield self.client.fetch(
                'http://unix+restuser/' + user.name, method='POST', body='{}')
        except HTTPError as e:
            # Log with traceback, then re-raise so the caller sees the failure.
            self.log.error("Failed to create %r", user.name, exc_info=True)
            raise
        # todo: save the user id into the whitelist or somewhere
        info = json.loads(resp.body.decode('utf8', 'replace'))
        self.log.info("Created user %s with uid %i", user.name, info['uid'])
        if user.state is None:
            user.state = {}
        user.state['user_id'] = info['uid']
        self.db.commit()
def main():
    """Resolve each hostname argument with every available Resolver backend.

    NOTE(review): this function ``yield``s resolver futures, so it is
    presumably wrapped with ``@gen.coroutine`` (or driven by ``run_sync``)
    at a site outside this view -- confirm before calling it directly.
    """
    args = parse_command_line()
    if not args:
        args = ['localhost', 'www.google.com',
                'www.facebook.com', 'www.dropbox.com']

    # Always test the default and threaded resolvers; add the optional
    # backends only when their third-party dependency imported successfully.
    resolvers = [Resolver(), ThreadedResolver()]
    if twisted is not None:
        from tornado.platform.twisted import TwistedResolver
        resolvers.append(TwistedResolver())
    if pycares is not None:
        from tornado.platform.caresresolver import CaresResolver
        resolvers.append(CaresResolver())

    # Map the --family command-line option to a socket address family.
    family = {
        'unspec': socket.AF_UNSPEC,
        'inet': socket.AF_INET,
        'inet6': socket.AF_INET6,
    }[options.family]

    for host in args:
        print('Resolving %s' % host)
        for resolver in resolvers:
            addrinfo = yield resolver.resolve(host, 80, family)
            print('%s: %s' % (resolver.__class__.__name__,
                              pprint.pformat(addrinfo)))
        print()
def __init__(self, resolver: Optional[Resolver] = None) -> None:
    """Adopt the caller's resolver, or create (and then own) a default one.

    ``_own_resolver`` records whether this object is responsible for
    closing the resolver later.
    """
    self._own_resolver = resolver is None
    if self._own_resolver:
        self.resolver = Resolver()
    else:
        self.resolver = resolver
def do_register():
    """Register a named port with the control service over its unix socket.

    Exits the process with status 1 on any failure.

    NOTE(review): uses ``yield`` on the fetch future, so this is presumably
    decorated as a coroutine outside this view; it also reads the
    module-level ``args`` -- confirm both at the definition site.
    """
    # Route the fake "unixsocket" host in the request URL to the control socket.
    resolver = UnixResolver(socket_file=args.control_unix_socket,
                            resolver=Resolver())
    AsyncHTTPClient.configure(None, resolver=resolver)
    client = AsyncHTTPClient()
    mtype = 'application/json'
    headers = {'Content-Type': mtype}
    body = json.dumps({
        "name": args.name,
        "type": args.type,
        "description": args.description
    })
    try:
        response = yield client.fetch('http://unixsocket/ports/{}'.format(
            args.port), method='PUT', headers=headers, body=body)
    except HTTPError as he:
        print("Could not register port: {}".format(he), file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        logging.exception("Could not register port")
        sys.exit(1)
    print(response.body)
class TCPClientTest(AsyncTestCase):
    """Tests for TCPClient against a locally started TestTCPServer."""

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        """Start a TestTCPServer for *family*; return the port it listens on."""
        if family == socket.AF_UNSPEC and "TRAVIS" in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        # BUG FIX: the method name had been corrupted to "skipIf10.0.0.7V4"
        # (dots are not legal in Python identifiers, so this class failed to
        # parse) and "localhost" in the body had been replaced by the literal
        # IPv4 address "10.0.0.7", which can never resolve to IPv6.  Restored
        # the conventional helper, matching the sibling implementations.
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        addrinfo = self.io_loop.run_sync(
            lambda: Resolver().resolve("localhost", 80))
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")
def setUp(self):
    """Bind an unused port, start the echo server, and create a Resolver."""
    super(TrickleTCPTest, self).setUp()
    listener, self.port = bind_unused_port()
    self.server = TestTCPServer(self.io_loop)
    self.server.add_socket(listener)
    self.resolver = Resolver()
def __init__(self, sock):
    """Wrap an existing socket in an IOStream plus read-buffer state.

    Args:
        sock: the socket object to wrap.
    """
    self._iostream = IOStream(sock)
    self._resolver = Resolver()
    self._readtimeout = 0       # 0 means "no timeout set yet"
    self._connecttimeout = 0
    # NOTE(review): StringIO is seeded with a *bytes* literal; that only
    # works on Python 2 (where str is bytes).  On Python 3 this raises
    # TypeError and should be io.BytesIO -- confirm the target version.
    self._rbuffer = StringIO(b'')
    self._rbuffer_size = 0
def __init__(self, host='localhost', port=6379, db=0, password=None,
             socket_timeout=None, socket_connect_timeout=None,
             retry_on_timeout=False, encoding="utf-8",
             encoding_errors='strict', decode_responses=False,
             parser_class=PythonParser):
    """Record connection parameters for a Redis server.

    No network activity happens here; the stream is created lazily
    (``self._stream`` starts as None).
    """
    self.pid = os.getpid()
    self.io_loop = IOLoop.current()

    # Target server coordinates.
    self.host = host
    self.port = int(port)
    self.db = db
    self.password = password

    # Timeout configuration; with no explicit connect timeout, fall back
    # to the read timeout.
    self._timeout = None
    self.socket_timeout = socket_timeout
    self.socket_connect_timeout = socket_connect_timeout or socket_timeout
    self.retry_on_timeout = retry_on_timeout

    # Wire-format helpers and lazily-created I/O objects.
    self.encoder = Encoder(encoding, encoding_errors, decode_responses)
    self._stream = None
    self._parser = parser_class()
    self.resolver = Resolver()

    # Human-readable summary used in repr/log messages.
    self._description_args = dict(host=self.host, port=self.port, db=self.db)
    self._connect_callbacks = []
def initialize(self, max_clients=10, hostname_mapping=None,
               max_buffer_size=104857600, resolver=None, defaults=None,
               max_header_size=None, max_body_size=None):
    """Creates an AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop in order
    to provide limitations on the number of pending connections.
    ``force_instance=True`` may be used to suppress this behavior.

    Note that because of this implicit reuse, unless ``force_instance``
    is used, only the first call to the constructor actually uses its
    arguments. It is recommended to use the ``configure`` method instead
    of the constructor to ensure that arguments take effect.

    ``max_clients`` is the number of concurrent requests that can be
    in progress; when this limit is reached additional requests will be
    queued. Note that time spent waiting in this queue still counts
    against the ``request_timeout``.

    ``hostname_mapping`` is a dictionary mapping hostnames to IP
    addresses. It can be used to make local DNS changes when modifying
    system-wide settings like ``/etc/hosts`` is not possible or
    desirable (e.g. in unittests).

    ``max_buffer_size`` (default 100MB) is the number of bytes that can
    be read into memory at once. ``max_body_size`` (defaults to
    ``max_buffer_size``) is the largest response body that the client
    will accept.  Without a ``streaming_callback``, the smaller of
    these two limits applies; with a ``streaming_callback`` only
    ``max_body_size`` does.

    .. versionchanged:: 4.2
       Added the ``max_body_size`` argument.
    """
    super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
    self.max_clients = max_clients
    self.queue = collections.deque()    # requests waiting for a free slot
    self.active = {}                    # requests currently in flight
    self.waiting = {}                   # queued requests and their timeouts
    self.max_buffer_size = max_buffer_size
    self.max_header_size = max_header_size
    self.max_body_size = max_body_size
    # TCPClient could create a Resolver for us, but we have to do it
    # ourselves to support hostname_mapping.
    if resolver:
        self.resolver = resolver
        self.own_resolver = False   # caller owns it; we must not close it
    else:
        self.resolver = Resolver()
        self.own_resolver = True
    if hostname_mapping is not None:
        # Wrap the resolver so mapped hostnames bypass real DNS lookups.
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
    self.tcp_client = TCPClient(resolver=self.resolver)
def __init__(self, resolver=None):
    """Keep the caller's resolver, or build a default one that we own.

    ``_own_resolver`` is True only when the resolver was created here,
    marking it as ours to close.
    """
    self._own_resolver = resolver is None
    self.resolver = Resolver() if self._own_resolver else resolver
def skipIfLocalhostV4(self):
    """Skip the running test unless "localhost" resolves to an IPv6 address."""
    # The port used here doesn't matter, but some systems require it
    # to be non-zero if we do not also pass AI_PASSIVE.
    Resolver().resolve('localhost', 80, callback=self.stop)
    resolved = self.wait()
    if socket.AF_INET6 not in {entry[0] for entry in resolved}:
        self.skipTest("localhost does not resolve to ipv6")
def __init__(self, resolver=None, io_loop=None):
    """Adopt the caller's resolver, or create one bound to *io_loop*.

    ``_own_resolver`` records whether this object created (and must
    later close) the resolver.
    """
    self.io_loop = io_loop or IOLoop.current()
    self._own_resolver = resolver is None
    if self._own_resolver:
        # Note: deliberately passes the raw argument (possibly None),
        # mirroring how the resolver picks its own current loop.
        self.resolver = Resolver(io_loop=io_loop)
    else:
        self.resolver = resolver
def main():
    """Serve a hello app over a unix socket, then fetch it through that socket.

    NOTE(review): Python 2 syntax (``print response.body``), and the bare
    ``yield`` implies a coroutine decorator outside this view -- confirm.
    """
    app = Application([('/', HelloHandler)])
    server = HTTPServer(app)
    server.add_socket(bind_unix_socket(SOCKPATH))
    # Route the fake "unixsocket" hostname to SOCKPATH for the client.
    resolver = UnixResolver(resolver=Resolver())
    AsyncHTTPClient.configure(None, resolver=resolver)
    response = yield AsyncHTTPClient().fetch('http://unixsocket/')
    print response.body
def _resolve(self, ioloop):
    """
    Resolve host addr (domain)

    Args:
        ioloop (IOLoop): io_loop to use

    Returns:
        Tuple of address family and ip address (first addrinfo entry),
        delivered via gen.Return.
    """
    addrinfo = yield Resolver(io_loop=ioloop).resolve(
        self.addr, int(self.port), socket.AF_UNSPEC)
    raise gen.Return(addrinfo[0])
def __init__(self, io_loop, hostname_mapping=None, max_buffer_size=104857600,
             resolver=None, defaults=None, idle_timeout=30.0):
    """Creates an AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop in order to
    provide limitations on the number of pending connections.
    force_instance=True may be used to suppress this behavior.

    Note that these arguments are only used when the client is first
    created, and will be ignored when an existing client is reused.

    hostname_mapping is a dictionary mapping hostnames to IP addresses.
    It can be used to make local DNS changes when modifying system-wide
    settings like /etc/hosts is not possible or desirable (e.g. in
    unittests).

    max_buffer_size is the number of bytes that can be read by IOStream.
    It defaults to 100mb.

    idle_timeout is how long (in seconds) the kept-alive connection may
    sit unused before being closed.
    """
    self.io_loop = io_loop
    self.queue = collections.deque()    # requests waiting to be sent
    self.active = {}                    # requests currently in flight
    self.max_buffer_size = max_buffer_size
    if resolver:
        self.resolver = resolver
        self.own_resolver = False   # supplied by caller; not closed by us
    else:
        self.resolver = Resolver(io_loop=self.io_loop)
        self.own_resolver = True
    if hostname_mapping is not None:
        # Wrap the resolver so mapped hostnames bypass real DNS lookups.
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
    # Start from the library defaults, then layer caller overrides on top.
    self.defaults = dict(HTTPRequest._DEFAULTS)
    if defaults is not None:
        self.defaults.update(defaults)
    # Single persistent connection reused across requests until it has
    # been idle for idle_timeout seconds.
    self.connection = KeepAliveHTTPConnection(self.io_loop, self,
                                              self.max_buffer_size,
                                              self.resolver)
    self.idle_timeout = idle_timeout
    self._idle_timeout_callback = None
    self.logger = logging.getLogger(self.__class__.__name__)
def initialize(self, io_loop, max_clients=10, hostname_mapping=None,
               max_buffer_size=104857600, resolver=None, defaults=None,
               max_header_size=None):
    """Creates an AsyncHTTPClient.

    (Translated annotation: only one AsyncHTTPClient instance exists
    per IOLoop instance.)

    Only a single AsyncHTTPClient instance exists per IOLoop in order to
    provide limitations on the number of pending connections.
    force_instance=True may be used to suppress this behavior.

    max_clients is the number of concurrent requests that can be in
    progress.  Note that these arguments are only used when the client is
    first created, and will be ignored when an existing client is reused.

    hostname_mapping is a dictionary mapping hostnames to IP addresses.
    It can be used to make local DNS changes when modifying system-wide
    settings like /etc/hosts is not possible or desirable (e.g. in
    unittests).

    max_buffer_size is the number of bytes that can be read by IOStream.
    It defaults to 100mb.
    """
    super(SimpleAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
    self.max_clients = max_clients      # maximum number of concurrent requests
    self.queue = collections.deque()    # a small queue of pending requests
    self.active = {}
    self.waiting = {}
    self.max_buffer_size = max_buffer_size
    self.max_header_size = max_header_size
    # TCPClient could create a Resolver for us, but we have to do it
    # ourselves to support hostname_mapping.
    # (Translated annotation: this is presumably DNS-resolution
    # configuration; set it aside for now.)
    if resolver:
        self.resolver = resolver
        self.own_resolver = False
    else:
        self.resolver = Resolver(io_loop=io_loop)
        self.own_resolver = True
    if hostname_mapping is not None:
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
    # (Translated annotation: the TCP client/connection is created here.)
    self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
def _request_for_tornado_client(urlstring, method="GET", body=None,
                                headers=None):
    """A utility that provides a context that handles
    HTTP, HTTPS, and HTTP+UNIX requests.

    Creates a tornado HTTPRequest object with a URL
    that tornado's HTTPClients can accept.

    If the request is made to a unix socket, temporarily
    configure the AsyncHTTPClient to resolve the URL
    and connect to the proper socket.
    """
    parts = urlsplit(urlstring)
    if parts.scheme in ["http", "https"]:
        pass
    elif parts.scheme == "http+unix":
        # If unix socket, mimic HTTP.
        parts = SplitResult(
            scheme="http",
            netloc=parts.netloc,
            path=parts.path,
            query=parts.query,
            fragment=parts.fragment,
        )

        class UnixSocketResolver(Resolver):
            """A resolver that routes HTTP requests to unix sockets
            in tornado HTTP clients.

            Due to constraints in Tornado's API, the scheme of the URL
            must be `http` (not `http+unix`).  Applications should
            replace the scheme in URLs before making a request to the
            HTTP client.
            """

            def initialize(self, resolver):
                self.resolver = resolver

            def close(self):
                self.resolver.close()

            async def resolve(self, host, port, *args, **kwargs):
                # The "host" component carries the url-encoded socket path.
                return [(socket.AF_UNIX, urldecode_unix_socket_path(host))]

        resolver = UnixSocketResolver(resolver=Resolver())
        AsyncHTTPClient.configure(None, resolver=resolver)
    else:
        raise Exception("Unknown URL scheme.")

    # Yield the request for the given client.
    url = urlunsplit(parts)
    request = HTTPRequest(url, method=method, body=body, headers=headers)
    yield request
def raw_fetch(self, headers, body):
    """Send a hand-assembled raw HTTP request and return the parsed response.

    headers: list of raw header byte strings to send verbatim; a
        Content-Length header for *body* is appended here.
    body: request body bytes.
    Raises via response.rethrow() if the fetch produced an error.
    """
    client = SimpleAsyncHTTPClient(self.io_loop)
    conn = RawRequestHTTPConnection(
        self.io_loop, client,
        httpclient._RequestProxy(httpclient.HTTPRequest(self.get_url("/")),
                                 dict(httpclient.HTTPRequest._DEFAULTS)),
        None, self.stop,
        1024 * 1024, Resolver(io_loop=self.io_loop))
    conn.set_request(
        b"\r\n".join(headers +
                     [utf8("Content-Length: %d\r\n" % len(body))]) +
        b"\r\n" + body)
    response = self.wait()
    client.close()
    response.rethrow()
    return response
def __init__(self, host, port, io_loop, key):
    """Begin an async connection to host:port; DNS resolution starts here.

    key: opaque identifier stored on the connection for later use by callers.
    """
    self.io_loop = io_loop
    self.resolver = Resolver()
    self._callbacks = {}
    self._connected = False
    self.queue = deque()        # pending items (consumed elsewhere)
    self.key = key
    self.stream = None          # IOStream; set once the socket connects
    self.pepv_act_resp = None   # NOTE(review): likely a "prev_act_resp" typo
    self.prof = {}              # presumably profiling/timing data -- confirm
    # Exceptions raised anywhere in the async resolve/connect chain are
    # routed to _handle_exception via the stack context.
    with stack_context.ExceptionStackContext(self._handle_exception):
        self.resolver.resolve(host, port, socket.AF_INET,
                              callback=self._on_resolve)
def __init__(self, host, port, io_loop):
    """Begin an async connection to host:port; DNS resolution starts here.

    The read state machine starts in READ_HEAD (reading response headers).
    """
    self.io_loop = io_loop
    self.resolver = Resolver()
    self.stream = None          # IOStream; set once the socket connects
    self.queue = deque()        # pending items (consumed elsewhere)
    self._callbacks = {}
    self._connected = False
    self.read_state = self.READ_HEAD
    self.prev_response = None
    self.prof = {}              # presumably profiling/timing data -- confirm
    # Exceptions raised anywhere in the async resolve/connect chain are
    # routed to _handle_exception via the stack context.
    with stack_context.ExceptionStackContext(self._handle_exception):
        self.resolver.resolve(host, port, socket.AF_INET,
                              callback=self._on_resolve)
def __init__(self, io_loop=None, hostname_mapping=None,
             max_buffer_size=104857600, max_header_size=None,
             max_body_size=None):
    """Adapter that issues requests through Tornado's TCPClient.

    hostname_mapping, when given, overrides DNS for selected hostnames
    (useful in tests).  Size limits mirror Tornado's client defaults.
    """
    super(HTTPAdapter, self).__init__()
    self.max_buffer_size = max_buffer_size
    self.max_header_size = max_header_size
    self.max_body_size = max_body_size
    self.io_loop = io_loop or IOLoop.current()
    base_resolver = Resolver()
    if hostname_mapping is not None:
        # Route mapped hostnames to fixed addresses, bypassing real DNS.
        base_resolver = OverrideResolver(resolver=base_resolver,
                                         mapping=hostname_mapping)
    self.resolver = base_resolver
    self.tcp_client = TCPClient(resolver=self.resolver)
def do_unregister():
    """Unregister a port from the control service over its unix socket.

    Exits the process with status 1 on any failure.

    NOTE(review): uses ``yield`` on the fetch future, so this is presumably
    decorated as a coroutine outside this view; it also reads the
    module-level ``args`` -- confirm both at the definition site.
    """
    # Route the fake "unixsocket" host in the request URL to the control socket.
    resolver = UnixResolver(socket_file=args.control_unix_socket,
                            resolver=Resolver())
    AsyncHTTPClient.configure(None, resolver=resolver)
    client = AsyncHTTPClient()
    try:
        response = yield client.fetch('http://unixsocket/ports/{}'.format(
            args.port), method='DELETE')
    except HTTPError as he:
        print("Could not unregister port: {}".format(he), file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        logging.exception("Could not unregister port")
        sys.exit(1)
    print(response.body)
def __init__(self, io_loop, request):
    """Set up the event-source client connection for *request*.

    Chooses between two base-class signatures depending on the installed
    Tornado version: older versions take a Resolver directly, newer ones
    take a TCPClient plus an extra size argument.
    """
    self.connect_future = TracebackFuture()
    self.read_future = None
    self.read_queue = collections.deque()
    self.events = []
    if old_tornado:
        # Pre-TCPClient Tornado: the connection wants a Resolver directly.
        # 104857600 is a 100 MB buffer limit.
        self.resolver = Resolver(io_loop=io_loop)
        super(EventSourceClient, self).__init__(io_loop, None, request,
                                                lambda: None,
                                                self._on_http_response,
                                                104857600, self.resolver)
    else:
        # Modern Tornado: pass a TCPClient; the trailing 65536 is
        # presumably the max header size -- confirm against the tornado
        # version in use.
        self.tcp_client = TCPClient(io_loop=io_loop)
        super(EventSourceClient, self).__init__(io_loop, None, request,
                                                lambda: None,
                                                self._on_http_response,
                                                104857600, self.tcp_client,
                                                65536)
def __init__(self, io_loop, request):
    """Prepare a websocket client connection from an HTTP *request*.

    Rewrites the ws/wss URL to http/https, attaches the RFC 6455 upgrade
    headers, and hands the request to the underlying HTTP connection.
    """
    self.connect_future = Future()
    self.read_future = None
    self.read_queue = collections.deque()
    # 16 random bytes, base64-encoded: the Sec-WebSocket-Key nonce.
    self.key = base64.b64encode(os.urandom(16))

    # The underlying HTTP client only speaks http/https, so translate
    # the websocket scheme before issuing the request.
    ws_scheme, sep, remainder = request.url.partition(':')
    http_scheme = {'ws': 'http', 'wss': 'https'}[ws_scheme]
    request.url = http_scheme + sep + remainder

    request.headers.update({
        'Upgrade': 'websocket',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key': self.key,
        'Sec-WebSocket-Version': '13',
    })
    super(WebSocketClientConnection, self).__init__(
        io_loop, None, request, lambda: None, self._on_http_response,
        104857600, Resolver(io_loop=io_loop))
def initialize(  # type: ignore
    self,
    max_clients: int = 10,
    hostname_mapping: Dict[str, str] = None,
    max_buffer_size: int = 104857600,
    resolver: Resolver = None,
    defaults: Dict[str, Any] = None,
    max_header_size: int = None,
    max_body_size: int = None,
) -> None:
    """Creates a SimpleAsyncUDPClient.

    Arguments mirror SimpleAsyncHTTPClient's; this variant drives a
    UDPClient instead of a TCPClient.
    """
    super(SimpleAsyncUDPClient, self).initialize(defaults=defaults)
    self.max_clients = max_clients
    self.queue = (
        collections.deque()
    )  # type: Deque[Tuple[object, UDPRequest, Callable[[UDPResponse], None]]]
    self.active = (
        {}
    )  # type: Dict[object, Tuple[UDPRequest, Callable[[UDPResponse], None]]]
    self.waiting = (
        {}
    )  # type: Dict[object, Tuple[UDPRequest, Callable[[UDPResponse], None], object]]
    self.max_buffer_size = max_buffer_size
    self.max_header_size = max_header_size
    self.max_body_size = max_body_size
    # UDPClient could create a Resolver for us, but we have to do it
    # ourselves to support hostname_mapping.
    if resolver:
        self.resolver = resolver
        self.own_resolver = False   # caller owns it; we must not close it
    else:
        self.resolver = Resolver()
        self.own_resolver = True
    if hostname_mapping is not None:
        # Wrap the resolver so mapped hostnames bypass real DNS lookups.
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
    # NOTE(review): the attribute is named "tcp_client" but holds a
    # UDPClient; presumably kept for base-class compatibility -- confirm.
    self.tcp_client = UDPClient(resolver=self.resolver)
def do_test(ips):
    """Probe candidate IPs for a working front (Python 2 script).

    For each unique IP, overrides DNS for clarkhillgo1.appspot.com to that
    IP and fetches the site; IPs whose response body contains 'GoAgent'
    are appended to all_good_ip.txt.

    NOTE(review): uses ``yield gen.sleep(...)``, so this is presumably a
    @gen.coroutine defined outside this view -- confirm.
    """
    print len(ips)
    rss = []    # IPs that responded with a GoAgent page
    rs = []     # in-flight (future, ip, client) triples
    old = 0     # last printed count of outstanding requests
    ips = list(set(ips))    # de-duplicate
    for i, item in enumerate(ips):
        resolver = OverrideResolver(
            Resolver(),
            mapping={'clarkhillgo1.appspot.com': item.replace('\n', '')})
        request = HTTPRequest('https://clarkhillgo1.appspot.com',
                              validate_cert=False)
        client = SimpleAsyncHTTPClient(resolver=resolver, force_instance=True)
        rs.append((client.fetch(request), item, client))
    while len(rs) > 0:
        # Print progress only when the outstanding count changes.
        if old != len(rs):
            print time.time(), len(rs)
            old = len(rs)
        for f in rs:
            if f[0].done():
                # Rebind rs without the finished entry; the for-loop keeps
                # iterating the *old* list object, so each entry is still
                # visited exactly once this pass.
                rs = [_ for _ in rs if _ != f]
                if f[0].exception():
                    f[2].close()
                    # print 'exception: ', f[0].exception()
                else:
                    body = f[0].result().body
                    if 'GoAgent' in body:
                        print body, f[1]
                        rss.append(f[1])
                    f[2].close()
        yield gen.sleep(0.5)
    if rss:
        with open('./all_good_ip.txt', 'a') as f:
            for r in rss:
                f.writelines(r + '\n')
def __init__(self, io_loop, prefix):
    """HTTP client whose requests are routed to the unix socket at *prefix*.

    NOTE(review): calls ``initialize`` directly from ``__init__`` rather
    than going through AsyncHTTPClient's usual instance-caching
    construction -- presumably deliberate so each socket path gets its own
    client; confirm against the base class in use.
    """
    self._prefix = prefix
    unix_resolver = UnixResolver(resolver=Resolver(), sockpath=prefix)
    super(AsyncUnixHTTPClient, self).initialize(io_loop,
                                                resolver=unix_resolver)
import re
import socket

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver

from trickle import Trickle

# Module-wide resolver shared by the coroutine below.
resolver = Resolver()


@gen.coroutine
def download():
    """Fetch xkcd.com's front page over a raw, trickled socket.

    Connects manually (resolve -> connect -> write request) and reads the
    response headers to extract Content-Length.
    """
    sock = socket.socket(socket.AF_INET)
    trick = Trickle(sock)
    addr_info = yield resolver.resolve(
        'xkcd.com', 80, socket.AF_INET)
    # addr_info entries are (family, sockaddr); take the first address.
    sock_addr = addr_info[0][1]
    yield trick.connect(sock_addr)
    yield trick.write(b'GET / HTTP/1.1\r\nHost: xkcd.com\r\n\r\n')
    headers = yield trick.read_until(b'\r\n\r\n')
    match = re.search(br'Content-Length: (\d+)\r\n', headers)
    # NOTE(review): content_length is computed but the body is not read in
    # the visible portion -- the function may continue beyond this chunk;
    # confirm before assuming it is complete.
    content_length = int(match.group(1))