class TrickleHTTPTest(AsyncHTTPTestCase):
    """Exercise Trickle end-to-end against a real tornado HTTPServer."""

    def setUp(self):
        super(TrickleHTTPTest, self).setUp()
        self.resolver = Resolver()

    def get_app(self):
        # Single-route application serving the handler under test.
        return Application([('/', TrickleTestHandler)])

    @gen_test
    def test_http(self):
        # Resolve the test server's address, speak raw HTTP/1.1 over a
        # Trickle-wrapped socket, and verify the response body.
        resolved = yield self.resolver.resolve(
            'localhost', self.get_http_port(), socket.AF_INET)
        address = resolved[0][1]

        trickle = Trickle(
            socket.socket(socket.AF_INET), io_loop=self.io_loop)
        yield trickle.connect(address)
        yield trickle.write(b'GET / HTTP/1.1\r\n\r\n')

        header_blob = yield trickle.read_until(b'\r\n\r\n')
        length_match = re.search(br'Content-Length: (\d+)\r\n', header_blob)
        body = yield trickle.read_bytes(int(length_match.group(1)))
        self.assertEqual(b'hello', body)
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """

    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is None:
            # No resolver supplied: create one and take ownership of it.
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True
        else:
            self.resolver = resolver
            self._own_resolver = False

    def close(self):
        # Only tear down a resolver this client created itself.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        make_stream = functools.partial(self._create_stream, max_buffer_size)
        connector = _Connector(addrinfo, self.io_loop, make_stream)
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        try:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as exc:
            # Surface constructor failures as a failed Future so the
            # connector treats them like any other connection error.
            failed = Future()
            failed.set_exception(exc)
            return failed
        return stream.connect(addr)
@gen.coroutine
def _resolve(self, ioloop):
    """Resolve this endpoint's host name (``self.addr``) to an address.

    Args:
        ioloop (IOLoop): io_loop the resolver should run on.

    Returns:
        The first resolved entry: a tuple of address family and
        socket address.
    """
    # NOTE: without @gen.coroutine this generator body was never
    # scheduled -- callers received a bare generator object instead of
    # a Future resolving to the address.
    resolver = Resolver(io_loop=ioloop)
    addrinfo = yield resolver.resolve(self.addr, int(self.port),
                                      socket.AF_UNSPEC)
    raise gen.Return(addrinfo[0])
class TCPClient(object):
    """A non-blocking TCP connection factory.

    Resolves the host, then uses ``_Connector`` to attempt connections
    across the resolved address families and returns the first stream
    to connect successfully.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            # Caller-supplied resolver: the caller keeps ownership.
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        # Close the resolver only if this client created it.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            # host/ssl_options/max_buffer_size are bound here; _Connector
            # supplies (af, addr) per attempt.
            functools.partial(self._create_stream, host, ssl_options,
                              max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        raise gen.Return(stream)

    def _create_stream(self, host, ssl_options, max_buffer_size, af, addr):
        # TODO: we should connect in plaintext mode and start the
        # ssl handshake only after stopping the _Connector.
        if ssl_options is None:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        else:
            # SSL from the start: the handshake races alongside other
            # connection attempts (see TODO above).
            stream = SSLIOStream(socket.socket(af),
                                 io_loop=self.io_loop,
                                 ssl_options=ssl_options,
                                 max_buffer_size=max_buffer_size)
        return stream.connect(addr, server_hostname=host)
class TCPClient(object):
    """A non-blocking TCP connection factory.

    Resolves the host, then uses ``_Connector`` to race connection
    attempts across the resolved addresses and returns the winner.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            # Caller-supplied resolver: caller retains ownership.
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        # Only close a resolver this client created itself.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            # The stream factory is threaded through three layers of
            # calls: connect -> _Connector -> _create_stream.
            functools.partial(self._create_stream, host, ssl_options,
                              max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        # gen.Return is caught by the gen.coroutine machinery and becomes
        # the coroutine's result.
        raise gen.Return(stream)

    def _create_stream(self, host, ssl_options, max_buffer_size, af, addr):
        # TODO: we should connect in plaintext mode and start the
        # ssl handshake only after stopping the _Connector.
        if ssl_options is None:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        else:
            stream = SSLIOStream(socket.socket(af),
                                 io_loop=self.io_loop,
                                 ssl_options=ssl_options,
                                 max_buffer_size=max_buffer_size)
        return stream.connect(addr, server_hostname=host)
class URLString(str):
    '''A ``str`` subclass with URL conveniences.

    Parses itself on construction and exposes the parse result's fields
    (``scheme``, ``netloc``, ``path``, ...) as attributes, plus blocking
    DNS resolution and simple HTTP verb helpers via ``requests``.
    '''
    __slots__ = ('parsed', 'solver')

    def __new__(cls, s):
        ''' New hook '''
        return str.__new__(cls, s)

    def __init__(self, s):
        super().__init__()
        # Parse once; field access is delegated in __getattr__.
        self.parsed = urlparse(self)
        self.solver = Resolver()

    @classmethod
    def config_solver(cls, solver_type='tornado.netutil.BlockingResolver'):
        '''Configure the Resolver implementation used by all instances.'''
        Resolver.configure(solver_type)

    @property
    def resolve(self) -> list:
        '''DNS-resolve the URL's network location (port 80).'''
        return self.solver.resolve(self.parsed.netloc, port=80).result()

    def HEAD(self, **kwargs) -> bytes:
        '''HTTP HEAD; returns the (normally empty) response body.'''
        assert self.parsed.scheme
        return requests.head(self, **kwargs).content

    def GET(self, **kwargs) -> bytes:
        '''HTTP GET; returns the response body.'''
        assert self.parsed.scheme
        return requests.get(self, **kwargs).content

    def POST(self, data=None, **kwargs) -> bytes:
        '''HTTP POST; returns the response body.

        ``data`` defaults to an empty form. (Previously ``data=dict()``
        -- a mutable default shared across all calls.)
        '''
        if data is None:
            data = {}
        assert self.parsed.scheme
        return requests.post(self, data=data, **kwargs).content

    def __getattr__(self, attr):
        '''Delegate unknown attributes to the parsed URL fields.'''
        # getattr() honors the normal lookup protocol, unlike the
        # previous direct __getattribute__ call.
        return getattr(self.parsed, attr)
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            # Caller-supplied resolver: caller retains ownership.
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        # Close the resolver only if this client created it.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                # Fix: previously an unguarded bind() leaked socket_obj
                # on failure. Close it, then fail loudly so the caller
                # learns the IP/port is unusable.
                socket_obj.close()
                raise
        try:
            stream = IOStream(socket_obj,
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream.connect(addr)
class UDPClient(object):
    """A non-blocking UDP connection factory.

    NOTE(review): despite the name, this mirrors Tornado's TCPClient
    (IOStream + _Connector + optional TLS) -- confirm intended semantics.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.
    """

    def __init__(self, resolver: Resolver = None) -> None:
        if resolver is not None:
            # Caller-supplied resolver: caller retains ownership.
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver()
            self._own_resolver = True

    def close(self) -> None:
        # Close the resolver only if this client created it.
        if self._own_resolver:
            self.resolver.close()

    async def connect(
        self,
        host: str,
        port: int,
        af: socket.AddressFamily = socket.AF_UNSPEC,
        ssl_options=None,
        max_buffer_size: int = None,
        source_ip: str = None,
        source_port: int = None,
        timeout: Union[float, datetime.timedelta] = None,
    ) -> IOStream:
        """Connect to the given host and port.

        Returns an `.IOStream` (or `.SSLIOStream` if ``ssl_options`` is
        not None). ``timeout`` may be a number of seconds or a
        ``datetime.timedelta`` and bounds resolution, connect and TLS.

        Fix: the previous synchronous version never awaited
        ``resolver.resolve``/``connector.start`` and unpacked raw
        Futures, so it could not work; this restores the coroutine form.
        """
        if timeout is not None:
            # Normalize to an absolute IOLoop deadline.
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timeout.total_seconds()
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = await gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af)
            )
        else:
            addrinfo = await self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(
                self._create_stream,
                max_buffer_size,
                source_ip=source_ip,
                source_port=source_port,
            ),
        )
        af, addr, stream = await connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = await gen.with_timeout(
                    timeout,
                    stream.start_tls(
                        False, ssl_options=ssl_options, server_hostname=host
                    ),
                )
            else:
                stream = await stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host
                )
        return stream

    def _create_stream(
        self,
        max_buffer_size: int,
        af: socket.AddressFamily,
        addr: Tuple,
        source_ip: str = None,
        source_port: int = None,
    ) -> Tuple[IOStream, "Future[IOStream]"]:
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = "::1" if af == socket.AF_INET6 else "127.0.0.1"
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()  # type: Future[IOStream]
            fu.set_exception(e)
            # NOTE(review): 'stream' is unbound on this path (latent
            # UnboundLocalError, present upstream too) -- confirm whether
            # _Connector ever hits it before changing.
            return stream, fu
        else:
            return stream, stream.connect(addr)
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            # Caller-supplied resolver: caller retains ownership.
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        # Close the resolver only if this client created it.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            # _Connector supplies (af, addr) per attempt; the rest of the
            # stream configuration is bound here.
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            # TLS is started only on the winning connection.
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                # Close the socket before re-raising so it does not leak.
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj,
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            # Surface construction failures as a failed Future for the
            # connector to handle uniformly.
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream.connect(addr)
class DubboConnection(object):
    """Callback-driven client connection speaking the Dubbo wire protocol.

    Lifecycle: resolve -> connect -> loop of (16-byte header, variable
    body) reads. Requests queue up until the connection is ready.
    """
    # Read-state constants; read_state is set but the state machine is
    # currently driven purely by the callback chain below.
    READ_HEAD = 0x01
    READ_BODY = 0x02

    def __init__(self, host, port, io_loop):
        self.io_loop = io_loop
        self.resolver = Resolver()
        self.stream = None
        self.queue = deque()          # requests awaiting a ready connection
        self._callbacks = {}          # request Id -> response callback
        self._connected = False
        self.read_state = self.READ_HEAD
        self.prev_response = None     # header parsed, body pending
        self.prof = {}                # request Id -> list of timestamps
        with stack_context.ExceptionStackContext(self._handle_exception):
            self.resolver.resolve(host, port, socket.AF_INET,
                                  callback=self._on_resolve)

    def _on_resolve(self, addrinfo):
        # Use the first resolved (family, sockaddr) entry.
        af = addrinfo[0][0]
        self.stream = IOStream(socket.socket(af))
        self.stream.set_nodelay(True)
        self.stream.set_close_callback(self._on_close)
        sockaddr = addrinfo[0][1]
        gen_log.info("sock addr {0}".format(sockaddr))
        self.stream.connect(sockaddr, self._on_connect)

    def _on_close(self):
        gen_log.info("close dubbo connect")

    def _on_connect(self):
        gen_log.info("dubbo connect ready")
        self._connected = True
        # Flush anything queued while connecting, then start the read loop.
        self._process_queue()
        self.stream.read_bytes(16, self._on_header)

    def _on_header(self, data):
        # Fixed 16-byte header carries the response Id and body length.
        resp = Response()
        resp.decode_header(data)
        self.prof[resp.Id].append(time.time())
        self.prev_response = resp
        self.stream.read_bytes(resp.data_len, self._on_body)

    def _on_body(self, data):
        resp = self.prev_response
        resp.decode_body(data)
        cb = self._callbacks[resp.Id]
        t = time.time()  # kept for the (disabled) profiling log below
        # gen_log.info("DubboID[...]: timing breakdown from self.prof")
        del self._callbacks[resp.Id]
        del self.prof[resp.Id]
        # self.io_loop.add_callback(cb, resp)
        # Invoke the caller's callback directly.
        cb(resp)
        # Re-arm the read loop for the next response header.
        self.stream.read_bytes(16, self._on_header)

    def fetch(self, dubbo_request, callback):
        """Queue a request; ``callback`` receives the decoded Response."""
        if dubbo_request.Id in self._callbacks:
            # Duplicate Id would clobber the pending callback.
            gen_log.error("dubbo Id {0} already in cbs !!".format(
                dubbo_request.Id))
        self._callbacks[dubbo_request.Id] = callback
        self.prof[dubbo_request.Id] = [
            time.time(),
        ]
        self.queue.append(dubbo_request)
        self._process_queue()

    def _process_queue(self):
        # Drain queued requests onto the wire once connected.
        if not self._connected:
            gen_log.info("dubbo connection not ready")
            return
        with stack_context.NullContext():
            while self.queue:
                dubbo_request = self.queue.popleft()
                self.prof[dubbo_request.Id].append(time.time())
                self.stream.write(dubbo_request.encode())

    def _handle_exception(self, typ, value, tb):
        gen_log.exception("dubbo connection error [%s] [%s] %s", typ, value,
                          tb)
class AsyncSocket(object):
    """Socket-like facade over a tornado IOStream.

    ``@synclize`` presumably turns the generator methods into blocking
    calls driven by the io_loop -- TODO confirm against its definition.
    """
    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        self._readtimeout = 0      # seconds; 0 disables the timer
        self._connecttimeout = 0   # seconds; 0 disables the timer

    def set_readtimeout(self, timeout):
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        """Resolve and connect to ``(host, port)``; closes on timeout."""
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(
                host, port, family=socket.AF_INET)
            # Only the first resolved address is attempted.
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    #@synclize
    def sendall(self, buff):
        # Fire-and-forget: does not wait for the write to drain.
        self._iostream.write(buff)

    @synclize
    def read(self, nbytes, partial=False):
        """Read up to/exactly ``nbytes`` (exactly unless ``partial``)."""
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            buff = yield self._iostream.read_bytes(nbytes, partial=partial)
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        # socket.recv semantics: may return fewer than nbytes.
        return self.read(nbytes, partial=True)

    @synclize
    def readline(self, max_bytes=-1):
        """Read through the next newline, optionally capped at max_bytes."""
        timer = None
        if self._readtimeout:
            timer = Timeout(self._readtimeout)
            timer.start()
        try:
            # NOTE(review): '\n' is a str delimiter; IOStream expects
            # bytes on Python 3 -- confirm this runs on Python 2.
            if max_bytes > 0:
                buff = yield self._iostream.read_until('\n',
                                                       max_bytes=max_bytes)
            else:
                buff = yield self._iostream.read_until('\n')
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        # Intentional no-op: timeouts are set via set_*timeout instead.
        pass

    def shutdown(self, direction):
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        # socket.recv_into semantics: fill the caller's buffer in place.
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes, True)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        # Minimal file-object shim: this object already exposes read/close.
        return self
class AsyncSocket(object):
    """Socket-like facade over an IOStream with an internal read buffer.

    ``read`` serves from a local StringIO buffer first, then from the
    IOStream's internal buffer, and only then performs a blocking-style
    network read via ``_read``.
    """
    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        self._readtimeout = 0       # seconds; 0 disables the timer
        self._connecttimeout = 0    # seconds; 0 disables the timer
        self._rbuffer = StringIO(b'')  # leftover bytes from a prior read
        self._rbuffer_size = 0         # unread bytes remaining in _rbuffer

    def set_readtimeout(self, timeout):
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        """Resolve and connect; converts TimeoutException to socket.timeout."""
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(
                host, port, family=socket.AF_INET)
            # Only the first resolved address is attempted.
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException as e:
            self.close()
            raise socket.timeout(e.message)
        finally:
            if timer:
                timer.cancel()

    def sendall(self, buff):
        # Fire-and-forget: does not wait for the write to drain.
        self._iostream.write(buff)

    def read(self, nbytes):
        """Return exactly ``nbytes``, consuming buffered data first."""
        # 1) Fully satisfiable from the local leftover buffer.
        if nbytes <= self._rbuffer_size:
            self._rbuffer_size -= nbytes
            return self._rbuffer.read(nbytes)
        # 2) Push any local leftovers back onto the IOStream's buffer so
        #    there is a single place to read from.
        #    NOTE(review): reaches into IOStream private fields
        #    (_read_buffer/_read_buffer_size) -- tornado-version fragile.
        if self._rbuffer_size > 0:
            self._iostream._read_buffer.appendleft(self._rbuffer.read())
            self._iostream._read_buffer_size += self._rbuffer_size
            self._rbuffer_size = 0
        # 3) Satisfiable from the IOStream's already-received data.
        if nbytes <= self._iostream._read_buffer_size:
            data, data_len = b''.join(
                self._iostream._read_buffer), self._iostream._read_buffer_size
            self._iostream._read_buffer.clear()
            self._iostream._read_buffer_size = 0
            if data_len == nbytes:
                return data
            # Keep the excess for the next call.
            self._rbuffer_size = data_len - nbytes
            self._rbuffer = StringIO(data)
            return self._rbuffer.read(nbytes)
        # 4) Go to the network for the remainder.
        data = self._read(nbytes)
        if len(data) == nbytes:
            return data
        self._rbuffer_size = len(data) - nbytes
        self._rbuffer = StringIO(data)
        return self._rbuffer.read(nbytes)

    @synclize
    def _read(self, nbytes):
        """Blocking-style read of ``nbytes`` with optional timeout."""
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            data = yield self._iostream.read_bytes(nbytes)
            raise Return(data)
        except TimeoutException as e:
            self.close()
            raise socket.timeout(e.message)
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        return self.read(nbytes)

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        # Intentional no-op: timeouts are set via set_*timeout instead.
        pass

    def shutdown(self, direction):
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        # socket.recv_into semantics: fill the caller's buffer in place.
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        # Minimal file-object shim: this object already exposes read/close.
        return self

    def fileno(self):
        return self._iostream.fileno()
class AsyncConnection(object):
    """async redis connection based on tornado

    Handles resolve/connect with a connect timeout, AUTH/SELECT on
    connect, and RESP command packing compatible with redis-py.
    """
    description_format = "AsyncConnection<%(host)s,port=%(port)s,db=%(db)s>"

    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, socket_connect_timeout=None,
                 retry_on_timeout=False, encoding="utf-8",
                 encoding_errors='strict', decode_responses=False,
                 parser_class=PythonParser):
        self.pid = os.getpid()
        self.host = host
        self.port = int(port)
        self.db = db
        self.password = password
        self._timeout = None            # pending IOLoop timeout handle
        self.io_loop = IOLoop.current()
        self.socket_timeout = socket_timeout
        # Connect timeout falls back to the general socket timeout.
        self.socket_connect_timeout = socket_connect_timeout or socket_timeout
        self.retry_on_timeout = retry_on_timeout
        self.encoder = Encoder(encoding, encoding_errors, decode_responses)
        self._stream = None
        self._parser = parser_class()
        self.resolver = Resolver()
        self._description_args = {
            'host': self.host,
            'port': self.port,
            'db': self.db,
        }
        self._connect_callbacks = []

    def __repr__(self):
        return self.description_format % self._description_args

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.disconnect()
        except Exception:
            pass

    def register_connect_callback(self, callback):
        self._connect_callbacks.append(callback)

    def clear_connect_callbacks(self):
        self._connect_callbacks = []

    @gen.coroutine
    def connect(self):
        """Establish the connection and run the AUTH/SELECT handshake."""
        if self._stream:
            # Already connected; nothing to do.
            return
        stream = yield self._connect()
        stream.set_nodelay(True)
        self._stream = stream
        try:
            yield self.on_connect()
        except RedisError:
            # Handshake failed: tear down before propagating.
            self.disconnect()
            raise
        for callback in self._connect_callbacks:
            callback(self)

    @gen.coroutine
    def _connect(self):
        """Try each resolved address until one connects."""
        addrinfo = yield self.resolver.resolve(self.host, self.port, 0)
        err = None
        for af, addr in addrinfo:
            try:
                s = socket.socket(af)
                self._stream = IOStream(s, io_loop=self.io_loop)
                # Arm the connect timeout; _on_timeout closes the stream.
                self._timeout = self.io_loop.add_timeout(
                    self.io_loop.time() + self.socket_connect_timeout,
                    stack_context.wrap(self._on_timeout))
                stream = yield self._stream.connect(addr)
                self._remove_timeout()
                raise gen.Return(stream)
            except (StreamClosedError, socket.error) as _:
                # Remember the last failure and try the next address.
                err = _
                self.disconnect()
        if err is not None:
            raise ConnectionError(self._error_message(err))
        raise socket.error("socket.getaddrinfo returned an empty list")

    def _on_timeout(self):
        # Timeout fired: drop the connection.
        self._timeout = None
        self.disconnect()

    def _remove_timeout(self):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % (self.host, self.port,
                                                       exception.args[0])
        elif len(exception.args) == 2:
            return "Error %s connecting to %s:%s. %s." % (
                exception.args[0], self.host, self.port, exception.args[1])
        else:
            return "Error connecting to %s:%s" % (self.host, self.port)

    def disconnect(self):
        """Close the stream and reset parser state; safe to call twice."""
        self._parser.on_disconnect()
        if self._stream is None:
            return
        try:
            self._stream.close()
        except socket.error:
            pass
        self._stream = None

    @gen.coroutine
    def on_connect(self):
        """Post-connect handshake: AUTH then SELECT as configured."""
        self._parser.on_connect(self)
        if self.password:
            yield self.send_command('AUTH', self.password)
            response = yield self.read_response()
            if nativestr(response) != 'OK':
                raise AuthenticationError('Invalid Password')
        if self.db:
            yield self.send_command('SELECT', self.db)
            response = yield self.read_response()
            if nativestr(response) != 'OK':
                raise ConnectionError('Invalid Database')

    @gen.coroutine
    def read_response(self):
        """Read one reply; enforces socket_timeout while waiting."""
        try:
            if self.socket_timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.io_loop.time() + self.socket_timeout,
                    stack_context.wrap(self._on_timeout))
            response = yield self._parser.read_response()
            self._remove_timeout()
        except StreamClosedError:
            # Stream closed mid-read -- surfaced as a timeout because
            # _on_timeout closes the stream.
            self.disconnect()
            raise TimeoutError
        except:
            self.disconnect()
            raise
        if isinstance(response, ResponseError):
            # Server-side errors are raised to the caller.
            raise response
        raise gen.Return(response)

    @gen.coroutine
    def send_packed_command(self, command):
        """Write already-packed RESP chunks, connecting lazily."""
        if not self._stream:
            yield self.connect()
        if isinstance(command, str):
            command = [command]
        for item in command:
            self._stream.write(item)

    @gen.coroutine
    def send_command(self, *args):
        yield self.send_packed_command(self.pack_command(*args))

    def pack_command(self, *args):
        """Pack a command into RESP chunks (redis-py-compatible)."""
        output = []
        command = args[0]
        # Multi-word commands (e.g. "CONFIG GET") become multiple tokens.
        if ' ' in command:
            args = tuple([Token.get_token(s)
                          for s in command.split()]) + args[1:]
        else:
            args = (Token.get_token(command), ) + args[1:]
        buff = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF))
        for arg in imap(self.encoder.encode, args):
            # Keep individual chunks small (~6KB) so large values are
            # emitted as separate output items.
            if len(buff) > 6000 or len(arg) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF))
                output.append(buff)
                output.append(arg)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))),
                                       SYM_CRLF, arg, SYM_CRLF))
        output.append(buff)
        return output

    def pack_commands(self, commands):
        """Pack multiple commands, coalescing into ~6KB write buffers."""
        output = []
        pieces = []
        buffer_length = 0
        for cmd in commands:
            for chunk in self.pack_command(*cmd):
                pieces.append(chunk)
                buffer_length += len(chunk)
            if buffer_length > 6000:
                output.append(SYM_EMPTY.join(pieces))
                buffer_length = 0
                pieces = []
        if pieces:
            output.append(SYM_EMPTY.join(pieces))
        return output
class PAConnection(object):
    """Callback-driven client connection for the 'act' protocol.

    Lifecycle: resolve -> connect -> write magic + key -> flush queued
    requests -> loop reading (id, length, body) frames and dispatching
    each response to the callback registered in ``fetch``.
    """

    def __init__(self, host, port, io_loop, key):
        self.io_loop = io_loop
        self.resolver = Resolver()
        self._callbacks = {}      # request Id -> response callback
        self._connected = False   # gates _process_queue until connected
        self.queue = deque()      # requests queued before the connect completes
        self.key = key
        self.stream = None
        self.pepv_act_resp = None  # response currently being assembled
        self.prof = {}             # request Id -> list of timing samples
        with stack_context.ExceptionStackContext(self._handle_exception):
            self.resolver.resolve(host, port, socket.AF_INET,
                                  callback=self._on_resolve)

    def _handle_exception(self, typ, value, tb):
        # Catch-all for errors raised anywhere in the callback chain.
        gen_log.exception("pa connection error [%s] [%s] %s", typ, value, tb)

    def _on_resolve(self, addrinfo):
        # Use only the first resolved address; no fallback to later ones.
        af = addrinfo[0][0]
        self.stream = IOStream(socket.socket(af))
        self.stream.set_nodelay(True)
        self.stream.set_close_callback(self._on_close)
        sockaddr = addrinfo[0][1]
        # gen_log.info("sock addr {0}".format(sockaddr))
        self.stream.connect(sockaddr, self._on_connect)

    def _on_close(self):
        # NOTE(review): no reconnect or pending-callback cleanup happens
        # here; queued requests are silently stranded on close — confirm
        # whether that is acceptable for callers.
        gen_log.info("pa conn close")

    def _on_connect(self):
        # gen_log.info("start conn to pa")
        self._connected = True
        self.stream.write('\xab\xcd')  # magic number of act protocol
        # gen_log.info('write data {0}'.format(repr(encode_act_key(self.key))))
        self.stream.write(encode_act_key(self.key))
        self._process_queue()
        # Start the read loop: first frame field is a 4-byte response Id.
        self.stream.read_bytes(4, self._on_id)

    def _on_id(self, data):
        # Frame field 1: 4-byte response Id.
        resp = ActResponse()
        resp.Id = bytes2int(data)
        self.prof[resp.Id].append(time.time())
        self.pepv_act_resp = resp
        self.stream.read_bytes(4, self._on_rlen)

    def _on_rlen(self, data):
        # Frame field 2: 4-byte body length; then read the body itself.
        self.stream.read_bytes(bytes2int(data), self._on_res_body)

    def _on_res_body(self, data):
        # Frame field 3: response body.  Dispatch to the registered
        # callback, then wait for the next frame's Id.
        resp = self.pepv_act_resp
        resp.result = data
        cb = self._callbacks[resp.Id]
        t = time.time()
        # gen_log.info(
        #     "ActID[{0}]: {1}, {2}, {3}, {4}, {5}, {6}, {7}".format(resp.Id, self.prof[resp.Id][0], self.prof[resp.Id][1], self.prof[resp.Id][2], t, self.prof[resp.Id][1]-self.prof[resp.Id][0], self.prof[resp.Id][2]-self.prof[resp.Id][0], t-self.prof[resp.Id][0]))
        del self.prof[resp.Id]
        del self._callbacks[resp.Id]
        # self.io_loop.add_callback(cb, resp)
        cb(resp)
        self.stream.read_bytes(4, self._on_id)

    def fetch(self, act_request, callback):
        """Queue *act_request* and invoke *callback* with its response.

        A duplicate in-flight Id is logged but still overwrites the
        previous callback (the earlier one is then never invoked).
        """
        if act_request.Id in self._callbacks:
            gen_log.error("act Id {0} already in cbs !!".format(
                act_request.Id))
        self._callbacks[act_request.Id] = callback
        self.prof[act_request.Id] = [time.time(), ]
        self.queue.append(act_request)
        self._process_queue()

    def _process_queue(self):
        # Drain queued requests onto the stream; no-op until connected.
        if not self._connected:
            # gen_log.info("act connection not ready, wait an other turn")
            return
        with stack_context.NullContext():
            while self.queue:
                act_request = self.queue.popleft()
                self.prof[act_request.Id].append(time.time())
                self.stream.write(act_request.encode_body())
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.

    .. versionchanged:: 5.0
       Added the ``timeout`` argument.
    """
    def __init__(self, resolver=None):
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver()
            self._own_resolver = True

    def close(self):
        # Only close a resolver this object created itself; a caller-supplied
        # resolver remains the caller's responsibility.
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.
        """
        if timeout is not None:
            # Normalize relative timeouts to an absolute IOLoop deadline,
            # since gen.with_timeout and connector.start expect one.
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = yield gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            # The stream was opened in plaintext; upgrade it to TLS now,
            # still bounded by the same absolute deadline if one was given.
            if timeout is not None:
                stream = yield gen.with_timeout(timeout, stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host))
            else:
                stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                                server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            # Returns (stream, connect-future); the connector resolves the
            # future and hands the stream back to connect() above.
            return stream, stream.connect(addr)
class TrickleTCPTest(AsyncTestCase):
    """Tests with a TCPServer: exercises Trickle's read/write helpers and
    their timeout behavior over a real localhost socket pair."""

    def setUp(self):
        super(TrickleTCPTest, self).setUp()
        sock, port = bind_unused_port()
        self.port = port
        self.server = TestTCPServer(self.io_loop)
        self.server.add_socket(sock)
        self.resolver = Resolver()

    # Utility method, returns two connected Trickles.
    @gen.coroutine
    def connect(self):
        client_trickle = Trickle(
            socket.socket(socket.AF_INET), io_loop=self.io_loop)

        addr_info = yield self.resolver.resolve(
            'localhost', self.port, socket.AF_INET)

        sock_addr = addr_info[0][1]
        yield client_trickle.connect(sock_addr)

        # Wait for server to handle connection.
        server_stream = yield self.server.test_stream
        server_trickle = Trickle(server_stream)
        raise gen.Return((client_trickle, server_trickle))

    @gen_test
    def test_read_until(self):
        client_trickle, server_trickle = yield self.connect()
        data_in = b'foo-bar-baz'
        yield server_trickle.write(data_in)
        data_out = yield client_trickle.read_until(b'bar')
        self.assertEqual(b'foo-bar', data_out)

    @gen_test
    def test_read_until_timeout(self):
        client_trickle, server_trickle = yield self.connect()
        try:
            # Fixed: delimiter must be bytes (b''), matching every other
            # call site in this class; a str delimiter against a bytes
            # stream fails with a type error on Python 3 before the
            # timeout path is ever reached.
            yield client_trickle.read_until(b'', timeout=0.01)
        except socket.timeout:
            pass
        else:
            self.fail('socket.timeout not raised')

    @gen_test
    def test_read_until_regex(self):
        client_trickle, server_trickle = yield self.connect()
        data_in = b'foo-bar-baz'
        yield server_trickle.write(data_in)
        data_out = yield client_trickle.read_until_regex(b'bar')
        self.assertEqual(b'foo-bar', data_out)

    @gen_test
    def test_read_until_regex_timeout(self):
        client_trickle, server_trickle = yield self.connect()
        try:
            yield client_trickle.read_until_regex(b'', timeout=0.01)
        except socket.timeout:
            pass
        else:
            self.fail('socket.timeout not raised')

    @gen_test
    def test_read_bytes(self):
        client_trickle, server_trickle = yield self.connect()
        data_in = b'foo-bar-baz'
        yield server_trickle.write(data_in)
        data_out = yield client_trickle.read_bytes(7)
        self.assertEqual(b'foo-bar', data_out)

    @gen_test
    def test_read_bytes_timeout(self):
        client_trickle, server_trickle = yield self.connect()
        try:
            yield client_trickle.read_bytes(1, timeout=0.01)
        except socket.timeout:
            pass
        else:
            self.fail('socket.timeout not raised')

    @gen_test
    def test_read_until_close(self):
        client_trickle, server_trickle = yield self.connect()
        data_in = b'foo-bar-baz'
        yield server_trickle.write(data_in)
        server_trickle.stream.close()
        data_out = yield client_trickle.read_until_close()
        self.assertEqual(b'foo-bar-baz', data_out)

    @gen_test
    def test_read_until_close_timeout(self):
        client_trickle, server_trickle = yield self.connect()
        try:
            yield client_trickle.read_until_close(timeout=0.01)
        except socket.timeout:
            pass
        else:
            self.fail('socket.timeout not raised')
class AsyncSocket(object):
    """Socket-like adapter over a Tornado ``IOStream``.

    Presents (part of) the blocking ``socket.socket`` API so that
    socket-oriented code can run on an IOStream.  The ``@synclize``
    decorator (project-provided) is presumed to turn the yielded
    coroutines into synchronous-looking calls — TODO confirm its exact
    semantics; ``Timeout``/``TimeoutException`` likewise come from the
    surrounding project.
    """

    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        # 0 means "no timeout" for both read and connect.
        self._readtimeout = 0
        self._connecttimeout = 0

    def set_readtimeout(self, timeout):
        # Seconds to allow each read/readline before closing; 0 disables.
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        # Seconds to allow connect() before closing; 0 disables.
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        """Resolve *address* (host, port) and connect to the first result.

        NOTE(review): the loop breaks after the first address; there is
        no fallback to later resolved addresses on failure.
        """
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(host, port,
                                                          family=socket.AF_INET)
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    #@synclize
    def sendall(self, buff):
        # Fire-and-forget: queues the write on the IOStream without
        # waiting for it to flush.
        self._iostream.write(buff)

    @synclize
    def read(self, nbytes, partial=False):
        """Read up to *nbytes* (exactly *nbytes* unless ``partial``).

        Closes the stream and re-raises on read timeout.
        """
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            buff = yield self._iostream.read_bytes(nbytes, partial=partial)
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        # socket.recv semantics: return as soon as any data is available.
        return self.read(nbytes, partial=True)

    @synclize
    def readline(self, max_bytes=-1):
        """Read one line (up to *max_bytes* if positive).

        NOTE(review): the delimiter is the str '\\n'; whether this IOStream
        variant accepts str (vs bytes) delimiters depends on the Tornado
        version in use — confirm.
        """
        timer = None
        if self._readtimeout:
            timer = Timeout(self._readtimeout)
            timer.start()
        try:
            if max_bytes > 0:
                buff = yield self._iostream.read_until('\n', max_bytes=max_bytes)
            else:
                buff = yield self._iostream.read_until('\n')
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        # Deliberate no-op: timeouts are configured via set_readtimeout /
        # set_connecttimeout instead of the socket API.
        pass

    def shutdown(self, direction):
        # fileno() here returns the underlying socket object (which has
        # .shutdown), not an integer fd.
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        """socket.recv_into emulation: fill *buff* and return bytes read."""
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes, True)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        # File-object API shim: callers get this object back and use its
        # read/readline methods directly.
        return self
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been
       removed.
    """
    def __init__(self, resolver=None):
        # A caller-supplied resolver is borrowed; one we build ourselves
        # is owned and closed in close().
        if resolver is None:
            self.resolver = Resolver()
            self._own_resolver = True
        else:
            self.resolver = resolver
            self._own_resolver = False

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        ``source_ip``/``source_port`` pin the local end of the connection;
        resolving a specific interface to an IP must be done by the caller,
        as it is platform-dependent.

        Raises `TimeoutError` if the connection does not complete before
        ``timeout``, which may be given in any form accepted by
        `.IOLoop.add_timeout` (a number of seconds, a `datetime.timedelta`,
        or an absolute `.IOLoop.time` value).

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.

        .. versionchanged:: 5.0
           Added the ``timeout`` argument.
        """
        # Convert the caller's timeout into an absolute IOLoop deadline
        # (or None), which both gen.with_timeout and the connector expect.
        deadline = None
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                deadline = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                deadline = IOLoop.current().time() + timedelta_to_seconds(
                    timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)

        resolve_future = self.resolver.resolve(host, port, af)
        if deadline is None:
            addr_info = yield resolve_future
        else:
            addr_info = yield gen.with_timeout(deadline, resolve_future)

        make_stream = functools.partial(
            self._create_stream, max_buffer_size,
            source_ip=source_ip, source_port=source_port)
        connector = _Connector(addr_info, make_stream)
        af, addr, stream = yield connector.start(connect_timeout=deadline)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            # The stream came up in plaintext; upgrade to TLS under the
            # same overall deadline.
            tls_future = stream.start_tls(False, ssl_options=ssl_options,
                                          server_hostname=host)
            if deadline is None:
                stream = yield tls_future
            else:
                stream = yield gen.with_timeout(deadline, tls_future)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Plaintext connect only; connect() above layers TLS on afterwards
        # once a connection has been won.
        bind_port = source_port if isinstance(source_port, int) else 0
        bind_ip = source_ip
        if bind_port and not bind_ip:
            # A specific source port with no source IP: bind the loopback
            # address matching the requested address family
            # (127.0.0.1 for IPv4, ::1 for IPv6).
            bind_ip = '::1' if af == socket.AF_INET6 else '127.0.0.1'
        sock = socket.socket(af)
        set_close_exec(sock.fileno())
        if bind_port or bind_ip:
            # The caller asked for a specific local IP and/or port.
            try:
                sock.bind((bind_ip, bind_port))
            except socket.error:
                # Fail loudly if unable to use the IP/port.
                sock.close()
                raise
        try:
            stream = IOStream(sock, max_buffer_size=max_buffer_size)
        except socket.error as exc:
            failed = Future()
            failed.set_exception(exc)
            return failed
        # (stream, connect-future): the connector awaits the future and
        # reports the stream back to connect().
        return stream, stream.connect(addr)
class LBConnector(object):
    """
    Adds support for sequential search for live LB to IOStream.
    Uses socket.create_connection to perform it - sequential approach.
    """
    def __init__(self, io_loop):
        self.io_loop = io_loop
        # Default blocking resolver calling socket.getaddrinfo
        self.resolver = Resolver(io_loop=io_loop)
        self._own_resolver = True

    def close(self):
        self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, timeout, af=socket.AF_UNSPEC,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream`.

        ``timeout`` is the per-address connect timeout in seconds; the
        overall search budget is (len(addrinfo) + 1) * timeout.
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _RandomConnector(
            addrinfo, self.io_loop,
            partial(self._create_stream, max_buffer_size, timeout))
        # Use large timeout for connection search, assume that all addresses
        # apart from the last will timeout
        total_connect_timeout = (len(addrinfo) + 1) * timeout
        af, addr, stream = yield connector.start(total_connect_timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        # TODO: support ssl; it can be copied from tornado but we need to
        # read ssl opts from Connection
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, timeout, af, addr):
        """Open one keep-alive IOStream to *addr* with a connect timeout.

        Returns a Future resolving to the connected stream, or failing
        with the stream's error / a StreamClosedError("Connect timeout").
        """
        sock = socket.socket(af)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        future = Future()
        stream = iostream.IOStream(sock, io_loop=self.io_loop,
                                   max_buffer_size=max_buffer_size)

        def on_stream_connect_timeout():
            """ Close the stream and pass an exception to caller """
            # Clear the close callback first so closing the stream does
            # not also fire on_stream_error (which would set the future's
            # exception a second time).
            stream.set_close_callback(None)
            exc = iostream.StreamClosedError("Connect timeout")
            stream.close(exc_info=(None, exc, None))
            future.set_exception(exc)

        def on_stream_connected():
            """ On success clean after ourselves """
            self.io_loop.remove_timeout(handler)
            stream.set_close_callback(None)
            future.set_result(stream)

        def on_stream_error():
            """ Stream close while connecting means it failed
            Cancel the timeout and pass the error to caller """
            self.io_loop.remove_timeout(handler)
            future.set_exception(stream.error)

        # NOTE(review): rebinds the numeric `timeout` parameter as a
        # timedelta for add_timeout; the original seconds value is no
        # longer available past this point.
        timeout = timedelta(seconds=timeout)
        handler = self.io_loop.add_timeout(timeout, on_stream_connect_timeout)
        stream.set_close_callback(on_stream_error)
        stream.connect(addr, callback=on_stream_connected)
        return future