def ignore_after(delay, callable=None, *args):
    """Apply a curio ``ignore_after`` timeout.

    With a callable, run ``callable(*args)`` under the timeout and return
    its result (or None if the timeout fires first).  Without one, return
    the raw ``curio.ignore_after`` context manager for the caller to use.
    """
    thr.enable_async()
    if not callable:
        return curio.ignore_after(delay)
    with curio.ignore_after(delay):
        return callable(*args)
async def shutdown_and_clean_up(self):
    """Force-close this connection's socket, ignoring protocol state.

    Called when we definitely want to kill the connection — clean
    shutdown or loss-of-sync bug — so ``self.conn`` is deliberately
    bypassed and the shutdown happens on the raw socket.  (A client
    implementation might instead send ConnectionClosed() and let it
    raise if that violates the protocol.)
    """
    # Curio bug: doesn't expose shutdown(), so drop to the blocking socket.
    with self.sock.blocking() as raw:
        try:
            raw.shutdown(SHUT_WR)
        except OSError:
            # Peer is already gone; nothing left to do.
            return
    # Drain for a while so the peer can observe our close, then give up
    # and close the socket unconditionally.
    async with curio.ignore_after(TIMEOUT):
        try:
            while await self.sock.recv(MAX_RECV):
                pass  # discard until EOF
        finally:
            await self.sock.close()
async def shutdown_and_clean_up(self):
    """Tear down the connection socket regardless of protocol state.

    Used when this connection must die (clean shutdown or
    loss-of-sync bug), so the state of ``self.conn`` is ignored and the
    shutdown is performed directly on the socket.  (A client might
    prefer sending ConnectionClosed() and letting it raise instead.)
    """
    # Curio bug: doesn't expose shutdown(); use the blocking socket.
    with self.sock.blocking() as real_sock:
        try:
            real_sock.shutdown(SHUT_WR)
        except OSError:
            return  # peer already disconnected — nothing to do
    # Give the peer a chance to see the close by draining briefly,
    # then close the socket no matter what.
    #
    # XX FIXME: possibly we should set SO_LINGER to 0 here, so that when
    # the client ignores our shutdown we do a violent close (RST) and
    # avoid TIME_WAIT.  nginx only does this for regular timeouts and
    # only when explicitly enabled ("Default: reset_timedout_connection
    # off"), never for keepalive timeouts.
    async with curio.ignore_after(TIMEOUT):
        try:
            got = await self.sock.recv(MAX_RECV)
            while got:  # read until EOF
                got = await self.sock.recv(MAX_RECV)
        finally:
            await self.sock.close()
async def closeConnection(self):
    """Kill this connection's socket directly, ignoring HTTP state.

    Invoked when the connection must be terminated (clean shutdown or
    error/loss-of-sync), so ``self.http`` is bypassed and the shutdown
    is done on the socket itself.  (A client might instead send
    ConnectionClosed() and let it raise on protocol violation.)
    """
    # Curio bug: doesn't expose shutdown(); fall back to blocking socket.
    with self.sock.blocking() as sock:
        try:
            sock.shutdown(SHUT_WR)
        except OSError:
            return  # Connection already closed.
    # Briefly drain so the peer notices the close, then close for real.
    async with curio.ignore_after(TIMEOUT):
        try:
            while True:
                # Discard anything remaining of the request.
                if not await self.sock.recv(self.maxRecvSize):
                    break
        finally:
            await self.sock.close()
    # XX FIXME: possibly we should set SO_LINGER to 0 before closing, so
    # a client that ignored our shutdown gets an RST and we avoid
    # TIME_WAIT; nginx only does this for regular (non-keepalive)
    # timeouts when "reset_timedout_connection" is enabled.
async def shutdown_and_clean_up(self):
    """Shut down and close the connection socket unconditionally.

    We definitely want this connection dead — clean shutdown or some
    loss-of-sync bug — so the state machine in ``self.conn`` is ignored
    and the shutdown happens on the socket directly.  (Client code
    might prefer ConnectionClosed() so a protocol violation raises.)
    """
    # Curio bug: shutdown() isn't exposed; use the blocking socket.
    with self.sock.blocking() as real_sock:
        try:
            real_sock.shutdown(SHUT_WR)
        except OSError:
            # They're already gone — nothing to do.
            return
    # Read for a bit so the peer can see that we closed, but eventually
    # give up; the socket is closed either way.
    async with curio.ignore_after(TIMEOUT):
        try:
            while (data := await self.sock.recv(MAX_RECV)):
                pass  # drain until EOF
        finally:
            await self.sock.close()
async def wait(self, timeout=None):
    """Wait on the parent's wait(); return True when it completes.

    If *timeout* is given, give up after that many seconds and return
    False.  With timeout=None the wait is unbounded and always returns
    True.
    """
    if timeout is None:
        await super().wait()
        return True
    async with curio.ignore_after(timeout):
        await super().wait()
        return True
    # Only reached when ignore_after cancelled the wait.
    return False
async def close(self):
    """Half-close, drain, and close the connection socket.

    Sends FIN via ``shutdown(SHUT_WR)``, then reads and discards
    incoming data for up to ``settings.TIMEOUT_S`` so the peer can
    observe the close, and finally closes the socket unconditionally.
    """
    try:
        await self.socket.shutdown(SHUT_WR)
    except OSError:
        # Peer already reset/closed the connection.  Previously this
        # exception propagated and the socket was never closed (leak);
        # release it and bail out, matching the sibling shutdown paths.
        await self.socket.close()
        return
    # Drain until EOF or timeout, but always close the socket.
    async with curio.ignore_after(settings.TIMEOUT_S):
        try:
            while True:
                data = await self.socket.recv(settings.MAX_RECV)
                if not data:
                    break
        finally:
            await self.socket.close()
async def create_many_channels(self, *names, priority=0, wait_for_connection=True, move_on_after=2):
    '''Create many channels in parallel through this context

    Parameters
    ----------
    *names : str
        Channel / PV names
    priority : int, optional
        Set priority of circuits
    wait_for_connection : bool, optional
        Wait for connections
    move_on_after : float or None, optional
        Overall time budget in seconds; None waits indefinitely

    Returns
    -------
    channel_dict : OrderedDict
        Ordered dictionary of name to Channel
    '''
    channels = OrderedDict()

    async def _connect_one(name):
        # Search for the PV, create its channel, optionally wait for it.
        await self.search(name)
        channel = await self.create_channel(name, priority=priority)
        if wait_for_connection:
            await channel.wait_for_connection()
        return name, channel

    async def _connect_all():
        # Spawn one task per name and collect results as they finish.
        async with curio.TaskGroup() as group:
            for name in names:
                await group.spawn(_connect_one, name)
            done = await group.next_done()
            while done is not None:
                name, channel = done.result
                channels[name] = channel
                done = await group.next_done()

    if move_on_after is not None:
        async with curio.ignore_after(move_on_after):
            await _connect_all()
    else:
        await _connect_all()
    return channels
async def open_tcp_stream(hostname, port, delay=0.3):
    """Open a TCP connection, racing candidate addresses with a staggered
    start ("happy eyeballs" style): each target gets *delay* seconds of
    head start before the next one is also attempted.

    Raises OSError (wrapping the accumulated per-target errors) if no
    target could be connected.
    """
    # Get all of the possible targets for a given host/port
    targets = await socket.getaddrinfo(hostname, port, type=socket.SOCK_STREAM)
    if not targets:
        raise OSError(f'nothing known about {hostname}:{port}')

    # Cluster the targets into unique address families (e.g., AF_INET,
    # AF_INET6, etc.) and make sure the first entries are from a
    # different family (interleave: one per family first, then the rest).
    families = [list(g) for _, g in itertools.groupby(targets, key=lambda t: t[0])]
    targets = [fam.pop(0) for fam in families]
    targets.extend(itertools.chain(*families))

    # List of accumulated errors to report in case of total failure
    errors = []

    # Task group to manage a collection of concurrent tasks.
    # NOTE(review): assumes curio's TaskGroup(wait=object) records the
    # first task producing a non-None result and cancels the remaining
    # tasks on exit — confirm against the curio version in use.
    async with TaskGroup(wait=object) as group:

        # Attempt to make a connection request; on failure, record the
        # error and return None so the group keeps waiting for others.
        async def try_connect(sockargs, addr, errors):
            sock = socket.socket(*sockargs)
            try:
                await sock.connect(addr)
                return sock
            except Exception as e:
                await sock.close()
                errors.append(e)

        # Walk the list of targets and try connections with a staggered
        # delay: wait up to `delay` for a completion before spawning the
        # next candidate (ignore_after swallows its own timeout).
        for *sockargs, _, addr in targets:
            await group.spawn(try_connect, sockargs, addr, errors)
            async with ignore_after(delay):
                task = await group.next_done()
                if not task.exception:
                    group.completed = task
                    break

    # Either some attempt produced a socket, or every target failed.
    if group.completed:
        return group.completed.result
    else:
        raise OSError(errors)
async def tcp_handle(self, sock, addr):
    """Per-connection handler: run the h11 request/response loop until the
    connection must close or cannot be reset for another request cycle.
    """
    wrapper = HTTPWrapper(self, sock, addr)
    while True:
        # Both sides of the h11 state machine must be IDLE at the top of
        # every request cycle.
        assert wrapper.conn.states == {
            h11.CLIENT: h11.IDLE,
            h11.SERVER: h11.IDLE
        }
        try:
            # Only reading the next event is bounded by the timeout;
            # NOTE: we dont want to timeout the handler
            async with curio.timeout_after(self.timeout):
                event = await wrapper.next_event()
                self.log.debug('server main loop got event: {}', event)
            if type(event) is h11.Request:
                if not await self.process_request(wrapper, event):
                    # TODO: 404 handler
                    await wrapper.send_simple_response(404, b'not found')
        except curio.TaskTimeout:
            # NOTE: Is it okay to ignore timeout on send
            async with curio.ignore_after(self.timeout):
                # TODO: timeout handler
                await wrapper.send_simple_response(408, None)
        except Exception as e:
            await wrapper.maybe_send_error_response(e)
        if wrapper.conn.our_state is h11.MUST_CLOSE:
            self.log.debug('must close connection: {}', wrapper.id)
            await wrapper.kill()
            return
        else:
            self.log.debug('our state is (supposedly) reusable: {}',
                           wrapper.conn.our_state)
            try:
                # Reset the h11 state machine for the next keep-alive
                # request on this connection.
                wrapper.conn.start_next_cycle()
            except h11.ProtocolError as e:
                self.log.warn(
                    'couldnt start next cycle: protocolerror: {}', e)
                await wrapper.maybe_send_error_response(e)
                # self.log.warn('ProtocolError for connection: {}', wrapper.id)
                # self.log.warn(e)
                # await wrapper.kill()
                return
async def open_tcp_stream(hostname, port, delay=0.3):
    """Open a TCP connection by racing candidate addresses, giving each
    one *delay* seconds of head start before also trying the next
    ("happy eyeballs" style).

    Raises OSError (wrapping the accumulated per-target errors) if every
    target fails.
    """
    # Get all of the possible targets for a given host/port
    targets = await socket.getaddrinfo(hostname, port, type=socket.SOCK_STREAM)
    if not targets:
        raise OSError(f'nothing known about {hostname}:{port}')

    # Cluster the targets into unique address families (e.g., AF_INET,
    # AF_INET6, etc.) and make sure the first entries are from a
    # different family (one per family first, then the leftovers).
    families = [ list(g) for _, g in itertools.groupby(targets, key=lambda t: t[0]) ]
    targets = [ fam.pop(0) for fam in families ]
    targets.extend(itertools.chain(*families))

    # List of accumulated errors to report in case of total failure
    errors = []

    # Task group to manage a collection of concurrent tasks.
    # NOTE(review): assumes curio's TaskGroup(wait=object) records the
    # first task that returns a non-None result in group.completed and
    # cancels the rest on exit — confirm against the curio version.
    async with TaskGroup(wait=object) as group:

        # Attempt to make a connection request; failures append to
        # `errors` and implicitly return None so the race continues.
        async def try_connect(sockargs, addr, errors):
            sock = socket.socket(*sockargs)
            try:
                await sock.connect(addr)
                return sock
            except Exception as e:
                await sock.close()
                errors.append(e)

        # Walk the list of targets and try connections with a staggered
        # delay: wait up to `delay` for the first non-None result before
        # spawning the next candidate (ignore_after swallows its timeout).
        for *sockargs, _, addr in targets:
            await group.spawn(try_connect, sockargs, addr, errors)
            async with ignore_after(delay):
                sock = await group.next_result()
                if sock:
                    break

    # Either some attempt produced a socket, or all targets failed.
    if group.completed:
        return group.completed.result
    else:
        raise OSError(errors)
def _maybe_timeout(timeout):
    """Return a context manager bounding an operation to *timeout* seconds.

    A falsy timeout (None or 0) means "no limit": a no-op null context
    is returned instead of a curio timeout scope.
    """
    return curio.ignore_after(timeout) if timeout else dns._asyncbackend.NullContext()