def _do_close(self, fobj, closefd): self.__io_holder[0] = None # for _wrap_method try: with self.lock: self.threadpool.apply(fobj.flush) finally: if closefd: # Note that we're not taking the lock; older code # did fobj.close() without going through the threadpool at all, # so acquiring the lock could potentially introduce deadlocks # that weren't present before. Avoiding the lock doesn't make # the existing race condition any worse. # We wrap the close in an exception handler and re-raise directly # to avoid the (common, expected) IOError from being logged by the pool def close(_fobj=fobj): try: _fobj.close() except: # pylint:disable=bare-except return sys.exc_info() finally: _fobj = None del fobj exc_info = self.threadpool.apply(close) del close if exc_info: reraise(*exc_info)
def close(self):
    """
    Flush and (if ``self._close``) close the wrapped file object,
    performing the actual ``close()`` in the threadpool. Idempotent.

    .. versionchanged:: 1.1b1
       The file object is closed using the threadpool. Note that whether
       or not this action is synchronous or asynchronous is not documented.
    """
    fobj = self.io
    if fobj is None:
        # Already closed.
        return
    self.io = None
    try:
        self.flush(_fobj=fobj)
    finally:
        if self._close:
            # Note that we're not using self._apply; older code
            # did fobj.close() without going through the threadpool at all,
            # so acquiring the lock could potentially introduce deadlocks
            # that weren't present before. Avoiding the lock doesn't make
            # the existing race condition any worse.
            # We wrap the close in an exception handler and re-raise directly
            # to avoid the (common, expected) IOError from being logged
            def close():
                try:
                    fobj.close()
                except: # pylint:disable=bare-except
                    # Return (not raise) the exc_info so the pool doesn't
                    # log it; we re-raise it in this greenlet below.
                    return sys.exc_info()
            exc_info = self.threadpool.apply(close)
            if exc_info:
                reraise(*exc_info)
def _do_close(self, fobj, closefd): self.__io_holder[0] = None # for _wrap_method try: with self.lock: self.threadpool.apply(fobj.flush) finally: if closefd: # Note that we're not taking the lock; older code # did fobj.close() without going through the threadpool at all, # so acquiring the lock could potentially introduce deadlocks # that weren't present before. Avoiding the lock doesn't make # the existing race condition any worse. # We wrap the close in an exception handler and re-raise directly # to avoid the (common, expected) IOError from being logged by the pool def close(_fobj=fobj): try: _fobj.close() except: # pylint:disable=bare-except return sys.exc_info() finally: _fobj = None del fobj exc_info = self.threadpool.apply(close) del close if exc_info: reraise(*exc_info)
def seek(self, offset, whence=0):
    """
    Move the file position with :func:`os.lseek` and return the new
    absolute offset.

    On Python 2, an ``OSError`` is converted to an ``IOError`` (keeping
    the original traceback), as documented for ``RawIOBase``; a genuine
    ``IOError`` propagates untouched.
    """
    fileno = self._fileno
    try:
        return os.lseek(fileno, offset, whence)
    except IOError: # pylint:disable=try-except-raise
        raise
    except OSError as ex: # pylint:disable=duplicate-except
        # Python 2.x only: translate to the documented IOError,
        # preserving the traceback.
        # See https://github.com/gevent/gevent/issues/1323
        reraise(IOError, IOError(*ex.args), sys.exc_info()[2])
def seek(self, offset, whence=0):
    """
    Reposition the underlying file descriptor via :func:`os.lseek`,
    returning the resulting absolute position.

    ``RawIOBase`` documents ``IOError``, so on Python 2 any ``OSError``
    is re-raised as an ``IOError`` carrying the same arguments and the
    original traceback; an actual ``IOError`` is simply re-raised.
    """
    try:
        new_position = os.lseek(self._fileno, offset, whence)
    except IOError: # pylint:disable=try-except-raise
        raise
    except OSError as ex: # pylint:disable=duplicate-except
        # Python 2.x compatibility shim.
        # See https://github.com/gevent/gevent/issues/1323
        reraise(IOError, IOError(*ex.args), sys.exc_info()[2])
    else:
        return new_position
def _getnameinfo(self, sockaddr, flags):
    """
    Implement ``getnameinfo`` on top of the c-ares ``getnameinfo`` call.

    *sockaddr* is validated, resolved to exactly one canonical address
    via :meth:`_getaddrinfo`, and then handed to ares; returns a
    ``(node, service)`` pair. Raises :exc:`TypeError` for bad argument
    types and ``error``/``gaierror`` for resolution failures.
    """
    # Argument validation mirroring the stdlib's messages.
    if not isinstance(flags, int):
        raise TypeError('an integer is required')
    if not isinstance(sockaddr, tuple):
        raise TypeError('getnameinfo() argument 1 must be a tuple')

    address = sockaddr[0]
    if not PY3 and isinstance(address, text_type):
        # Python 2: the host must be a byte string for c-ares.
        address = address.encode('ascii')

    if not isinstance(address, string_types):
        raise TypeError('sockaddr[0] must be a string, not %s' % type(address).__name__)

    port = sockaddr[1]
    if not isinstance(port, int):
        raise TypeError('port must be an integer, not %s' % type(port))

    waiter = Waiter(self.hub)
    # Canonicalize the address first; getnameinfo needs a single,
    # unambiguous sockaddr to work on.
    result = self._getaddrinfo(address, str(sockaddr[1]), family=AF_UNSPEC, socktype=SOCK_DGRAM)
    if not result:
        # NOTE(review): presumably _getaddrinfo left the failing
        # exc_info in place for us to propagate — confirm.
        reraise(*sys.exc_info())
    elif len(result) != 1:
        raise error('sockaddr resolved to multiple addresses')

    family, _socktype, _proto, _name, address = result[0]

    if family == AF_INET:
        if len(sockaddr) != 2:
            raise error("IPv4 sockaddr must be 2 tuple")
    elif family == AF_INET6:
        # Preserve the caller's flowinfo/scope-id on the resolved address.
        address = address[:2] + sockaddr[2:]

    self.ares.getnameinfo(waiter, address, flags)
    node, service = waiter.get()

    if service is None:
        if PY3:
            # ares docs: "If the query did not complete
            # successfully, or one of the values was not
            # requested, node or service will be NULL ". Python 2
            # allows that for the service, but Python 3 raises
            # an error. This is tested by test_socket in py 3.4
            err = gaierror('nodename nor servname provided, or not known')
            err.errno = 8
            raise err
        service = '0'
    return node, service
def _getnameinfo(self, sockaddr, flags):
    """
    Implement ``getnameinfo`` on top of the c-ares ``getnameinfo`` call.

    *sockaddr* is validated, resolved to exactly one canonical address
    via :meth:`_getaddrinfo`, and then handed to ares; returns a
    ``(node, service)`` pair. Raises :exc:`TypeError` for bad argument
    types and ``error``/``gaierror`` for resolution failures.
    """
    # Argument validation mirroring the stdlib's messages.
    if not isinstance(flags, int):
        raise TypeError("an integer is required")
    if not isinstance(sockaddr, tuple):
        raise TypeError("getnameinfo() argument 1 must be a tuple")

    address = sockaddr[0]
    if not PY3 and isinstance(address, text_type):
        # Python 2: the host must be a byte string for c-ares.
        address = address.encode("ascii")

    if not isinstance(address, string_types):
        raise TypeError("sockaddr[0] must be a string, not %s" % type(address).__name__)

    port = sockaddr[1]
    if not isinstance(port, int):
        raise TypeError("port must be an integer, not %s" % type(port))

    waiter = Waiter(self.hub)
    # Canonicalize the address first; getnameinfo needs a single,
    # unambiguous sockaddr to work on.
    result = self._getaddrinfo(address, str(sockaddr[1]), family=AF_UNSPEC, socktype=SOCK_DGRAM)
    if not result:
        # NOTE(review): presumably _getaddrinfo left the failing
        # exc_info in place for us to propagate — confirm.
        reraise(*sys.exc_info())
    elif len(result) != 1:
        raise error("sockaddr resolved to multiple addresses")

    family, _socktype, _proto, _name, address = result[0]

    if family == AF_INET:
        if len(sockaddr) != 2:
            raise error("IPv4 sockaddr must be 2 tuple")
    elif family == AF_INET6:
        # Preserve the caller's flowinfo/scope-id on the resolved address.
        address = address[:2] + sockaddr[2:]

    self.ares.getnameinfo(waiter, address, flags)
    node, service = waiter.get()

    if service is None:
        if PY3:
            # ares docs: "If the query did not complete
            # successfully, or one of the values was not
            # requested, node or service will be NULL ". Python 2
            # allows that for the service, but Python 3 raises
            # an error. This is tested by test_socket in py 3.4
            err = gaierror("nodename nor servname provided, or not known")
            err.errno = 8
            raise err
        service = "0"
    return node, service
def handle_error(self, context, type, value, tb):
    """
    Invoked by the event loop whenever an error occurs; by default the
    exception is printed to the :attr:`exception stream
    <exception_stream>`.

    ``type``, ``value`` and ``tb`` form the usual triple returned by
    :func:`sys.exc_info`. (Calling :func:`sys.exc_info` itself may not
    be safe at the time this runs.)

    Exceptions classified as :attr:`not errors <NOT_ERROR>` are never
    printed. Exceptions classified as :attr:`system errors
    <SYSTEM_ERROR>` are forwarded to :meth:`handle_system_error` after
    being printed.

    Applications may install a replacement with this same signature as
    a property on the hub instance to override this behavior; that is
    advanced usage and requires great care. This function *must not*
    raise any exceptions.

    :param context: ``None`` signals a system error that should
        generally exit the loop and be thrown to the parent greenlet.
    """
    type, value, tb = self._normalize_exception(type, value, tb)

    # HubDestroyed has to keep propagating, or the loop cannot
    # properly exit.
    if type is HubDestroyed:
        reraise(type, value, tb)

    is_system = context is None or issubclass(type, self.SYSTEM_ERROR)

    if not issubclass(type, self.NOT_ERROR):
        self.print_exception(context, type, value, tb)
    if is_system:
        self.handle_system_error(type, value, tb)
def _raise_exception(self):
    """Re-raise the exception info previously stored on ``self.exc_info``."""
    stored_info = self.exc_info
    reraise(*stored_info)
def _test_sendall(self, data, match_data=None, client_method='sendall', **client_args):
    """
    Spawn a server thread that accepts one connection and reads it to
    EOF, send *data* through *client_method* on a fresh client
    connection, and assert the server received *match_data* (default:
    ``self.long_data``).
    """
    # pylint:disable=too-many-locals,too-many-branches,too-many-statements
    log = self.log
    log("Sendall", client_method)

    read_data = []
    accepted_event = Event()

    def accept_and_read():
        # Server side: accept one connection, read everything, record it.
        log("accepting", self.listener)
        conn, _ = self.listener.accept()
        try:
            with conn.makefile(mode='rb') as r:
                log("accepted on server", conn)
                accepted_event.set()
                log("reading")
                read_data.append(r.read())
                log("done reading")
            del r
        finally:
            conn.close()
            del conn

    # NOTE(review): no explicit .start() — this Thread subclass appears
    # to start itself and record failures in .terminal_exc; confirm
    # against the local Thread definition.
    server = Thread(target=accept_and_read)
    try:
        log("creating client connection")
        client = self.create_connection(**client_args)

        # We seem to have a buffer stuck somewhere on appveyor?
        # https://ci.appveyor.com/project/denik/gevent/builds/27320824/job/bdbax88sqnjoti6i#L712
        should_unwrap = hasattr(client, 'unwrap') and greentest.PY37 and greentest.WIN

        # The implicit reference-based nastiness of Python 2
        # sockets interferes, especially when using SSL sockets.
        # The best way to get a decent FIN to the server is to shutdown
        # the output. Doing that on Python 3, OTOH, is contraindicated
        # except on PyPy.
        should_shutdown = greentest.PY2 or greentest.PYPY

        # It's important to wait for the server to fully accept before
        # we shutdown and close the socket. In SSL mode, the number
        # and timing of data exchanges to complete the handshake and
        # thus exactly when greenlet switches occur, varies by TLS version.
        #
        # It turns out that on < TLS1.3, we were getting lucky and the
        # server was the greenlet that raced ahead and blocked in r.read()
        # before the client returned from create_connection().
        #
        # But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
        # one that raced ahead while the server had yet to return from
        # self.listener.accept(). So the client sent the data to the socket,
        # and closed, before the server could do anything, and the server,
        # when it got switched to by server.join(), found its new socket
        # dead.
        accepted_event.wait()
        log("accepted", client)
        try:
            getattr(client, client_method)(data)
        except:
            import traceback
            traceback.print_exc()
            # unwrapping might not work after this because we're in
            # a bad state.
            if should_unwrap:
                client.shutdown(socket.SHUT_RDWR)
                should_unwrap = False
                should_shutdown = False
            raise
        finally:
            log("shutdown")
            if should_shutdown:
                client.shutdown(socket.SHUT_RDWR)
            elif should_unwrap:
                try:
                    client.unwrap()
                except OSError as e:
                    if greentest.PY37 and greentest.WIN and e.errno == 0:
                        # ? 3.7.4 on AppVeyor sometimes raises
                        # "OSError[errno 0] Error" here, which doesn't make
                        # any sense.
                        pass
                    else:
                        raise
            log("closing")
            client.close()
    finally:
        server.join(10)
        assert not server.is_alive()

    if server.terminal_exc:
        # Propagate any failure from the server thread into this test.
        reraise(*server.terminal_exc)

    if match_data is None:
        match_data = self.long_data
    self.assertEqual(read_data, [match_data])
def _raise_exception(self):
    """Re-raise the (type, value, traceback) info held in ``self.exc_info``."""
    info = self.exc_info
    reraise(*info)
def _test_sendall(self, data, match_data=None, client_method='sendall', **client_args):
    """
    Spawn a server thread that accepts one connection and reads it to
    EOF, send *data* through *client_method* on a fresh client
    connection, and compare what the server received against
    *match_data* (default: ``self.long_data``), chunk by comma-separated
    chunk.
    """
    # pylint:disable=too-many-locals,too-many-branches,too-many-statements
    log = self.log
    log("test_sendall using method", client_method)

    read_data = []
    accepted_event = Event()

    def accept_and_read():
        # Server side: accept one connection, read everything, record it.
        log("\taccepting", self.listener)
        conn, _ = self.listener.accept()
        try:
            with conn.makefile(mode='rb') as r:
                log("\taccepted on server; client conn is", conn, "file is", r)
                accepted_event.set()
                log("\treading")
                read_data.append(r.read())
                log("\tdone reading", r, "got bytes", len(read_data[0]))
            del r
        finally:
            conn.close()
            del conn

    # NOTE(review): no explicit .start() — this Thread subclass appears
    # to start itself and record failures in .terminal_exc; confirm
    # against the local Thread definition.
    server = Thread(target=accept_and_read)
    try:
        log("creating client connection")
        client = self.create_connection(**client_args)

        # It's important to wait for the server to fully accept before
        # we shutdown and close the socket. In SSL mode, the number
        # and timing of data exchanges to complete the handshake and
        # thus exactly when greenlet switches occur, varies by TLS version.
        #
        # It turns out that on < TLS1.3, we were getting lucky and the
        # server was the greenlet that raced ahead and blocked in r.read()
        # before the client returned from create_connection().
        #
        # But when TLS 1.3 was deployed (OpenSSL 1.1), the *client* was the
        # one that raced ahead while the server had yet to return from
        # self.listener.accept(). So the client sent the data to the socket,
        # and closed, before the server could do anything, and the server,
        # when it got switched to by server.join(), found its new socket
        # dead.
        accepted_event.wait()
        log("Client got accepted event from server", client, "; sending data", len(data))
        try:
            x = getattr(client, client_method)(data)
            log("Client sent data: result from method", x)
        finally:
            log("Client will unwrap and shutdown")
            if hasattr(client, 'unwrap'):
                # Are we dealing with an SSLSocket? If so, unwrap it
                # before attempting to shut down the socket. This does the
                # SSL shutdown handshake and (hopefully) stops ``accept_and_read``
                # from generating ``ConnectionResetError`` on AppVeyor.
                try:
                    client = client.unwrap()
                except (ValueError, OSError):
                    # PyPy 3.7 started raising _cffi_ssl._stdssl.error.SSLSyscallError,
                    # which is an OSError
                    pass

            try:
                # The implicit reference-based nastiness of Python 2
                # sockets interferes, especially when using SSL sockets.
                # The best way to get a decent FIN to the server is to shutdown
                # the output. Doing that on Python 3, OTOH, is contraindicated
                # except on PyPy, so this used to read ``PY2 or PYPY``. But
                # it seems that a shutdown is generally good practice, and I didn't
                # document what errors we saw without it. Per issue #1637
                # lets do a shutdown everywhere, but only after removing any
                # SSL wrapping.
                client.shutdown(socket.SHUT_RDWR)
            except (OSError, socket.error):
                pass

            log("Client will close")
            client.close()
    finally:
        server.join(10)
        assert not server.is_alive()

    if server.terminal_exc:
        # Propagate any failure from the server thread into this test.
        reraise(*server.terminal_exc)

    if match_data is None:
        match_data = self.long_data
    # Compare chunk-by-chunk for more helpful failure messages.
    read_data = read_data[0].split(b',')
    match_data = match_data.split(b',')
    self.assertEqual(read_data[0], match_data[0])
    self.assertEqual(len(read_data), len(match_data))
    self.assertEqual(read_data, match_data)