def _select(self, readers=None, writers=None, err=None, timeout=0):
    readers = set() if readers is None else readers
    writers = set() if writers is None else writers
    err = set() if err is None else err
    try:
        r, w, e = select.select(readers, writers, err, timeout)
        if e:
            # fds flagged with an error condition are also reported as
            # readable, so the caller attempts the read and handles the
            # failure there.
            r = list(set(r) | set(e))
        return r, w, 0
    except (select.error, socket.error) as exc:
        if get_errno(exc) == errno.EINTR:
            return [], [], 1
        elif get_errno(exc) in SELECT_BAD_FD:
            for fd in readers | writers | err:
                try:
                    select.select([fd], [], [], 0)
                except (select.error, socket.error) as exc:
                    if get_errno(exc) not in SELECT_BAD_FD:
                        raise
                    readers.discard(fd)
                    writers.discard(fd)
                    err.discard(fd)
            return [], [], 1
        else:
            raise

def _write_ack(fd, ack, callback=None):
    header, body, body_size = ack._payload
    try:
        try:
            proc = fileno_to_synq[fd]
        except KeyError:
            # process died; the ack can be safely discarded
            # (a plain return ends the generator).
            return
        send_offset = proc.synq._writer.send_offset
        Hw = Bw = 0
        while Hw < 4:
            try:
                Hw += send_offset(header, Hw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until the fd is writable again
                yield
        while Bw < body_size:
            try:
                Bw += send_offset(body, Bw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
    finally:
        if callback:
            callback()
        active_writes.discard(fd)

def process_flush_queues(self, proc):
    """Flushes all queues, including the outbound buffer, so that
    all tasks that have not been started will be discarded.

    In Celery this is called whenever the transport connection is lost
    (consumer restart).

    """
    resq = proc.outq._reader
    on_state_change = self._result_handler.on_state_change
    fds = set([resq])
    while fds and not resq.closed and self._state != TERMINATE:
        readable, _, again = _select(fds, None, fds, timeout=0.01)
        if readable:
            try:
                task = resq.recv()
            except (OSError, IOError, EOFError) as exc:
                if get_errno(exc) == errno.EINTR:
                    continue
                elif get_errno(exc) == errno.EAGAIN:
                    break
                else:
                    debug('got %r while flushing process %r',
                          exc, proc, exc_info=1)
                    break
            else:
                if task is None:
                    debug('got sentinel while flushing process %r', proc)
                    break
                else:
                    on_state_change(task)
        else:
            break

def _write_ack(fd, ack, callback=None):
    # writes ack back to the worker if synack enabled.
    # this operation *MUST* complete, otherwise
    # the worker process will hang waiting for the ack.
    header, body, body_size = ack[2]
    try:
        try:
            proc = fileno_to_synq[fd]
        except KeyError:
            # process died, we can safely discard the ack at this
            # point (a plain return ends the generator).
            return
        send = proc.send_syn_offset
        Hw = Bw = 0
        while Hw < 4:
            try:
                Hw += send(header, Hw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                yield
        while Bw < body_size:
            try:
                Bw += send(body, Bw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
    finally:
        if callback:
            callback()
        # message written, so this fd is now available
        active_writes.discard(fd)

def _write_job(fd, job, callback=None):
    header, body, body_size = job._payload
    try:
        try:
            proc = fileno_to_inq[fd]
        except KeyError:
            # process exited; requeue the job for another process
            # (a plain return ends the generator).
            put_message(job)
            return
        # job result keeps track of what process the job is sent to.
        job._write_to = proc
        send_offset = proc.inq._writer.send_offset
        Hw = Bw = 0
        while Hw < 4:
            try:
                Hw += send_offset(header, Hw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
        while Bw < body_size:
            try:
                Bw += send_offset(body, Bw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
    finally:
        if callback:
            callback()
        active_writes.discard(fd)

def _write_to(fd, job, callback=None):
    header, body, body_size = job._payload
    try:
        try:
            proc = fileno_to_inq[fd]
        except KeyError:
            # process exited; requeue the job for another process
            # (a plain return ends the generator).
            put_message(job)
            return
        send_offset = proc.inq._writer.send_offset
        # job result keeps track of what process the job is sent to.
        job._write_to = proc
        Hw = Bw = 0
        while Hw < 4:
            try:
                Hw += send_offset(header, Hw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
        while Bw < body_size:
            try:
                Bw += send_offset(body, Bw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                yield
    finally:
        if callback:
            callback()
        active_writes.discard(fd)

def _recv_message(self, add_reader, fd, callback,
                  __read__=__read__, readcanbuf=readcanbuf,
                  BytesIO=BytesIO, unpack_from=unpack_from,
                  load=_pickle.load):
    Hr = Br = 0
    if readcanbuf:
        buf = bytearray(4)
        bufv = memoryview(buf)
    else:
        buf = bufv = BytesIO()
    # header
    assert not isblocking(fd)

    while Hr < 4:
        try:
            n = __read__(
                fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr,
            )
        except OSError as exc:
            if get_errno(exc) not in UNAVAIL:
                raise
            yield
        else:
            if n == 0:
                raise (OSError('End of file during message')
                       if Hr else EOFError())
            Hr += n

    body_size, = unpack_from('>i', bufv)
    if readcanbuf:
        buf = bytearray(body_size)
        bufv = memoryview(buf)
    else:
        buf = bufv = BytesIO()

    while Br < body_size:
        try:
            n = __read__(
                fd, bufv[Br:] if readcanbuf else bufv, body_size - Br,
            )
        except OSError as exc:
            if get_errno(exc) not in UNAVAIL:
                raise
            yield
        else:
            if n == 0:
                raise (OSError('End of file during message')
                       if Br else EOFError())
            Br += n
    add_reader(fd, self.handle_event, fd)
    if readcanbuf:
        message = load(BytesIO(bufv))
    else:
        bufv.seek(0)
        message = load(bufv)
    if message:
        callback(message)

def _write_job(proc, fd, job):
    # writes job to the worker process.
    # Operation must complete if more than one byte of data
    # was written.  If the broker connection is lost
    # and no data was written the operation shall be cancelled.
    header, body, body_size = job._payload
    errors = 0
    try:
        # job result keeps track of what process the job is sent to.
        job._write_to = proc
        send = proc.send_job_offset

        Hw = Bw = 0
        # write header
        while Hw < 4:
            try:
                Hw += send(header, Hw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                errors += 1
                if errors > 100:
                    on_not_recovering(proc, fd, job)
                    return  # give up (a plain return ends the generator)
                yield
            else:
                errors = 0

        # write body
        while Bw < body_size:
            try:
                Bw += send(body, Bw)
            except Exception as exc:
                if get_errno(exc) not in UNAVAIL:
                    raise
                # suspend until more data
                errors += 1
                if errors > 100:
                    on_not_recovering(proc, fd, job)
                    return  # give up (a plain return ends the generator)
                yield
            else:
                errors = 0
    finally:
        hub_remove(fd)
        write_stats[proc.index] += 1
        # message written, so this fd is now available
        active_writes.discard(fd)
        write_generator_done(job._writer())  # is a weakref

def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
                propagate=()):
    timer = self.timer
    delay = None
    if timer and timer._queue:
        for i in range(max_timers):
            delay, entry = next(self.scheduler)
            if entry is None:
                break
            try:
                entry()
            except propagate:
                raise
            except (MemoryError, AssertionError):
                raise
            except OSError as exc:
                if get_errno(exc) == errno.ENOMEM:
                    raise
                logger.error('Error in timer: %r', exc, exc_info=1)
            except Exception as exc:
                logger.error('Error in timer: %r', exc, exc_info=1)
    return min(delay or min_delay, max_delay)

def process_flush_queues(self, proc):
    """Flushes all queues, including the outbound buffer, so that
    all tasks that have not been started will be discarded.

    In Celery this is called whenever the transport connection is lost
    (consumer restart).

    """
    resq = proc.outq._reader
    on_state_change = self._result_handler.on_state_change
    fds = set([resq])
    while fds and not resq.closed and self._state != TERMINATE:
        readable, _, again = _select(fds, None, fds, timeout=0.01)
        if readable:
            try:
                task = resq.recv()
            except (OSError, IOError, EOFError) as exc:
                if get_errno(exc) not in UNAVAIL:
                    debug('got %r while flushing process %r',
                          exc, proc, exc_info=1)
                break
            else:
                if task is None:
                    debug('got sentinel while flushing process %r', proc)
                    break
                else:
                    on_state_change(task)
        else:
            break

def _stop_task_handler(task_handler):
    for proc in task_handler.pool:
        proc.inq._writer.setblocking(1)
        try:
            proc.inq.put(None)
        except OSError as exc:
            if get_errno(exc) != errno.EBADF:
                raise

def _flush_writer(self, writer):
    try:
        list(writer)
    except (OSError, IOError) as exc:
        if get_errno(exc) != errno.EBADF:
            raise
    finally:
        self._active_writers.discard(writer)

def _recv_message(self, add_reader, fd, callback,
                  read=os.read, unpack=struct.unpack,
                  loads=_pickle.loads, BytesIO=BytesIO):
    buf = BytesIO()
    # header
    remaining = 4
    bsize = None
    assert not isblocking(fd)

    while remaining > 0:
        try:
            bsize = read(fd, remaining)
        except OSError as exc:
            if get_errno(exc) not in UNAVAIL:
                raise
            yield
        else:
            n = len(bsize)
            if n == 0:
                if remaining == 4:
                    raise EOFError()
                else:
                    raise OSError('Got end of file during message')
            remaining -= n

    remaining, = size, = unpack('>i', bsize)

    while remaining > 0:
        try:
            chunk = read(fd, remaining)
        except OSError as exc:
            if get_errno(exc) not in UNAVAIL:
                raise
            yield
        else:
            n = len(chunk)
            if n == 0:
                if remaining == size:
                    raise EOFError()
                else:
                    raise IOError('Got end of file during message')
            buf.write(chunk)
            remaining -= n
    add_reader(fd, self.handle_event, fd)
    message = loads(buf.getvalue())
    if message:
        callback(message)

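# A minimal sketch of the wire format the reader above expects, stated
# here as an explicit assumption derived from the ``unpack('>i', ...)``
# call: a 4-byte big-endian signed length header followed by a pickled
# payload.  ``frame_message`` is a hypothetical helper, not part of the
# original source.
import pickle
import struct

def frame_message(obj):
    body = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
    return struct.pack('>i', len(body)) + body
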
def _stop_task_handler(task_handler):
    """Called at shutdown to tell processes that we are shutting down."""
    for proc in task_handler.pool:
        proc.inq._writer.setblocking(1)
        try:
            proc.inq.put(None)
        except OSError as exc:
            if get_errno(exc) != errno.EBADF:
                raise

def schedule_writes(ready_fds, shuffle=random.shuffle):
    # Schedule write operation to ready file descriptor.
    # The file descriptor is writeable, but that does not
    # mean the process is currently reading from the socket.
    # The socket is buffered so writeable simply means that
    # the buffer can accept at least 1 byte of data.
    shuffle(ready_fds)
    for ready_fd in ready_fds:
        if ready_fd in active_writes:
            # already writing to this fd
            continue
        if is_fair_strategy and ready_fd in busy_workers:
            # worker is already busy with another task
            continue
        if ready_fd not in all_inqueues:
            hub_remove(ready_fd)
            continue
        try:
            job = pop_message()
        except IndexError:
            # no more messages, remove all inactive fds from the hub.
            # this is important since the fds are always writeable
            # as long as there's 1 byte left in the buffer, and so
            # this may create a spinloop where the event loop
            # always wakes up.
            for inqfd in diff(active_writes):
                hub_remove(inqfd)
            break
        else:
            if not job._accepted:  # job not accepted by another worker
                try:
                    # keep track of what process the write operation
                    # was scheduled for.
                    proc = job._scheduled_for = fileno_to_inq[ready_fd]
                except KeyError:
                    # write was scheduled for this fd but the process
                    # has since exited and the message must be sent to
                    # another process.
                    put_message(job)
                    continue
                cor = _write_job(proc, ready_fd, job)
                job._writer = ref(cor)
                mark_write_gen_as_active(cor)
                mark_write_fd_as_active(ready_fd)
                mark_worker_as_busy(ready_fd)

                # Try to write immediately, in case there's an error.
                try:
                    next(cor)
                except StopIteration:
                    pass
                except OSError as exc:
                    if get_errno(exc) != errno.EBADF:
                        raise
                else:
                    add_writer(ready_fd, cor)

def _read(loop):
    try:
        drain_events(timeout=0)
    except timeout:
        return
    except error as exc:
        if get_errno(exc) in _unavail:
            return
        raise
    loop.call_soon(_read, loop)

def _select(readers=None, writers=None, err=None, timeout=0):
    """Simple wrapper to :class:`~select.select`.

    :param readers: Set of reader fds to test if readable.
    :param writers: Set of writer fds to test if writable.
    :param err: Set of fds to test for error condition.

    All fd sets passed must be mutable as this function
    will remove non-working fds from them, which also means
    the caller must make sure there are still fds in the sets
    before calling us again.

    :returns: tuple of ``(readable, writable, again)``, where
        ``readable`` is a set of fds that have data available for read,
        ``writable`` is a set of fds that are ready to be written to
        and ``again`` is a flag that, if set, means the caller must
        throw away the result and call us again.

    """
    readers = set() if readers is None else readers
    writers = set() if writers is None else writers
    err = set() if err is None else err
    try:
        r, w, e = select.select(readers, writers, err, timeout)
        if e:
            r = list(set(r) | set(e))
        return r, w, 0
    except (select.error, socket.error) as exc:
        if get_errno(exc) == errno.EINTR:
            return [], [], 1
        elif get_errno(exc) in SELECT_BAD_FD:
            for fd in readers | writers | err:
                try:
                    select.select([fd], [], [], 0)
                except (select.error, socket.error) as exc:
                    if get_errno(exc) not in SELECT_BAD_FD:
                        raise
                    readers.discard(fd)
                    writers.discard(fd)
                    err.discard(fd)
            return [], [], 1
        else:
            raise

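# A minimal usage sketch, not from the original source: the ``again``
# flag means the result must be discarded and the call retried, since
# the fd sets may have been mutated (bad fds removed).  The names
# ``inqueues``, ``on_readable`` and the loop shape are assumptions made
# for illustration.
def drain_readable(inqueues, on_readable):
    while inqueues:
        readable, _, again = _select(readers=inqueues, timeout=0.1)
        if again:
            # EINTR or a bad fd: the sets were cleaned up, retry.
            continue
        if not readable:
            break
        for fd in readable:
            on_readable(fd)
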
def _read(loop):
    if not connection.connected:
        raise ConnectionError('Socket was disconnected')
    try:
        drain_events(timeout=0)
    except timeout:
        return
    except error as exc:
        if get_errno(exc) in _unavail:
            return
        raise
    loop.call_soon(_read, loop)

def _flush_writer(self, writer):
    try:
        while 1:
            try:
                next(writer)
            except StopIteration:
                break
    except (OSError, IOError) as exc:
        if get_errno(exc) != errno.EBADF:
            raise
    finally:
        self._active_writers.discard(writer)

def _reader(self, connection, timeout=socket.timeout,
            error=socket.error, get_errno=get_errno,
            _unavail=(errno.EAGAIN, errno.EINTR)):
    drain_events = connection.drain_events
    while 1:
        try:
            yield drain_events(timeout=0)
        except timeout:
            break
        except error as exc:
            if get_errno(exc) in _unavail:
                break
            raise

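# Hypothetical usage sketch (``transport`` and ``connection`` are
# assumed names): because the generator above breaks on timeout or
# EAGAIN/EINTR, a readability callback can simply iterate a fresh
# reader to drain every event currently buffered, without ever
# blocking.  Once exhausted, the generator cannot be reused, so a new
# one is created per callback.
def on_connection_readable(transport, connection):
    for _ in transport._reader(connection):
        pass  # each iteration drains one event
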
def maybe_drop_privileges(uid=None, gid=None):
    """Change process privileges to new user/group.

    If UID and GID is specified, the real user/group is changed.

    If only UID is specified, the real user is changed, and the group is
    changed to the user's primary group.

    If only GID is specified, only the group is changed.

    """
    if sys.platform == 'win32':
        return
    if os.geteuid():
        # no point trying to setuid unless we're root.
        if not os.getuid():
            raise AssertionError('contact support')
    uid = uid and parse_uid(uid)
    gid = gid and parse_gid(gid)

    if uid:
        # If GID isn't defined, get the primary GID of the user.
        if not gid and pwd:
            gid = pwd.getpwuid(uid).pw_gid
        # Must set the GID before initgroups(), as setgid()
        # is known to zap the group list on some platforms.
        # setgid must happen before setuid (otherwise the setgid operation
        # may fail because of insufficient privileges and possibly stay
        # in a privileged group).
        setgid(gid)
        initgroups(uid, gid)

        # at last:
        setuid(uid)
        # ... and make sure privileges cannot be restored:
        try:
            setuid(0)
        except OSError as exc:
            if get_errno(exc) != errno.EPERM:
                raise
            pass  # Good: cannot restore privileges.
        else:
            raise RuntimeError(
                'non-root user able to restore privileges after setuid.')
    else:
        gid and setgid(gid)

    if uid and (not os.getuid()) and not (os.geteuid()):
        raise AssertionError('Still root uid after drop privileges!')
    if gid and (not os.getgid()) and not (os.getegid()):
        raise AssertionError('Still root gid after drop privileges!')

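# Illustrative call (the uid/gid values are assumptions): typically run
# once, early in daemon startup, after any privileged setup such as
# binding low ports, so the worker continues as an unprivileged user.
maybe_drop_privileges(uid='nobody', gid='nogroup')
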
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10,
                propagate=()):
    timer = self.timer
    delay = None
    if timer and timer._queue:
        for i in range(max_timers):
            delay, entry = next(self.scheduler)
            if entry is None:
                break
            try:
                entry()
            except propagate:
                raise
            except (MemoryError, AssertionError):
                raise
            except OSError as exc:
                if get_errno(exc) == errno.ENOMEM:
                    raise
                logger.error('Error in timer: %r', exc, exc_info=1)
            except Exception as exc:
                logger.error('Error in timer: %r', exc, exc_info=1)
    return min(max(delay or 0, min_delay), max_delay)

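# Worked example of the clamp above (illustrative only): unlike the
# earlier variant, which returned ``min(delay or min_delay, max_delay)``
# and so let sub-``min_delay`` values through, this version enforces the
# floor as well as the ceiling.
assert min(max(0.2, 1), 10) == 1        # short delays raised to min_delay
assert min(max(42, 1), 10) == 10        # long delays capped at max_delay
assert min(max(None or 0, 1), 10) == 1  # no timers due: poll at the floor
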
def start(self):
    blueprint, loop = self.blueprint, self.loop
    while blueprint.state != CLOSE:
        self.restart_count += 1
        maybe_shutdown()
        try:
            blueprint.start(self)
        except self.connection_errors as exc:
            if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE:
                raise  # Too many open files
            maybe_shutdown()
            try:
                self._restart_state.step()
            except RestartFreqExceeded as exc:
                crit('Frequent restarts detected: %r', exc, exc_info=1)
                sleep(1)
            if blueprint.state != CLOSE and self.connection:
                warn(CONNECTION_RETRY, exc_info=True)
                try:
                    self.connection.collect()
                except Exception:
                    pass
                self.on_close()
                blueprint.restart(self)

def create_loop(self, generator=generator, sleep=sleep, min=min,
                next=next, Empty=Empty, StopIteration=StopIteration,
                KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR):
    readers, writers = self.readers, self.writers
    poll = self.poller.poll
    fire_timers = self.fire_timers
    hub_remove = self.remove
    scheduled = self.timer._queue
    consolidate = self.consolidate
    consolidate_callback = self.consolidate_callback
    on_tick = self.on_tick
    todo = self._ready
    propagate = self.propagate_errors

    while 1:
        for tick_callback in on_tick:
            tick_callback()

        while todo:
            item = todo.popleft()
            if item:
                item()

        poll_timeout = fire_timers(propagate=propagate) if scheduled else 1
        if readers or writers:
            to_consolidate = []
            try:
                events = poll(poll_timeout)
            except ValueError:
                # Issue 882: the poller was torn down while polling;
                # a plain return ends the loop generator.
                return
            for fd, event in events or ():
                general_error = False
                if fd in consolidate and writers.get(fd) is None:
                    to_consolidate.append(fd)
                    continue
                cb = cbargs = None

                if event & READ:
                    try:
                        cb, cbargs = readers[fd]
                    except KeyError:
                        self.remove_reader(fd)
                        continue
                elif event & WRITE:
                    try:
                        cb, cbargs = writers[fd]
                    except KeyError:
                        self.remove_writer(fd)
                        continue
                elif event & ERR:
                    general_error = True
                else:
                    logger.info(W_UNKNOWN_EVENT, event, fd)
                    general_error = True

                if general_error:
                    try:
                        cb, cbargs = (readers.get(fd) or
                                      writers.get(fd))
                    except TypeError:
                        pass

                if cb is None:
                    self.remove(fd)
                    continue

                if isinstance(cb, generator):
                    try:
                        next(cb)
                    except OSError as exc:
                        if get_errno(exc) != errno.EBADF:
                            raise
                        hub_remove(fd)
                    except StopIteration:
                        pass
                    except Exception:
                        hub_remove(fd)
                        raise
                else:
                    try:
                        cb(*cbargs)
                    except Empty:
                        pass
            if to_consolidate:
                consolidate_callback(to_consolidate)
        else:
            # no sockets yet, startup is probably not done.
            sleep(min(poll_timeout, 0.1))
        yield

def schedule_writes(ready_fds, curindex=[0]):
    # Schedule write operation to ready file descriptor.
    # The file descriptor is writeable, but that does not
    # mean the process is currently reading from the socket.
    # The socket is buffered so writeable simply means that
    # the buffer can accept at least 1 byte of data.

    # This means we have to cycle between the ready fds.
    # the first version used shuffle, but using i % total
    # is about 30% faster with many processes.  The latter
    # also shows more fairness in write stats when used with
    # many processes [XXX On OS X, this may vary depending
    # on event loop implementation (i.e. select vs epoll), so
    # have to test further]
    total = len(ready_fds)
    for i in range(total):
        ready_fd = ready_fds[curindex[0] % total]
        if ready_fd in active_writes:
            # already writing to this fd
            curindex[0] += 1
            continue
        if is_fair_strategy and ready_fd in busy_workers:
            # worker is already busy with another task
            curindex[0] += 1
            continue
        if ready_fd not in all_inqueues:
            hub_remove(ready_fd)
            curindex[0] += 1
            continue
        try:
            job = pop_message()
        except IndexError:
            # no more messages, remove all inactive fds from the hub.
            # this is important since the fds are always writeable
            # as long as there's 1 byte left in the buffer, and so
            # this may create a spinloop where the event loop
            # always wakes up.
            for inqfd in diff(active_writes):
                hub_remove(inqfd)
            break
        else:
            if not job._accepted:  # job not accepted by another worker
                try:
                    # keep track of what process the write operation
                    # was scheduled for.
                    proc = job._scheduled_for = fileno_to_inq[ready_fd]
                except KeyError:
                    # write was scheduled for this fd but the process
                    # has since exited and the message must be sent to
                    # another process.
                    put_back_message(job)
                    curindex[0] += 1
                    continue
                cor = _write_job(proc, ready_fd, job)
                job._writer = ref(cor)
                mark_write_gen_as_active(cor)
                mark_write_fd_as_active(ready_fd)
                mark_worker_as_busy(ready_fd)

                # Try to write immediately, in case there's an error.
                try:
                    next(cor)
                except StopIteration:
                    pass
                except OSError as exc:
                    if get_errno(exc) != errno.EBADF:
                        raise
                else:
                    add_writer(ready_fd, cor)
        curindex[0] += 1

def create_loop(self, generator=generator, sleep=sleep, min=min,
                next=next, Empty=Empty, StopIteration=StopIteration,
                KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR):
    readers, writers = self.readers, self.writers
    poll = self.poller.poll
    fire_timers = self.fire_timers
    hub_remove = self.remove
    scheduled = self.timer._queue
    consolidate = self.consolidate
    consolidate_callback = self.consolidate_callback
    on_tick = self.on_tick
    todo = self._ready
    propagate = self.propagate_errors

    while 1:
        for tick_callback in on_tick:
            tick_callback()

        while todo:
            item = todo.popleft()
            if item:
                item()

        poll_timeout = fire_timers(propagate=propagate) if scheduled else 1
        if readers or writers:
            to_consolidate = []
            try:
                events = poll(poll_timeout)
            except ValueError:
                # Issue 882: the poller was torn down while polling;
                # a plain return ends the loop generator.
                return
            for fileno, event in events or ():
                if fileno in consolidate and \
                        writers.get(fileno) is None:
                    to_consolidate.append(fileno)
                    continue
                cb = cbargs = None
                try:
                    if event & READ:
                        cb, cbargs = readers[fileno]
                    elif event & WRITE:
                        cb, cbargs = writers[fileno]
                    elif event & ERR:
                        try:
                            cb, cbargs = (readers.get(fileno) or
                                          writers.get(fileno))
                        except TypeError:
                            pass
                except (KeyError, Empty):
                    hub_remove(fileno)
                    continue
                if cb is None:
                    continue
                if isinstance(cb, generator):
                    try:
                        next(cb)
                    except OSError as exc:
                        if get_errno(exc) != errno.EBADF:
                            raise
                        hub_remove(fileno)
                    except StopIteration:
                        pass
                    except Exception:
                        hub_remove(fileno)
                        raise
                else:
                    try:
                        cb(*cbargs)
                    except Empty:
                        pass
            if to_consolidate:
                consolidate_callback(to_consolidate)
        else:
            # no sockets yet, startup is probably not done.
            sleep(min(poll_timeout, 0.1))
        yield

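# Hypothetical driver for the loop generator above (``hub`` and
# ``should_stop`` are assumed names): each next() call runs exactly one
# poll iteration, leaving shutdown control with the caller.  This is a
# sketch of how an outer loop might consume create_loop(), not the
# library's actual runner.
loop = hub.create_loop()
try:
    while not should_stop:
        next(loop)
except StopIteration:
    pass  # the poller was torn down while polling (see Issue 882)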