def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
          queue_sem):
    while True:
        try:
            with notempty:
                if not buffer:
                    notempty.wait()
            try:
                while True:
                    obj = buffer.popleft()
                    if obj is _sentinel:
                        debug('feeder thread got sentinel -- exiting')
                        close()
                        return

                    obj = ForkingPickler.dumps(obj)
                    if sys.platform == 'win32':
                        send_bytes(obj)
                    else:
                        with writelock:
                            send_bytes(obj)
            except IndexError:
                # popleft() raises IndexError once the buffer is empty
                pass
        except Exception as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            if is_exiting():
                info('error in queue thread: %s', e)
                return
            else:
                # Since the object has not been sent in the queue, we need
                # to decrease the size of the queue.  The error acts as if
                # the object had been silently removed from the queue and
                # this step is necessary to have a properly working queue.
                queue_sem.release()
                traceback.print_exc()
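# For context, _feed() above is not called directly: the owning queue normally
# starts it lazily as a daemon thread on the first put().  The sketch below
# shows that wiring; the attribute names (_buffer, _notempty, _wlock, _writer,
# _ignore_epipe, _sem) are assumptions chosen to mirror the parameters of
# _feed() above, not a definitive multiprocessing implementation.
import threading

def _start_feeder_thread(queue):
    queue._buffer.clear()
    queue._thread = threading.Thread(
        target=_feed,
        args=(queue._buffer, queue._notempty, queue._writer.send_bytes,
              queue._wlock, queue._writer.close, queue._ignore_epipe,
              queue._sem),
        name='QueueFeederThread',
    )
    # A daemon thread must not keep the process alive, which is also why
    # _feed() tolerates its resources disappearing at interpreter shutdown.
    queue._thread.daemon = True
    queue._thread.start()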
def _serve(self):
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
    while 1:
        try:
            conn = self._listener.accept()
            msg = conn.recv()
            if msg is None:
                break
            key, destination_pid = msg
            send, close = self._cache.pop(key)
            send(conn, destination_pid)
            close()
            conn.close()
        except:
            if not is_exiting():
                import traceback
                sub_warning(
                    "thread for sharing handles raised exception :\n" +
                    "-" * 79 + "\n" + traceback.format_exc() + "-" * 79
                )
def _feed(buffer, notempty, send, writelock, close):
    debug('starting thread to feed data to pipe')
    from multiprocessing.util import is_exiting

    nacquire = notempty.acquire
    nrelease = notempty.release
    nwait = notempty.wait
    bpopleft = buffer.popleft
    sentinel = _sentinel
    if sys.platform != 'win32':
        wacquire = writelock.acquire
        wrelease = writelock.release
    else:
        wacquire = None

    try:
        while 1:
            nacquire()
            try:
                if not buffer:
                    nwait()
            finally:
                nrelease()
            try:
                while 1:
                    obj = bpopleft()
                    if obj is sentinel:
                        debug('feeder thread got sentinel -- exiting')
                        close()
                        return
                    if wacquire is None:
                        send(obj)
                    else:
                        wacquire()
                        try:
                            send(obj)
                        finally:
                            wrelease()
            except IndexError:
                pass
            except IOError:
                # Should be catching the same as errno.EPIPE below
                return
            except Exception as e:
                if getattr(e, 'errno', 0) == errno.EPIPE:
                    return
    except Exception as e:
        # Since this runs in a daemon thread the resources it uses
        # may be become unusable while the process is cleaning up.
        # We ignore errors which happen after the process has
        # started to cleanup.
        try:
            if is_exiting():
                info('error in queue thread: %s', e)
            else:
                import traceback
                traceback.print_exc()
        except Exception:
            pass
def _feed(buffer, notempty, send, writelock, close):
    debug('starting thread to feed data to pipe')
    from multiprocessing.util import is_exiting

    nacquire = notempty.acquire
    nrelease = notempty.release
    nwait = notempty.wait
    bpopleft = buffer.popleft
    sentinel = _sentinel
    if sys.platform != 'win32':
        wacquire = writelock.acquire
        wrelease = writelock.release
    else:
        wacquire = None

    try:
        while 1:
            nacquire()
            try:
                if not buffer:
                    nwait()
            finally:
                nrelease()
            try:
                while 1:
                    obj = bpopleft()
                    if obj is sentinel:
                        debug('feeder thread got sentinel -- exiting')
                        close()
                        return
                    if wacquire is None:
                        send(obj)
                    else:
                        wacquire()
                        try:
                            send(obj)
                        finally:
                            wrelease()
            except IndexError:
                pass
            except IOError:
                # Should be catching the same as errno.EPIPE below
                return
            except Exception as e:
                if getattr(e, 'errno', 0) == errno.EPIPE:
                    return
    except Exception, e:
        # Since this runs in a daemon thread the resources it uses
        # may be become unusable while the process is cleaning up.
        # We ignore errors which happen after the process has
        # started to cleanup.
        try:
            if is_exiting():
                info('error in queue thread: %s', e)
            else:
                import traceback
                traceback.print_exc()
        except Exception:
            pass
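# Note the contrast between the two older feeders above and the pre-pickling
# variants: here the raw object goes straight to send(), so serialization can
# happen while the shared write lock is already held, whereas the other
# feeders dump the object to bytes first and hold the lock only for the write
# itself.  The helper below is a hypothetical illustration of that ordering,
# not code taken from any of the implementations shown.

def _send_locked(obj, dumps, send_bytes, writelock):
    data = dumps(obj)       # potentially slow serialization, no lock held
    with writelock:         # lock held only for the short pipe write
        send_bytes(data)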
def _feed(buffer, notempty, send_bytes, writelock, close, reducers,
          ignore_epipe, onerror, queue_sem):
    util.debug('starting thread to feed data to pipe')

    nacquire = notempty.acquire
    nrelease = notempty.release
    nwait = notempty.wait
    bpopleft = buffer.popleft
    sentinel = _sentinel
    if sys.platform != 'win32':
        wacquire = writelock.acquire
        wrelease = writelock.release
    else:
        wacquire = None

    while 1:
        try:
            nacquire()
            try:
                if not buffer:
                    nwait()
            finally:
                nrelease()
            try:
                while 1:
                    obj = bpopleft()
                    if obj is sentinel:
                        util.debug('feeder thread got sentinel -- exiting')
                        close()
                        return

                    # serialize the data before acquiring the lock
                    obj_ = CustomizableLokyPickler.dumps(
                        obj, reducers=reducers)
                    if wacquire is None:
                        send_bytes(obj_)
                    else:
                        wacquire()
                        try:
                            send_bytes(obj_)
                        finally:
                            wrelease()
                    # Remove references early to avoid leaking memory
                    del obj, obj_
            except IndexError:
                pass
        except BaseException as e:
            if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            if util.is_exiting():
                util.info('error in queue thread: %s', e)
                return
            else:
                queue_sem.release()
                onerror(e, obj)
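# The loky-style variant above parameterizes failure handling: for anything
# other than an ignored EPIPE it releases queue_sem, so the queue's size
# accounting stops counting the object that was never sent, and then passes
# the exception together with the unsent object to onerror.  A minimal sketch
# of such a callback, matching only the onerror(e, obj) call signature shown
# above (the behaviour is an assumption, not loky's actual default):
import traceback

def _on_queue_feeder_error(e, obj):
    # Report the failure and drop the object; the feeder thread keeps running.
    print('dropping unsendable object of type %s' % type(obj).__name__)
    traceback.print_exception(type(e), e, e.__traceback__)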
def _serve():
    from multiprocessing.util import is_exiting, sub_warning

    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            send_handle(conn, handle_wanted, destination_pid)
            close(handle_wanted)
            conn.close()
        except:
            if not is_exiting():
                import traceback
                sub_warning(
                    'thread for sharing handles raised exception :\n' +
                    '-' * 79 + '\n' + traceback.format_exc() + '-' * 79
                )
def _serve(self):
    if hasattr(signal, 'pthread_sigmask'):
        signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
    while 1:
        try:
            conn = self._listener.accept()
            msg = conn.recv()
            if msg is None:
                break
            key, destination_pid = msg
            send, close = self._cache.pop(key)
            send(conn, destination_pid)
            close()
            conn.close()
        except:
            if not is_exiting():
                import traceback
                sub_warning(
                    'thread for sharing handles raised exception :\n' +
                    '-' * 79 + '\n' + traceback.format_exc() + '-' * 79)
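# The _serve() loop above implements the server half of a handle-sharing
# protocol: it blocks on a listener, expects either None (shutdown) or a
# (key, destination_pid) message, and replies by invoking the send callback
# registered in the cache for that key.  The sketch below is the implied
# client half; the function name, the authkey handshake and the exact
# hand-off are assumptions.
import os
from multiprocessing import current_process
from multiprocessing.connection import Client

def _request_resource(address, key):
    # Connect to the sharer's listener, authenticate, and send the (key, pid)
    # tuple that _serve() unpacks.  The open connection is returned so the
    # caller can receive whatever the registered send(conn, destination_pid)
    # callback transmits over it.
    conn = Client(address, authkey=current_process().authkey)
    conn.send((key, os.getpid()))
    return conn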
def _feed(buffer, notempty, send, writelock, close):
    debug("starting thread to feed data to pipe")

    nacquire = notempty.acquire
    nrelease = notempty.release
    nwait = notempty.wait
    bpopleft = buffer.popleft
    sentinel = _sentinel
    if sys.platform != "win32":
        wacquire = writelock.acquire
        wrelease = writelock.release
    else:
        wacquire = None

    try:
        while 1:
            nacquire()
            try:
                if not buffer:
                    nwait()
            finally:
                nrelease()
            try:
                while 1:
                    obj = bpopleft()
                    if obj is sentinel:
                        debug("feeder thread got sentinel -- exiting")
                        close()
                        return
                    if wacquire is None:
                        send(obj)
                    else:
                        wacquire()
                        try:
                            # print('sending object of size %i' %
                            #       sys.getsizeof(obj, -1))
                            # print str(obj)[:100]
                            # print ""
                            # print ""
                            try:
                                send(obj)
                            except SystemError as e:
                                print("Que sending error %s" % e)
                                print("error dump in %i.dump" % (os.getpid(),))
                                print(
                                    "Likely source: stdout/stderr too large "
                                    "(gigabytes)"
                                )
                                with open("%i.dump" % (os.getpid(),), "wb") as op:
                                    try:
                                        pickle.dump(obj, op)
                                    except Exception:
                                        pass
                                    try:
                                        # Truncate the textual dump to the
                                        # first 20000 characters.
                                        op.write(("%s" % (obj,))[:20000])
                                    except Exception:
                                        pass
                                raise
                        finally:
                            wrelease()
            except IndexError:
                pass
    except Exception as e:
        # Since this runs in a daemon thread the resources it uses
        # may be become unusable while the process is cleaning up.
        # We ignore errors which happen after the process has
        # started to cleanup.
        try:
            if is_exiting():
                debug("error in queue thread: %s", e)
            else:
                traceback.print_exc()
        except Exception:
            pass