def _worker(self):
    # pylint:disable=too-many-branches
    need_decrease = True
    try:
        while 1:  # tiny bit faster than True on Py2
            h = _get_hub()
            if h is not None:
                h.name = 'ThreadPool Worker Hub'
            task_queue = self.task_queue
            # While we block, don't let the monitoring thread, if any,
            # report us as blocked. Indeed, so long as we never
            # try to switch greenlets, don't report us as blocked---
            # the threadpool is *meant* to run blocking tasks
            self.__ignore_current_greenlet_blocking(h)
            task = task_queue.get()
            try:
                if task is None:
                    need_decrease = False
                    self._decrease_size()
                    # we want first to decrease size, then decrease unfinished_tasks
                    # otherwise, _adjust might think there's one more idle thread that
                    # needs to be killed
                    return
                func, args, kwargs, thread_result = task
                try:
                    value = func(*args, **kwargs)
                except:  # pylint:disable=bare-except
                    exc_info = getattr(sys, 'exc_info', None)
                    if exc_info is None:
                        return
                    thread_result.handle_error((self, func), exc_info())
                else:
                    if sys is None:
                        return
                    thread_result.set(value)
                    del value
                finally:
                    del func, args, kwargs, thread_result, task
            finally:
                if sys is None:
                    return  # pylint:disable=lost-exception
                task_queue.task_done()
    finally:
        if need_decrease:
            self._decrease_size()
        if sys is not None and self._destroy_worker_hub:
            hub = _get_hub()
            if hub is not None:
                hub.destroy(True)
            del hub
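A minimal, stdlib-only sketch of the sentinel-shutdown protocol the worker above relies on: putting None on the task queue tells exactly one worker to exit, and the finally-guarded task_done() keeps queue.join() accounting correct even for the sentinel. All names here are illustrative, not gevent's API.

import queue
import threading

def worker(task_queue):
    while True:
        task = task_queue.get()
        try:
            if task is None:  # sentinel: exactly one worker exits
                return
            func, args = task
            func(*args)
        finally:
            task_queue.task_done()  # counts real tasks and the sentinel alike

tasks = queue.Queue()
t = threading.Thread(target=worker, args=(tasks,))
t.start()
tasks.put((print, ('hello from a worker thread',)))
tasks.put(None)  # request shutdown
tasks.join()     # both queue items have been marked done
t.join()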
def switch_count(self):
    if self.switch_expected is None:
        return
    if not hasattr(self, 'initial_switch_count'):
        raise AssertionError('Cannot check switch_count (setUp() was not called)')
    if self.initial_switch_count is None:
        return
    current = getattr(_get_hub(), 'switch_count', 0)
    return current - self.initial_switch_count
def wrapper(self, *args, **kwargs):
    initial_switch_count = getattr(_get_hub(), 'switch_count', None)
    self.switch_expected = getattr(self, 'switch_expected', True)
    if initial_switch_count is not None:
        fullname = getattr(self, 'fullname', None)
        if self.switch_expected == 'default' and fullname:
            self.switch_expected = get_switch_expected(fullname)
    result = method(self, *args, **kwargs)
    if initial_switch_count is not None and self.switch_expected is not None:
        switch_count = _get_hub().switch_count - initial_switch_count
        if self.switch_expected is True:
            assert switch_count >= 0
            if not switch_count:
                raise AssertionError('%s did not switch' % fullname)
        elif self.switch_expected is False:
            if switch_count:
                raise AssertionError('%s switched but not expected to' % fullname)
        else:
            raise AssertionError('Invalid value for switch_expected: %r' % (self.switch_expected, ))
    return result
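Because wrapper closes over a free variable `method`, it is the inner function of a test decorator. A self-contained sketch of that shape, with the switch counter replaced by a plain attribute; `check_switches` and `Demo` are hypothetical names, not part of the real test suite:

import functools

def check_switches(method):
    # Decorator shell: snapshot a counter before the test body runs,
    # then assert that it moved afterwards.
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        before = self.switch_count
        result = method(self, *args, **kwargs)
        if self.switch_count == before:
            raise AssertionError('%s did not switch' % method.__name__)
        return result
    return wrapper

class Demo(object):
    switch_count = 0

    @check_switches
    def test_switches(self):
        self.switch_count += 1  # stand-in for a real greenlet switch

Demo().test_switches()  # passes; without the increment it would raise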
def _sys_stats_monitor(context):
    import gc
    import weakref
    from gevent.hub import _get_hub
    from gevent import sleep
    context = weakref.ref(context)  # give gc a hand
    end = faststat.nanotime()  # current time throws off duration stats less than 0
    while 1:
        start = faststat.nanotime()
        tmp = context()
        if tmp is None or tmp.stopping:
            return
        # tmp.stats['gc.garbage'].add(len(gc.garbage))
        # NOTE: gc.garbage() only does something if gc module has debug flag set
        counts = gc.get_count()
        for i in range(len(counts)):
            tmp.stats['gc.count' + str(i)].add(counts[i])
        tmp.stats['greenlets.active'].add(_get_hub().loop.activecnt)
        tmp.stats['greenlets.pending'].add(_get_hub().loop.pendingcnt)
        try:
            tmp.stats['queues.cpu_bound.depth'].add(
                len(tmp.thread_locals.cpu_bound_thread.in_q))
        except AttributeError:
            pass
        try:
            tmp.stats['queues.io_bound.depth'].add(
                tmp.thread_locals.io_bound_thread.task_queue._qsize())
        except AttributeError:
            pass
        interval = tmp.monitor_interval
        end, prev = faststat.nanotime(), end
        # keep a rough measure of the fraction of time spent on monitoring
        if prev == end:
            tmp.stats['monitoring.overhead'].add(0)
        else:
            tmp.stats['monitoring.overhead'].add((end - start) / (end - prev))
        tmp.durations['monitoring.duration'].end(start)
        tmp = None  # drop the strong reference before sleeping so gc can collect
        sleep(interval)
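The weakref dance above (re-acquire a strong reference each cycle, drop it before sleeping) is what lets the monitored object be garbage-collected even while its monitor loop is still scheduled. A stdlib-only sketch of the same pattern; Owner and monitor are illustrative names:

import time
import weakref

class Owner(object):
    stopping = False

def monitor(owner_ref, interval=0.01, max_ticks=100):
    # owner_ref is a weakref.ref; hold a strong reference only briefly
    # each cycle, and release it before sleeping so collection isn't blocked.
    for _ in range(max_ticks):
        owner = owner_ref()
        if owner is None or owner.stopping:
            return 'stopped'
        owner = None  # release before sleeping
        time.sleep(interval)
    return 'timed out'

o = Owner()
ref = weakref.ref(o)
o.stopping = True
print(monitor(ref))  # -> 'stopped'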
def get_greenlets():
    import gc
    import traceback
    answer = []  # initialized outside the try so the fallback below can append
    try:
        from greenlet import greenlet
        for ob in gc.get_objects():
            if not isinstance(ob, greenlet):
                continue
            if not ob:  # skip dead or not-yet-started greenlets
                continue
            answer.append(traceback.format_stack(ob.gr_frame))
    except Exception:
        # fall back to reporting only the hub's active-watcher count
        from gevent.hub import _get_hub
        answer.append([_get_hub().loop.activecnt])
    return answer
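A hedged usage sketch of the same technique (enumerating live greenlets via gc and formatting their suspended frames); it assumes gevent and greenlet are installed, and dump_greenlet_stacks is an illustrative name:

import gc
import traceback

import gevent
from greenlet import greenlet

def dump_greenlet_stacks():
    stacks = []
    for ob in gc.get_objects():
        # only suspended greenlets have a gr_frame to format
        if isinstance(ob, greenlet) and ob and ob.gr_frame is not None:
            stacks.append(''.join(traceback.format_stack(ob.gr_frame)))
    return stacks

g = gevent.spawn(gevent.sleep, 0.1)
gevent.sleep(0)  # yield so g starts and blocks in sleep
for s in dump_greenlet_stacks():
    print(s)
g.join()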
def setUp(self):
    self.initial_switch_count = getattr(_get_hub(), 'switch_count', None)
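How setUp, switch_count, and the wrapper fit together, as a minimal self-contained sketch with a stubbed hub so it runs without gevent; _FakeHub, SwitchCountMixin, and friends are illustrative names:

import unittest

class _FakeHub(object):
    switch_count = 0  # stands in for the real hub's switch counter

_hub = _FakeHub()

def _get_hub():
    return _hub

class SwitchCountMixin(object):
    def setUp(self):
        # snapshot the counter so tests measure only their own switches
        self.initial_switch_count = getattr(_get_hub(), 'switch_count', None)

    @property
    def switch_count(self):
        if self.initial_switch_count is None:
            return 0
        return getattr(_get_hub(), 'switch_count', 0) - self.initial_switch_count

class Example(SwitchCountMixin, unittest.TestCase):
    def test_switched(self):
        _get_hub().switch_count += 1  # simulate one greenlet switch
        self.assertEqual(self.switch_count, 1)

if __name__ == '__main__':
    unittest.main()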