def run(command, **kwargs): # pylint:disable=too-many-locals
    """
    Execute *command*, returning a `RunResult`.

    This blocks until *command* finishes or until it times out.

    Keyword arguments ``buffer_output``, ``quiet`` and ``nested`` are
    consumed here; all remaining *kwargs* are forwarded to ``start()``.
    """
    buffer_output = kwargs.pop('buffer_output', BUFFER_OUTPUT)
    quiet = kwargs.pop('quiet', QUIET)
    verbose = not quiet
    nested = kwargs.pop('nested', False)
    if buffer_output:
        # We take over stdout/stderr (stderr interleaved into stdout),
        # so the caller must not have asked for its own redirection.
        assert 'stdout' not in kwargs and 'stderr' not in kwargs, kwargs
        kwargs['stderr'] = subprocess.STDOUT
        kwargs['stdout'] = subprocess.PIPE
    popen = start(command, quiet=quiet, **kwargs)
    name = popen.name
    try:
        time_start = perf_counter()
        out, err = popen.communicate()
        duration = perf_counter() - time_start
        if popen.was_killed or popen.poll() is None:
            # Killed by the watchdog timer (or never reported an exit
            # code): use a sentinel string instead of a numeric code.
            result = 'TIMEOUT'
        else:
            result = popen.poll()
    finally:
        # Always reap the child, even if communicate() raised.
        kill(popen)
    assert popen.timer is None
    # Any truthy result — a non-zero exit code or 'TIMEOUT' — is a failure.
    failed = bool(result)
    if out:
        out = out.strip()
        out = out if isinstance(out, str) else out.decode('utf-8', 'ignore')
    if out and (failed or verbose or _should_show_warning_output(out)):
        # Indent the child's output under its name for readability.
        out = ' ' + out.replace('\n', '\n ')
        out = out.rstrip()
        out += '\n'
        log('| %s\n%s', name, out)
    status, run_count, skipped_count = _find_test_status(duration, out)
    if result:
        log('! %s [code %s] %s', name, result, status, color='error')
    elif not nested:
        # Nested runs stay quiet on success to avoid duplicate log lines.
        log('- %s %s', name, status)
    return RunResult(
        command, kwargs, result,
        output=out, error=err, name=name,
        run_count=run_count, skipped_count=skipped_count,
        run_duration=duration,
    )
def did_block_hub(self, hub):
    """
    Return ``(True, active_greenlet)`` when the longest observed interval
    between greenlet switches exceeded ``max_blocking_time``; otherwise
    fall through (returning None).
    """
    blocked_for = self.max_blocking
    if not blocked_for:
        # No switch was ever recorded; measure from the last switch
        # up to right now.
        blocked_for = perf_counter() - self.last_switch
        self.max_blocking = blocked_for
    if blocked_for > self.max_blocking_time:
        return (True, self.active_greenlet)
    return None
def __exit__(self, t, v, tb):
    # Stop tracing first so nothing further is recorded while we
    # inspect the results.
    self.tracer.kill()
    # Drop our references (and break reference cycles) regardless of
    # how we exit.
    hub = self.hub
    self.hub = None
    tracer = self.tracer
    self.tracer = None
    # Only check if there was no exception raised, we
    # don't want to hide anything
    if t is not None:
        return
    did_block = tracer.did_block_hub(hub)
    if did_block:
        # Blocking was detected: build a human-readable multi-line
        # report and raise it as a test failure.
        execution_time_s = perf_counter() - self._entered
        active_greenlet = did_block[1]
        report_lines = tracer.did_block_hub_report(hub, active_greenlet, {})
        message = 'To the hub' if self.hub_only else 'To any greenlet'
        message += ' in %.4f seconds' % (execution_time_s, )
        max_block = self.max_blocking_time
        message += ' (max allowed %.4f seconds)' % ( max_block, ) if max_block else ''
        message += '\n'
        message += '\n'.join(report_lines)
        raise _FailedToSwitch(message)
def __call__(self, event, args):
    """
    Trace callback: delegate to the base tracer, then record the longest
    gap seen between switches of real (non-hub) greenlets.
    """
    previously_active = self.active_greenlet
    GreenletTracer.__call__(self, event, args)
    # If we're switching out of the hub, the blocking
    # time doesn't count; nor does the very first switch.
    if previously_active is not None and previously_active is not self.hub:
        elapsed = perf_counter() - self.last_switch
        if elapsed > self.max_blocking:
            self.max_blocking = elapsed
def _wait_and_check(self, timeout=None):
    """
    Call ``self.wait(timeout)`` and, win or lose, verify the elapsed
    wall-clock time stayed within the configured delay bounds.
    """
    if timeout is None:
        timeout = self._default_wait_timeout
    # gevent.timer instances have a 'seconds' attribute,
    # otherwise it's the raw number
    seconds = getattr(timeout, 'seconds', timeout)
    # Refresh the loop's cached notion of "now" so the timer starts
    # from an accurate baseline.
    gevent.get_hub().loop.update_now()
    start = perf_counter()
    try:
        result = self.wait(timeout)
    finally:
        # Check bounds even if wait() raised (e.g. a Timeout).
        self._check_delay_bounds(seconds, perf_counter() - start,
                                 self._default_delay_min_adj,
                                 self._default_delay_max_adj)
    return result
def test_resolution(self): # pylint:disable=too-many-locals
    # Make sure that having an active IO watcher
    # doesn't badly throw off our timer resolution.
    # (This was a specific problem with libuv)
    # https://github.com/gevent/gevent/pull/1194
    from gevent._compat import perf_counter
    import socket
    s = socket.socket()
    self._close_on_teardown(s)
    fd = s.fileno()
    ran_at_least_once = False
    fired_at = []

    def timer_counter():
        # Record exactly when the timer callback ran.
        fired_at.append(perf_counter())

    loop = self.loop
    # Allow generous slack over the nominal timer duration.
    timer_multiplier = 11
    max_time = self.timer_duration * timer_multiplier
    assert max_time < 0.3
    for _ in range(150):
        # in libuv, our signal timer fires every 300ms; depending on
        # when this runs, we could artificially get a better
        # resolution than we expect. Run it multiple times to be more sure.
        io = loop.io(fd, 1)
        io.start(lambda events=None: None)
        now = perf_counter()
        del fired_at[:]
        timer = self.timer
        timer.start(timer_counter)
        loop.run(once=True)
        io.stop()
        io.close()
        timer.stop()
        if fired_at:
            ran_at_least_once = True
            self.assertEqual(1, len(fired_at))
            self.assertTimeWithinRange(fired_at[0] - now, 0, max_time)
    if not greentest.RUNNING_ON_CI:
        # Hmm, this always fires locally on macOS but
        # not on Travis?
        self.assertTrue(ran_at_least_once)
def runs_in_given_time(self, expected, fuzzy=None):
    """
    Generator used as a timing guard: yields ``(min_time, max_time)``,
    then on resumption asserts the elapsed time fell inside that band,
    reporting anything else as a flaky-test race condition.
    """
    if fuzzy is None:
        # The noted timer jitter issues on appveyor/pypy3 get a much
        # wider tolerance band.
        poor_timers = sysinfo.EXPECT_POOR_TIMER_RESOLUTION or sysinfo.LIBUV
        fuzzy = expected * 5.0 if poor_timers else expected / 2.0
    lower = expected - fuzzy
    upper = expected + fuzzy
    began = perf_counter()
    yield (lower, upper)
    took = perf_counter() - began
    try:
        self.assertTrue(
            lower <= took <= upper,
            'Expected: %r; elapsed: %r; fuzzy %r; clock_info: %s' % (
                expected, took, fuzzy, get_clock_info('perf_counter')))
    except AssertionError:
        flaky.reraiseFlakyTestRaceCondition()
def __call__(self):
    # The function that runs in the monitoring thread.
    # We cannot use threading.current_thread because it would
    # create an immortal DummyThread object.
    getcurrent().gevent_monitoring_thread = wref(self)
    try:
        while self.should_run:
            functions = self.monitoring_functions()
            assert functions
            sleep_time = self.calculate_sleep_time()
            thread_sleep(sleep_time)
            # Make sure the hub is still around, and still active,
            # and keep it around while we are here.
            hub = self.hub
            if not hub:
                # Hub is gone (weakly referenced, presumably); stop
                # ourselves. NOTE(review): execution still falls through
                # to the should_run check below — presumably kill()
                # clears should_run; confirm.
                self.kill()
            if self.should_run:
                this_run = perf_counter()
                for entry in functions:
                    f = entry.function
                    period = entry.period
                    last_run = entry.last_run_time
                    # Only run entries whose period has elapsed;
                    # a falsy period disables the entry.
                    if period and last_run + period <= this_run:
                        entry.last_run_time = this_run
                        f(hub)
            del hub # break our reference to hub while we sleep
    except SystemExit:
        pass
    except: # pylint:disable=bare-except
        # We're a daemon thread, so swallow any exceptions that get here
        # during interpreter shutdown.
        if not sys or not sys.stderr: # pragma: no cover
            # Interpreter is shutting down
            pass
        else:
            hub = self.hub
            if hub is not None:
                # XXX: This tends to do bad things like end the process, because we
                # try to switch *threads*, which can't happen. Need something better.
                hub.handle_error(self, *sys.exc_info())
def __enter__(self):
    """
    Install the appropriate greenlet tracer, start the wall-clock timer,
    and return self.
    """
    from gevent import get_hub
    from gevent import _tracer
    hub = get_hub()
    self.hub = hub
    # TODO: We could optimize this to use the GreenletTracer
    # installed by the monitoring thread, if there is one.
    # As it is, we will chain trace calls back to it.
    if not self.max_blocking_time:
        # No time limit: just track switches.
        tracer = _tracer.GreenletTracer()
    else:
        tracer_kind = (_tracer.HubSwitchTracer
                       if self.hub_only
                       else _tracer.MaxSwitchTracer)
        tracer = tracer_kind(hub, self.max_blocking_time)
    self.tracer = tracer
    self._entered = perf_counter()
    tracer.monitor_current_greenlet_blocking()
    return self
def __exit__(self, t, v, tb):
    """
    Stop the tracer; on a clean exit, raise ``_FailedToSwitch`` with a
    detailed report if the traced code blocked too long.
    """
    self.tracer.kill()
    # Release our references whether or not we raise below.
    hub, self.hub = self.hub, None
    tracer, self.tracer = self.tracer, None
    # An exception is already propagating out of the with-block;
    # don't obscure it with a blocking report.
    if t is not None:
        return
    did_block = tracer.did_block_hub(hub)
    if not did_block:
        return
    elapsed = perf_counter() - self._entered
    report_lines = tracer.did_block_hub_report(hub, did_block[1], {})
    message = 'To the hub' if self.hub_only else 'To any greenlet'
    message += ' in %.4f seconds' % (elapsed,)
    limit = self.max_blocking_time
    if limit:
        message += ' (max allowed %.4f seconds)' % (limit,)
    message += '\n'
    message += '\n'.join(report_lines)
    raise _FailedToSwitch(message)
def __init__(self, hub, max_blocking_time):
    _HubTracer.__init__(self, hub, max_blocking_time)
    # Start the clock now so the first blocking interval is measured
    # from construction, not from the first observed switch.
    self.last_switch = perf_counter()
def did_block_hub(self, hub):
    """
    Return ``(True, active_greenlet)`` when we have stayed out of the hub
    for longer than ``max_blocking_time``; otherwise None.
    """
    away_from_hub = perf_counter() - self.last_entered_hub
    if away_from_hub > self.max_blocking_time:
        return (True, self.active_greenlet)
    return None
def __call__(self, event, args):
    """
    Trace callback: delegate to the base tracer, then note the moment the
    hub most recently became the active greenlet.
    """
    GreenletTracer.__call__(self, event, args)
    entered_hub = self.active_greenlet is self.hub
    if entered_hub:
        # Entering the hub resets the "time away from the hub" clock.
        self.last_entered_hub = perf_counter()
def timer_counter():
    # Timer callback: closes over ``fired_at`` from the enclosing test
    # and records the instant the timer fired.
    fired_at.append(perf_counter())