def main(tests=None):
    if not tests:
        tests = set(os.path.basename(x) for x in glob.glob(cwd + '/*.py'))
    tests = sorted(tests)
    failed = []

    for filename in tests:
        if filename in ignore:
            continue
        min_time, max_time = time_ranges.get(filename, default_time_range)

        start = time.time()
        if util.run([sys.executable, '-u', filename], timeout=max_time, cwd=cwd):
            failed.append(filename)
        else:
            took = time.time() - start
            if took < min_time:
                util.log('! Failed example %s: exited too quickly, after %.1fs (expected %.1fs)',
                         filename, took, min_time)
                failed.append(filename)

    if failed:
        util.log('! Failed examples:\n! - %s', '\n! - '.join(failed))
        sys.exit(1)

    if not tests:
        sys.exit('No tests.')
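# A minimal sketch of the module-level configuration main() depends on. The
# names (cwd, ignore, time_ranges, default_time_range) are taken from the
# usage above; the concrete values here are hypothetical illustrations, not
# the project's real settings.
cwd = os.path.dirname(os.path.abspath(__file__))  # directory holding the example scripts
ignore = frozenset(['webchat.py'])                # hypothetical: examples never run
default_time_range = (2, 4)                       # (min_seconds, max_seconds)
time_ranges = {
    'echoserver.py': (1, 10),                     # hypothetical per-example override
}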
def assertEqualResults(self, real_result, gevent_result, func):
    errors = (socket.gaierror, socket.herror, TypeError)
    if isinstance(real_result, errors) and isinstance(gevent_result, errors):
        if type(real_result) is not type(gevent_result):
            log('WARNING: error type mismatch: %r (gevent) != %r (stdlib)',
                gevent_result, real_result)
        return

    real_result = self._normalize_result(real_result, func)
    gevent_result = self._normalize_result(gevent_result, func)

    real_result_repr = repr(real_result)
    gevent_result_repr = repr(gevent_result)
    if real_result_repr == gevent_result_repr:
        return
    if relaxed_is_equal(gevent_result, real_result):
        return

    # If we're using the ares resolver, allow the real resolver to generate an
    # error that the ares resolver actually gets an answer to.
    if (RESOLVER_NOT_SYSTEM
            and isinstance(real_result, errors)
            and not isinstance(gevent_result, errors)):
        return

    # From 2.7 on, assertEqual does a better job highlighting the results than we would
    # because it calls assertSequenceEqual, which highlights the exact
    # difference in the tuple
    self.assertEqual(real_result, gevent_result)
def run(function, *args):
    if DEBUG:
        log(format_call(function, args))
    delta = time()
    result = _run(function, *args)
    delta = time() - delta
    if DEBUG:
        log_fresult(result, delta)
    return result, delta
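# Usage sketch for run(): time one resolver call. The hostname is an
# arbitrary illustration; judging by how log_fresult() inspects its result,
# _run() presumably catches exceptions and returns the exception instance
# rather than raising.
result, seconds = run(socket.gethostbyname, 'localhost')
# result: the return value (or, presumably, the caught exception instance)
# seconds: wall-clock duration measured around _run()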
def log_fresult(result, seconds):
    if isinstance(result, Exception):
        msg = ' -=> raised %r' % (result, )
    else:
        msg = ' -=> returned %r' % (result, )
    time_ms = ' %.2fms' % (seconds * 1000.0, )
    space = 80 - len(msg) - len(time_ms)
    if space > 0:
        space = ' ' * space
    else:
        space = ''
    log(msg + space + time_ms)
def _test(self, func, *args):
    gevent_func = getattr(gevent_socket, func)
    real_func = getattr(socket, func)
    real_result, time_real = run(real_func, *args)
    gevent_result, time_gevent = run(gevent_func, *args)
    if not DEBUG and self.should_log_results(real_result, gevent_result):
        log('')
        log_call(real_result, time_real, real_func, *args)
        log_call(gevent_result, time_gevent, gevent_func, *args)

    self.assertEqualResults(real_result, gevent_result, func)

    if self.verbose_dns and time_gevent > time_real + 0.01 and time_gevent > 0.02:
        msg = 'gevent:%s%s took %dms versus %dms stdlib' % (
            func, args, time_gevent * 1000.0, time_real * 1000.0)

        if time_gevent > time_real + 1:
            word = 'VERY'
        else:
            word = 'quite'

        log('\nWARNING: %s slow: %s', word, msg)

    return gevent_result
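# Usage sketch for _test(): a concrete case names the socket function to
# compare and passes its arguments; both resolvers run and the results are
# asserted equal. The class name, base class, and hostname below are
# hypothetical illustrations of the pattern, not code from this suite
# (the real base class also supplies should_log_results and verbose_dns).
class TestGethostbynameExample(greentest.TestCase):
    def test_localhost(self):
        self._test('gethostbyname', 'localhost')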
def TESTRUNNER(tests=None):
    if not os.path.exists(directory):
        util.log('WARNING: No test directory found at %s', directory)
        return

    with open(os.path.join(directory, 'version')) as f:
        preferred_version = f.read().strip()
    if preferred_version != version:
        util.log('WARNING: The tests in %s/ are from version %s and your Python is %s',
                 directory, preferred_version, version)

    version_tests = glob.glob('%s/test_*.py' % full_directory)
    version_tests = sorted(version_tests)
    if not tests:
        tests = glob.glob('%s/test_*.py' % directory)
        tests = sorted(tests)

    PYTHONPATH = (os.getcwd() + os.pathsep + get_absolute_pythonpath()).rstrip(':')

    tests = [os.path.basename(x) for x in tests]
    version_tests = [os.path.basename(x) for x in version_tests]

    options = {
        'cwd': directory,
        'timeout': TIMEOUT,
        'setenv': {
            'PYTHONPATH': PYTHONPATH,
            # debug produces resource tracking warnings for the
            # CFFI backends. On Python 2, many of the stdlib tests
            # rely on refcounting to close sockets so they produce
            # lots of noise. Python 3 is not completely immune;
            # test_ftplib.py tends to produce warnings---and the Python 3
            # test framework turns those into test failures!
            'GEVENT_DEBUG': 'error',
        }
    }

    if tests and not sys.platform.startswith("win"):
        atexit.register(os.system, 'rm -f */@test*')

    basic_args = [
        sys.executable, '-u', '-W', 'ignore',
        # adjacent string literals concatenate: this is the single
        # argument '-mgreentest.monkey_test'
        '-m' 'greentest.monkey_test',
    ]

    for filename in tests:
        if filename in version_tests:
            util.log("Overriding %s from %s with file from %s",
                     filename, directory, full_directory)
            continue
        yield basic_args + [filename], options.copy()

    options['cwd'] = full_directory
    for filename in version_tests:
        yield basic_args + [filename], options.copy()
def TESTRUNNER(tests=None):
    if not os.path.exists(directory):
        util.log('WARNING: No test directory found at %s', directory)
        return

    with open(os.path.join(directory, 'version')) as f:
        preferred_version = f.read().strip()
    if preferred_version != version:
        util.log('WARNING: The tests in %s/ are from version %s and your Python is %s',
                 directory, preferred_version, version)

    if not tests:
        tests = glob.glob('%s/test_*.py' % directory)

    version_tests = glob.glob('%s/test_*.py' % full_directory)
    tests = sorted(tests)
    version_tests = sorted(version_tests)

    PYTHONPATH = (os.getcwd() + os.pathsep + get_absolute_pythonpath()).rstrip(':')

    tests = [os.path.basename(x) for x in tests]
    version_tests = [os.path.basename(x) for x in version_tests]

    options = {
        'cwd': directory,
        'timeout': TIMEOUT,
        'setenv': {
            'PYTHONPATH': PYTHONPATH,
            'GEVENT_DEBUG': 'error',
        }
    }

    if tests and not sys.platform.startswith("win"):
        atexit.register(os.system, 'rm -f */@test*')

    basic_args = [sys.executable, '-u', '-W', 'ignore', '-m' 'greentest.monkey_test']

    for filename in tests:
        if filename in version_tests:
            util.log("Overriding %s from %s with file from %s",
                     filename, directory, full_directory)
            continue
        yield basic_args + [filename], options.copy()
        yield basic_args + ['--Event', filename], options.copy()

    options['cwd'] = full_directory
    for filename in version_tests:
        yield basic_args + [filename], options.copy()
        yield basic_args + ['--Event', filename], options.copy()
def run_many(tests, configured_failing_tests=(), failfast=False, quiet=False):
    # pylint:disable=too-many-locals
    global NWORKERS
    start = time.time()
    total = 0
    failed = {}
    passed = {}

    NWORKERS = min(len(tests), NWORKERS) or 1
    print('thread pool size:', NWORKERS, '\n')
    pool = ThreadPool(NWORKERS)
    util.BUFFER_OUTPUT = NWORKERS > 1

    def run_one(cmd, **kwargs):
        kwargs['quiet'] = quiet
        result = util.run(cmd, **kwargs)
        if result:
            if failfast:
                sys.exit(1)
            failed[result.name] = [cmd, kwargs]
        else:
            passed[result.name] = True

    results = []

    def reap():
        for r in results[:]:
            if not r.ready():
                continue
            if r.successful():
                results.remove(r)
            else:
                r.get()
                sys.exit('Internal error in testrunner.py: %r' % (r, ))
        return len(results)

    def reap_all():
        while reap() > 0:
            time.sleep(0.1)

    def spawn(cmd, options):
        while True:
            if reap() < NWORKERS:
                r = pool.apply_async(run_one, (cmd, ), options or {})
                results.append(r)
                return
            else:
                time.sleep(0.1)

    run_alone = []

    try:
        try:
            for cmd, options in tests:
                total += 1
                options = options or {}
                if matches(RUN_ALONE, cmd):
                    run_alone.append((cmd, options))
                else:
                    spawn(cmd, options)
            pool.close()
            pool.join()

            for cmd, options in run_alone:
                run_one(cmd, **options)
        except KeyboardInterrupt:
            try:
                log('Waiting for currently running to finish...')
                reap_all()
            except KeyboardInterrupt:
                pool.terminate()
                report(total, failed, passed, exit=False, took=time.time() - start,
                       configured_failing_tests=configured_failing_tests)
                log('(partial results)\n')
                raise
    except:
        traceback.print_exc()
        pool.terminate()
        raise

    reap_all()
    report(total, failed, passed, took=time.time() - start,
           configured_failing_tests=configured_failing_tests)
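# Usage sketch: run_many() consumes (cmd, options) pairs such as those
# yielded by the TESTRUNNER() generators above. list() is needed because
# run_many() calls len() on its argument. FAILING_TESTS is a hypothetical
# name for the configured list of known failures.
if __name__ == '__main__':
    run_many(list(TESTRUNNER()), configured_failing_tests=FAILING_TESTS)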
def print_list(lst):
    for name in lst:
        log(' - %s', name)
def report(total, failed, passed, exit=True, took=None,
           configured_failing_tests=()):
    # pylint:disable=redefined-builtin,too-many-branches
    runtimelog = util.runtimelog
    if runtimelog:
        log('\nLongest-running tests:')
        runtimelog.sort()
        length = len('%.1f' % -runtimelog[0][0])
        frmt = '%' + str(length) + '.1f seconds: %s'
        for delta, name in runtimelog[:5]:
            log(frmt, -delta, name)
    if took:
        took = ' in %s' % format_seconds(took)
    else:
        took = ''

    failed_expected = []
    failed_unexpected = []
    passed_unexpected = []

    for name in passed:
        if matches(configured_failing_tests, name, include_flaky=False):
            passed_unexpected.append(name)

    if passed_unexpected:
        log('\n%s/%s unexpected passes', len(passed_unexpected), total)
        print_list(passed_unexpected)

    if failed:
        log('\n%s/%s tests failed%s', len(failed), total, took)

        for name in failed:
            if matches(configured_failing_tests, name, include_flaky=True):
                failed_expected.append(name)
            else:
                failed_unexpected.append(name)

        if failed_expected:
            log('\n%s/%s expected failures', len(failed_expected), total)
            print_list(failed_expected)

        if failed_unexpected:
            log('\n%s/%s unexpected failures', len(failed_unexpected), total)
            print_list(failed_unexpected)
    else:
        log('\n%s tests passed%s', total, took)

    if exit:
        if failed_unexpected:
            sys.exit(min(100, len(failed_unexpected)))
        if passed_unexpected:
            sys.exit(101)
        if total <= 0:
            sys.exit('No tests found.')
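# The exit-code contract report() establishes for wrappers: 1-100 means that
# many unexpected failures (capped at 100), and 101 means an unexpected pass
# with no unexpected failures. A hypothetical subprocess-based probe of that
# contract, purely for illustration:
import subprocess
code = subprocess.call([sys.executable, 'testrunner.py'])  # hypothetical invocation
if code == 101:
    print('unexpected passes only')
elif 0 < code <= 100:
    print('%d unexpected failures' % code)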
def log_call(result, time, function, *args):
    log(format_call(function, args))
    log_fresult(result, time)
import gevent  # bound explicitly: gevent.get_hub() below needs the top-level name
from gevent import monkey

import os
import re
import greentest
import unittest
import socket
from time import time
import traceback
import gevent.socket as gevent_socket
from greentest.util import log
from greentest import six
from greentest.six import xrange

resolver = gevent.get_hub().resolver
log('Resolver: %s', resolver)

if getattr(resolver, 'pool', None) is not None:
    resolver.pool.size = 1

from greentest.sysinfo import RESOLVER_NOT_SYSTEM
from greentest.sysinfo import RESOLVER_DNSPYTHON
from greentest.sysinfo import PY2
import greentest.timing

assert gevent_socket.gaierror is socket.gaierror
assert gevent_socket.error is socket.error

DEBUG = os.getenv('GEVENT_DEBUG', '') == 'trace'
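# Usage note: per the DEBUG flag above, setting GEVENT_DEBUG=trace in the
# environment turns on the per-call logging in the run()/log_fresult()
# helpers in this module, e.g.:
#
#   GEVENT_DEBUG=trace python test__socket_dns.py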