def run_test(module, **kwds):
    """Run a unit test module

    Recognized keyword arguments:
    incomplete, expected_failures, nosubprocess

    """
    option_incomplete = kwds.get("incomplete", False)
    option_expected_failures = kwds.get("expected_failures", False)
    option_nosubprocess = kwds.get("nosubprocess", False)

    suite = unittest.TestSuite()
    test_utils.fail_incomplete_tests = option_incomplete
    test_utils.fail_expected_failures = option_expected_failures

    m = import_submodule(module)
    if m.unittest is not unittest:
        raise ImportError(
            "%s is not using correct unittest\n\n" % module
            + "should be: %s\n is using: %s"
            % (unittest.__file__, m.unittest.__file__)
        )

    print("loading %s" % module)

    test = unittest.defaultTestLoader.loadTestsFromName(module)
    suite.addTest(test)

    output = StringIO.StringIO()
    runner = unittest.TextTestRunner(stream=output)

    results = runner.run(suite)
    output = StringIOContents(output)

    num_tests = results.testsRun
    failures = results.failures
    errors = results.errors
    tests = results.tests

    results = {module: from_namespace(locals(), RESULTS_TEMPLATE)}

    if not option_nosubprocess:
        print(TEST_RESULTS_START)
        print(pformat(results))
        print(TEST_RESULTS_END)
    else:
        return results
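# Usage sketch (illustrative only, not executed at import time): running one
# module in-process and inspecting its entry in the returned dict. The module
# name below is an assumption for the example, and the keys present in each
# entry depend on RESULTS_TEMPLATE, defined elsewhere in this file.
#
#     results = run_test('pygame.tests.surface_test', nosubprocess=True)
#     info = results['pygame.tests.surface_test']
#     print(info.get('num_tests'), info.get('failures'), info.get('errors'))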
def run(*args, **kwds):
    """Run the Pygame unit test suite and return (total tests run, fails dict)

    Positional arguments (optional):
        The names of tests to include. If omitted then all tests are run. Test
        names need not include the trailing '_test'.

    Keyword arguments:
        incomplete - fail incomplete tests (default False)
        nosubprocess - run all test suites in the current process
                       (default False, use separate subprocesses)
        dump - dump failures/errors as a dict ready to eval (default False)
        file - if provided, the name of a file into which to dump
               failures/errors
        timings - if provided, the number of times to run each individual test
                  to get an average run time (default is run each test once)
        exclude - a list of TAG names to exclude from the run; the items may
                  be comma or space separated
        show_output - show silenced stderr/stdout on errors (default False)
        all - dump all results, not just errors (default False)
        randomize - randomize order of tests (default False)
        seed - if provided, an integer seed for the randomizer
        multi_thread - if provided, the number of THREADS in which to run
                       subprocessed tests
        time_out - the time limit in seconds before killing a subprocessed
                   test (default 120)
        fake - if provided, the name of the fake tests package in the
               run_tests__tests subpackage to run instead of the normal
               Pygame tests
        python - the path to a python executable used to run subprocessed
                 tests (default sys.executable)
        interactive - allow tests tagged 'interactive' (default False)

    Return value:
        A tuple of the total number of tests run and a dictionary of error
        information. The dictionary is empty if no errors were recorded.

    By default individual test modules are run in separate subprocesses. This
    recreates normal Pygame usage, where pygame.init() and pygame.quit() are
    called only once per program execution, and avoids unfortunate
    interactions between test modules. Also, a time limit is placed on test
    execution, so frozen tests are killed when their time allotment expires.
    Use the single process option if threading is not working properly or if
    tests are taking too long. It is not guaranteed that all tests will pass
    in single process mode.

    Tests are run in a randomized order if the randomize argument is True or a
    seed argument is provided. If no seed integer is provided then the system
    time is used.

    Individual test modules may have a corresponding *_tags.py module defining
    a __tags__ attribute, a list of tag strings used to selectively omit
    modules from a run. By default only the 'interactive', 'ignore', and
    'subprocess_ignore' tags are ignored. 'interactive' is for modules that
    take user input, like cdrom_test.py. 'ignore' and 'subprocess_ignore' are
    for disabling modules in foreground and subprocess modes respectively.
    These are for disabling tests on optional modules or for experimental
    modules with known problems. These modules can still be run from the
    console as a Python program.

    This function can only be called once per Python session. It is not
    reentrant.
""" global was_run if was_run: raise RuntimeError("run() was already called this session") was_run = True options = kwds.copy() option_nosubprocess = options.get('nosubprocess', False) option_dump = options.pop('dump', False) option_file = options.pop('file', None) option_all = options.pop('all', False) option_randomize = options.get('randomize', False) option_seed = options.get('seed', None) option_multi_thread = options.pop('multi_thread', 1) option_time_out = options.pop('time_out', 120) option_fake = options.pop('fake', None) option_python = options.pop('python', sys.executable) option_exclude = options.pop('exclude', ()) option_interactive = options.pop('interactive', False) if not option_interactive and 'interactive' not in option_exclude: option_exclude += ('interactive',) if not option_nosubprocess and 'subprocess_ignore' not in option_exclude: option_exclude += ('subprocess_ignore',) elif 'ignore' not in option_exclude: option_exclude += ('ignore',) if sys.version_info < (3, 0, 0): option_exclude += ('python2_ignore',) else: option_exclude += ('python3_ignore',) main_dir, test_subdir, fake_test_subdir = prepare_test_env() test_runner_py = os.path.join(test_subdir, "test_utils", "test_runner.py") cur_working_dir = os.path.abspath(os.getcwd()) ########################################################################### # Compile a list of test modules. If fake, then compile list of fake # xxxx_test.py from run_tests__tests TEST_MODULE_RE = re.compile('^(.+_test)\.py$') test_mods_pkg_name = test_pkg_name if option_fake is not None: test_mods_pkg_name = '.'.join([test_mods_pkg_name, 'run_tests__tests', option_fake]) test_subdir = os.path.join(fake_test_subdir, option_fake) working_dir = test_subdir else: working_dir = main_dir # Added in because some machines will need os.environ else there will be # false failures in subprocess mode. Same issue as python2.6. Needs some # env vars. test_env = os.environ fmt1 = '%s.%%s' % test_mods_pkg_name fmt2 = '%s.%%s_test' % test_mods_pkg_name if args: test_modules = [ m.endswith('_test') and (fmt1 % m) or (fmt2 % m) for m in args ] else: test_modules = [] for f in sorted(os.listdir(test_subdir)): for match in TEST_MODULE_RE.findall(f): test_modules.append(fmt1 % (match,)) ########################################################################### # Remove modules to be excluded. 
    tmp = test_modules
    test_modules = []
    for name in tmp:
        tag_module_name = "%s_tags" % (name[0:-5],)
        try:
            tag_module = import_submodule(tag_module_name)
        except ImportError:
            test_modules.append(name)
        else:
            try:
                tags = tag_module.__tags__
            except AttributeError:
                print("%s has no tags: ignoring" % (tag_module_name,))
                test_modules.append(name)
            else:
                for tag in tags:
                    if tag in option_exclude:
                        print("skipping %s (tag '%s')" % (name, tag))
                        break
                else:
                    test_modules.append(name)
    del tmp, tag_module_name, name

    ###########################################################################
    # Meta results

    results = {}
    meta_results = {'__meta__': {}}
    meta = meta_results['__meta__']

    ###########################################################################
    # Randomization

    if option_randomize or option_seed is not None:
        if option_seed is None:
            option_seed = time.time()
        meta['random_seed'] = option_seed
        print("\nRANDOM SEED USED: %s\n" % option_seed)
        random.seed(option_seed)
        random.shuffle(test_modules)

    ###########################################################################
    # Single process mode

    if option_nosubprocess:
        unittest_patch.patch(**options)
        options['exclude'] = option_exclude
        t = time.time()
        for module in test_modules:
            results.update(run_test(module, **options))
        t = time.time() - t

    ###########################################################################
    # Subprocess mode

    if not option_nosubprocess:
        if is_pygame_pkg:
            from pygame.tests.test_utils.async_sub import proc_in_time_or_kill
        else:
            from test.test_utils.async_sub import proc_in_time_or_kill

        pass_on_args = ['--exclude', ','.join(option_exclude)]
        for option in ['timings', 'seed']:
            value = options.pop(option, None)
            if value is not None:
                pass_on_args.append('--%s' % option)
                pass_on_args.append(str(value))
        for option, value in options.items():
            option = option.replace('_', '-')
            if value:
                pass_on_args.append('--%s' % option)

        def sub_test(module):
            print("loading %s" % module)
            cmd = [option_python, test_runner_py, module] + pass_on_args
            return (module,
                    (cmd, test_env, working_dir),
                    proc_in_time_or_kill(cmd, option_time_out,
                                         env=test_env, wd=working_dir))

        if option_multi_thread > 1:
            def tmap(f, args):
                return pygame.threads.tmap(
                    f, args,
                    stop_on_error=False,
                    num_workers=option_multi_thread,
                )
        else:
            tmap = map

        t = time.time()

        for module, cmd, (return_code, raw_return) in tmap(sub_test,
                                                           test_modules):
            test_file = '%s.py' % os.path.join(test_subdir, module)
            cmd, test_env, working_dir = cmd

            test_results = get_test_results(raw_return)
            if test_results:
                results.update(test_results)
            else:
                results[module] = {}

            add_to_results = [
                'return_code',
                'raw_return',
                'cmd',
                'test_file',
                'test_env',
                'working_dir',
                'module',
            ]
            results[module].update(from_namespace(locals(), add_to_results))

        t = time.time() - t

    ###########################################################################
    # Output Results

    untrusty_total, combined = combine_results(results, t)
    total, fails = test_failures(results)

    meta['total_tests'] = total
    meta['combined'] = combined
    results.update(meta_results)

    if option_nosubprocess:
        assert total == untrusty_total

    if not option_dump:
        print(combined)
    else:
        results = option_all and results or fails
        print(TEST_RESULTS_START)
        print(pformat(results))
        if option_file is not None:
            results_file = open(option_file, 'w')
            try:
                results_file.write(pformat(results))
            finally:
                results_file.close()

    return total, fails
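# Usage sketch (illustrative): a driver script invoking the suite. The test
# names and option values below are assumptions for the example, not defaults;
# see the docstring above for the full keyword list.
#
#     total, fails = run('surface', 'rect', randomize=True, seed=42,
#                        time_out=60)
#     if fails:
#         print(pformat(fails))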
def get_parent_module(self, class_):
    """Return the module that defines class_, caching repeat lookups."""
    if class_ not in self.parent_modules:
        self.parent_modules[class_] = import_submodule(class_.__module__)
    return self.parent_modules[class_]
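# Usage sketch (assumptions: the owning class, not shown in this section,
# gives self a parent_modules dict, e.g. in its __init__; SomeTestCase is an
# illustrative class):
#
#     self.parent_modules = {}                      # set up once
#     mod = self.get_parent_module(SomeTestCase)    # imports and caches
#     mod is self.get_parent_module(SomeTestCase)   # True: served from cache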