def RunTests(): """Run the functional tests and any embedded doctests""" import entry_test import fdt_test import ftest import test import doctest result = unittest.TestResult() for module in []: suite = doctest.DocTestSuite(module) suite.run(result) sys.argv = [sys.argv[0]] # Run the entry tests first ,since these need to be the first to import the # 'entry' module. suite = unittest.TestLoader().loadTestsFromTestCase(entry_test.TestEntry) suite.run(result) for module in (ftest.TestFunctional, fdt_test.TestFdt): suite = unittest.TestLoader().loadTestsFromTestCase(module) suite.run(result) print result for test, err in result.errors: print test.id(), err for test, err in result.failures: print err, result.failures if result.errors or result.failures: print 'binman tests FAILED' return 1 return 0
def RunTests(debug, processes, args):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on
            error
        processes: Number of processes to use to run tests (None=same as #CPUs)
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman -t testSections', for example)
    """
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')

    # Run the entry tests first, since these need to be the first to import
    # the 'entry' module.
    test_name = args and args[0] or None
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
                   elf_test.TestElf, image_test.TestImage):
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)

    print result
    for test, err in result.errors:
        print test.id(), err
    for test, err in result.failures:
        print err, result.failures
    if result.errors or result.failures:
        print 'binman tests FAILED'
        return 1
    return 0
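# Note: the variant above references use_concurrent, ConcurrentTestSuite and
# fork_for_tests without showing where they come from. A minimal sketch of the
# module-level guard that would provide them, assuming the third-party
# 'concurrencytest' package is used (not shown in the snippet itself):
try:
    from concurrencytest import ConcurrentTestSuite, fork_for_tests
    use_concurrent = True
except ImportError:
    # Fall back to running the suite serially if the package is missing
    use_concurrent = False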
def RunTests(debug, args):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on
            error
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman -t testSections', for example)
    """
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')

    # Run the entry tests first, since these need to be the first to import
    # the 'entry' module.
    suite = unittest.TestLoader().loadTestsFromTestCase(entry_test.TestEntry)
    suite.run(result)

    test_name = args and args[0] or None
    for module in (ftest.TestFunctional, fdt_test.TestFdt, elf_test.TestElf,
                   image_test.TestImage):
        if test_name:
            try:
                suite = unittest.TestLoader().loadTestsFromName(
                    args[0], module)
            except AttributeError:
                continue
        else:
            suite = unittest.TestLoader().loadTestsFromTestCase(module)
        suite.run(result)

    print result
    for test, err in result.errors:
        print test.id(), err
    for test, err in result.failures:
        print err, result.failures
    if result.errors or result.failures:
        print 'binman tests FAILED'
        return 1
    return 0
def RunTests(debug, args):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on
            error
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman -t testSections', for example)
    """
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')

    # Run the entry tests first, since these need to be the first to import
    # the 'entry' module.
    suite = unittest.TestLoader().loadTestsFromTestCase(entry_test.TestEntry)
    suite.run(result)

    test_name = args and args[0] or None
    for module in (ftest.TestFunctional, fdt_test.TestFdt, elf_test.TestElf,
                   image_test.TestImage):
        if test_name:
            try:
                suite = unittest.TestLoader().loadTestsFromName(args[0], module)
            except AttributeError:
                continue
        else:
            suite = unittest.TestLoader().loadTestsFromTestCase(module)
        suite.run(result)

    print result
    for test, err in result.errors:
        print test.id(), err
    for test, err in result.failures:
        print err, result.failures
    if result.errors or result.failures:
        print 'binman tests FAILED'
        return 1
    return 0
def RunTests(): """Run the functional tests and any embedded doctests""" import entry_test import fdt_test import func_test import test import doctest result = unittest.TestResult() for module in []: suite = doctest.DocTestSuite(module) suite.run(result) sys.argv = [sys.argv[0]] for module in (func_test.TestFunctional, fdt_test.TestFdt, entry_test.TestEntry): suite = unittest.TestLoader().loadTestsFromTestCase(module) suite.run(result) print result for test, err in result.errors: print test.id(), err for test, err in result.failures: print err
def runtests(tests, args):
    for t in tests:
        tid = test.id(t)
        if not tid:
            continue
        try:
            targs = list(args)
            if test.run(t, tid, targs):
                print("OK : "+test.info(t)+" ["+tid+"]")
            else:
                print("FAIL : "+test.info(t)+" ["+tid+"]")
        except:
            print("EXCEPTION while running test with ID: "+tid)
            raise
            return 1  # unreachable: the raise above propagates the exception
    return 0
def _run_test(args, test_class, exclude, output_filters):
    # When logging to a file we don't have stdout's test delimiters to
    # correlate logs with the test that generated them.
    if args.logging_path:
        stem.util.log.notice('Beginning test %s' % test_class)

    start_time = time.time()

    # Test classes look like...
    #
    #   test.unit.util.conf.TestConf.test_parse_enum_csv
    #
    # We want to strip the 'test.unit.' or 'test.integ.' prefix since it's
    # redundant. We also want to drop the test class name. The individual test
    # name at the end is optional (only present if we used the '--test'
    # argument).
    label_comp = test_class.split('.')[2:]
    del label_comp[-1 if label_comp[-1][0].isupper() else -2]
    test_label = ' %-52s' % ('.'.join(label_comp) + '...')

    if args.verbose:
        test.output.print_divider(test_class)
    else:
        println(test_label, STATUS, NO_NL)

    try:
        suite = unittest.TestLoader().loadTestsFromName(test_class)
    except AttributeError:
        if args.specific_test:
            # should only come up if user provided '--test' for something
            # that doesn't exist
            println(' no such test', ERROR)
            return None
        else:
            raise
    except Exception as exc:
        println(' failed', ERROR)
        traceback.print_exc(exc)
        return None

    # check if we should skip any individual tests within this module
    if exclude:
        cropped_name = test.arguments.crop_module_name(test_class)
        cropped_name = cropped_name.rsplit('.', 1)[0]  # exclude the class name

        for prefix in exclude:
            if prefix.startswith(cropped_name):
                test_name = prefix.split('.')[-1]
                suite._tests = list(filter(
                    lambda test: test.id().split('.')[-1] != test_name,
                    suite._tests))

    test_results = io.StringIO()
    run_result = stem.util.test_tools.TimedTestRunner(
        test_results, verbosity=2).run(suite)

    if args.verbose:
        println(
            test.output.apply_filters(test_results.getvalue(), *output_filters))
    elif not run_result.failures and not run_result.errors:
        println(' success (%0.2fs)' % (time.time() - start_time), SUCCESS)
    else:
        if args.quiet:
            println(test_label, STATUS, NO_NL, STDERR)
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR,
                    STDERR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), STDERR)
        else:
            println(' failed (%0.2fs)' % (time.time() - start_time), ERROR)
            println(
                test.output.apply_filters(test_results.getvalue(),
                                          *output_filters), NO_NL)

    if args.logging_path:
        stem.util.log.notice('Finished test %s' % test_class)

    return run_result
def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
    """Run the functional tests and any embedded doctests

    Args:
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman test testSections', for example)
        toolpath: List of paths to use for tools
    """
    import cbfs_util_test
    import elf_test
    import entry_test
    import fdt_test
    import ftest
    import image_test
    import test
    import doctest

    result = unittest.TestResult()
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    # Run the entry tests first, since these need to be the first to import
    # the 'entry' module.
    test_name = args and args[0] or None
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
                   elf_test.TestElf, image_test.TestImage,
                   cbfs_util_test.TestCbfs):
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(
            suite, fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)

    # Remove errors which just indicate a missing test. Since Python v3.5, if
    # an ImportError or AttributeError occurs while traversing the name, a
    # synthetic test that raises that error when run is returned. These errors
    # are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(err, result.failures)
    if result.skipped:
        print('%d binman test%s SKIPPED:' %
              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('binman tests FAILED')
        return 1
    return 0
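# Hedged usage sketch: one way a command-line front end might dispatch to the
# RunTests() variant above. The argparse flags below are illustrative
# assumptions for the example only, not necessarily the tool's real options.
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-D', '--debug', action='store_true')
parser.add_argument('-v', '--verbosity', type=int, default=None)
parser.add_argument('-P', '--processes', type=int, default=None)
parser.add_argument('-X', '--test-preserve-dirs', action='store_true')
parser.add_argument('--toolpath', action='append', default=[])
parser.add_argument('tests', nargs='*')
opts = parser.parse_args()

sys.exit(RunTests(opts.debug, opts.verbosity, opts.processes,
                  opts.test_preserve_dirs, opts.tests, opts.toolpath))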
        if not pid:
            # child executes tests
            runner = test.CustomTestRunner(cfg, None)
            suite = unittest.TestSuite()
            suite.addTests(test_cases)
            os._exit(not runner.run(suite).wasSuccessful())
        cid, retval = os.waitpid(pid, 0)
        if retval:
            write("exit status: %d, signal: %d", retval >> 8, retval % 0xFF)
            if (retval % 0xFF) > 2:  # signal received?
                return self.FAIL
        return self.PASS

    def coerce(self, test_cases):
        if not test_cases:
            return "[]"
        test_cases = [item[-1] for item in test_cases]
        return "[%s .. %s]" % (test_cases[0].id(), test_cases[-1].id())


def dd_tests():
    tests = find_tests()
    write("Found %d tests", len(tests))
    dd = DDTester()
    min_tests = dd.ddmin(list(enumerate(tests)))
    return [item[-1] for item in min_tests]


if __name__ == "__main__":
    write("Failing tests:\n%s", "\n".join([test.id() for test in dd_tests()]))
        if not pid:
            # child executes tests
            runner = test.CustomTestRunner(cfg, None)
            suite = unittest.TestSuite()
            suite.addTests(test_cases)
            os._exit(not runner.run(suite).wasSuccessful())
        cid, retval = os.waitpid(pid, 0)
        if retval:
            write('exit status: %d, signal: %d', retval >> 8, retval % 0xFF)
            if (retval % 0xFF) > 2:  # signal received?
                return self.FAIL
        return self.PASS

    def coerce(self, test_cases):
        if not test_cases:
            return '[]'
        test_cases = [item[-1] for item in test_cases]
        return '[%s .. %s]' % (test_cases[0].id(), test_cases[-1].id())


def dd_tests():
    tests = find_tests()
    write('Found %d tests', len(tests))
    dd = DDTester()
    min_tests = dd.ddmin(list(enumerate(tests)))
    return [item[-1] for item in min_tests]


if __name__ == '__main__':
    write('Failing tests:\n%s', '\n'.join([test.id() for test in dd_tests()]))
        test_cases = [ item[-1] for item in test_cases ]
        pid = os.fork()
        if not pid:
            # child executes tests
            runner = test.CustomTestRunner(cfg, None)
            suite = unittest.TestSuite()
            suite.addTests(test_cases)
            os._exit( not runner.run(suite).wasSuccessful() )
        cid, retval = os.waitpid(pid, 0)
        if retval:
            write('exit status: %d, signal: %d', retval >> 8, retval % 0xFF)
            if (retval % 0xFF) > 2:  # signal received?
                return self.FAIL
        return self.PASS

    def coerce(self, test_cases):
        if not test_cases:
            return '[]'
        test_cases = [ item[-1] for item in test_cases ]
        return '[%s .. %s]' % (test_cases[0].id(), test_cases[-1].id())


def dd_tests():
    tests = find_tests()
    write('Found %d tests', len(tests))
    dd = DDTester()
    min_tests = dd.ddmin( list(enumerate(tests)) )
    return [ item[-1] for item in min_tests ]


if __name__ == '__main__':
    write('Failing tests:\n%s', '\n'.join([test.id() for test in dd_tests()]))
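# Context note (an assumption based on the calls above, not shown in the
# snippet): DDTester looks like a delta-debugging driver in the style of
# Andreas Zeller's dd.py, where the forked child's exit status drives the
# verdict: self.FAIL when the child appears to have died from a signal (a
# crash worth minimising), self.PASS otherwise. ddmin() then repeatedly shrinks
# the enumerated test list to a minimal subset that still reproduces the crash,
# and coerce() only controls how candidate subsets are printed during the
# search.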