def test_trivial(self):
    log = []
    result = LoggingResult(log)

    class Sample(TestCase):
        def __hash__(self):
            return id(self)

        def test_method1(self):
            pass

        def test_method2(self):
            pass

    test1 = Sample('test_method1')
    test2 = Sample('test_method2')
    original_suite = unittest.TestSuite([test1, test2])
    suite = ConcurrentTestSuite(original_suite, self.split_suite)
    suite.run(result)
    test1 = log[0][1]
    test2 = log[-1][1]
    self.assertIsInstance(test1, Sample)
    self.assertIsInstance(test2, Sample)
    self.assertNotEqual(test1.id(), test2.id())
    # We expect the start/outcome/stop to be grouped
    expected = [
        ('startTest', test1), ('addSuccess', test1), ('stopTest', test1),
        ('startTest', test2), ('addSuccess', test2), ('stopTest', test2),
    ]
    self.assertThat(log, Equals(expected))
def test_trivial(self):
    log = []
    result = LoggingResult(log)

    class Sample(TestCase):
        def __hash__(self):
            return id(self)

        def test_method1(self):
            pass

        def test_method2(self):
            pass

    test1 = Sample('test_method1')
    test2 = Sample('test_method2')
    original_suite = unittest.TestSuite([test1, test2])
    suite = ConcurrentTestSuite(original_suite, self.split_suite)
    suite.run(result)
    # log[0] is the timestamp for the first test starting.
    test1 = log[1][1]
    test2 = log[-1][1]
    self.assertIsInstance(test1, Sample)
    self.assertIsInstance(test2, Sample)
    self.assertNotEqual(test1.id(), test2.id())
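The tests above pass `self.split_suite` to ConcurrentTestSuite as the fork callback, but the helper itself is not shown. Below is a minimal sketch of such a method, assuming `iterate_tests` from `testtools.testsuite`; the real helper on these test classes may simply return the two tests directly.

import unittest
from testtools.testsuite import iterate_tests

def split_suite(self, suite):
    # Sketch (assumption, not the original helper): flatten the suite and
    # hand each worker thread its own sub-suite, two partitions in total,
    # which matches the two Sample tests used above.
    tests = list(iterate_tests(suite))
    midpoint = (len(tests) + 1) // 2
    return [unittest.TestSuite(tests[:midpoint]),
            unittest.TestSuite(tests[midpoint:])]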
def main(argv, prepare_argv=prepare_argv, find_tests=find_tests):
    """CLI entry point to adapt a test run to parallel testing."""
    child_args = prepare_argv(argv)
    test_ids = find_tests(argv)

    # We could create a proxy object per test id if desired in future.
    def parallelise_tests(suite):
        test_ids = list(suite)[0]._test_ids
        count = concurrency()
        partitions = partition_tests(test_ids, count)
        return [ListTestCase(partition, child_args)
                for partition in partitions]

    suite = ConcurrentTestSuite(
        ListTestCase(test_ids, None), parallelise_tests)
    if '--subunit' in argv:
        runner = SubunitTestRunner(sys.stdout)
        result = runner.run(suite)
    else:
        stream = unicode_output_stream(sys.stdout)
        result = TextTestResult(stream)
        result.startTestRun()
        try:
            suite.run(result)
        finally:
            result.stopTestRun()
    if result.wasSuccessful():
        return 0
    return -1
def main(argv, prepare_argv=prepare_argv, find_tests=find_tests):
    """CLI entry point to adapt a test run to parallel testing."""
    child_args = prepare_argv(argv)
    test_ids = find_tests(argv)

    # We could create a proxy object per test id if desired in future.
    def parallelise_tests(suite):
        test_ids = list(suite)[0]._test_ids
        count = concurrency()
        partitions = partition_tests(test_ids, count)
        return [ListTestCase(partition, child_args)
                for partition in partitions]

    suite = ConcurrentTestSuite(
        ListTestCase(test_ids, None), parallelise_tests)
    if '--subunit' in argv:
        runner = SubunitTestRunner(sys.stdout)
        result = runner.run(suite)
    else:
        stream = unicode_output_stream(sys.stdout)
        result = TextTestResult(stream)
        result.startTestRun()
        try:
            suite.run(result)
        finally:
            result.stopTestRun()
    if result.wasSuccessful():
        return 0
    return -1
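`partition_tests` and `concurrency()` are used above but not defined in the snippet. A plausible sketch of the partitioning step, assuming `test_ids` is a flat list of id strings; the round-robin policy is illustrative, not the project's actual code.

def partition_tests(test_ids, count):
    # Sketch (assumption): distribute test ids round-robin across `count`
    # partitions so each child process gets a roughly equal share.
    partitions = [[] for _ in range(count)]
    for index, test_id in enumerate(test_ids):
        partitions[index % count].append(test_id)
    # Drop empty partitions when there are fewer tests than workers.
    return [partition for partition in partitions if partition]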
def test_broken_test(self):
    log = []

    def on_test(test, status, start_time, stop_time, tags, details):
        log.append((test.id(), status, set(details.keys())))

    class BrokenTest(object):
        # Simple break - no result parameter to run()
        def __call__(self):
            pass
        run = __call__

    original_suite = unittest.TestSuite([BrokenTest()])
    suite = ConcurrentTestSuite(original_suite, self.split_suite)
    suite.run(TestByTestResult(on_test))
    self.assertEqual(
        [('broken-runner', 'error', set(['traceback']))], log)
def test_trivial(self):
    log = []
    result = LoggingResult(log)
    test1 = Sample('test_method1')
    test2 = Sample('test_method2')
    original_suite = unittest.TestSuite([test1, test2])
    suite = ConcurrentTestSuite(original_suite, self.split_suite)
    suite.run(result)
    # log[0] is the timestamp for the first test starting.
    test1 = log[1][1]
    test2 = log[-1][1]
    self.assertIsInstance(test1, Sample)
    self.assertIsInstance(test2, Sample)
    self.assertNotEqual(test1.id(), test2.id())
def test_wrap_result(self):
    # ConcurrentTestSuite has a hook for wrapping the per-thread result.
    wrap_log = []

    def wrap_result(thread_safe_result, thread_number):
        wrap_log.append(
            (thread_safe_result.result.decorated, thread_number))
        return thread_safe_result

    result_log = []
    result = LoggingResult(result_log)
    test1 = Sample("test_method1")
    test2 = Sample("test_method2")
    original_suite = unittest.TestSuite([test1, test2])
    suite = ConcurrentTestSuite(
        original_suite, self.split_suite, wrap_result=wrap_result)
    suite.run(result)
    self.assertEqual([(result, 0), (result, 1)], wrap_log)
    # Smoke test to make sure everything ran OK.
    self.assertNotEqual([], result_log)
def test_wrap_result(self):
    # ConcurrentTestSuite has a hook for wrapping the per-thread result.
    wrap_log = []

    def wrap_result(thread_safe_result, thread_number):
        wrap_log.append(
            (thread_safe_result.result.decorated, thread_number))
        return thread_safe_result

    result_log = []
    result = LoggingResult(result_log)
    test1 = Sample('test_method1')
    test2 = Sample('test_method2')
    original_suite = unittest.TestSuite([test1, test2])
    suite = ConcurrentTestSuite(
        original_suite, self.split_suite, wrap_result=wrap_result)
    suite.run(result)
    self.assertEqual([(result, 0), (result, 1)], wrap_log)
    # Smoke test to make sure everything ran OK.
    self.assertNotEqual([], result_log)
discovered_tests = discover_tests(
    explicitly_enumerated_tests, "--all-but" in sys.argv
)
if "--list" in sys.argv:
    test_list = tests_from_suite(discovered_tests)
    print("\n".join(sorted(test_list)))
    print("{0} tests found".format(len(test_list)))
    sys.exit()

tests_to_run = discovered_tests
if run_concurrently:
    # replace the partitioning function with our own
    concurrencytest.partition_tests = partition_tests
    tests_to_run = ConcurrentTestSuite(
        discovered_tests,
        # the number doesn't matter, we do our own partitioning which
        # ignores it
        concurrencytest.fork_for_tests(1),
    )

use_improved_result_class = (
    sys.stdout.isatty()
    and sys.stderr.isatty()
    and "--vanilla" not in sys.argv
)
resultclass = unittest.TextTestResult
if use_improved_result_class:
    from pcs.test.tools.color_text_runner import get_text_test_result_class
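The snippet above monkey-patches `concurrencytest.partition_tests` with a project-specific splitter. One possible shape for such a replacement, assuming concurrencytest's convention of taking a suite plus a partition count and returning lists of test cases; the group-by-class policy is illustrative only.

from collections import defaultdict
from testtools.testsuite import iterate_tests

def partition_tests(suite, count):
    # Sketch (assumption): ignore `count` (see the comment above) and keep
    # all tests of one TestCase class in the same partition, so per-class
    # fixtures are not duplicated across forked workers.
    by_class = defaultdict(list)
    for test in iterate_tests(suite):
        by_class[type(test)].append(test)
    return list(by_class.values())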
def main():
    # pylint: disable=import-outside-toplevel
    if "BUNDLED_LIB_LOCATION" in os.environ:
        sys.path.insert(0, os.environ["BUNDLED_LIB_LOCATION"])

    if "--installed" in sys.argv:
        sys.path.append(PACKAGE_DIR)
        from pcs import settings
        if settings.pcs_bundled_packages_dir not in sys.path:
            sys.path.insert(0, settings.pcs_bundled_packages_dir)
        from pcs_test.tools import pcs_runner
        pcs_runner.test_installed = True
    else:
        sys.path.insert(0, PACKAGE_DIR)
        from pcs import settings
        settings.pcs_data_dir = os.path.join(PACKAGE_DIR, "data")

    run_concurrently = can_concurrency and "--no-parallel" not in sys.argv

    explicitly_enumerated_tests = [
        prepare_test_name(arg)
        for arg in sys.argv[1:]
        if arg not in (
            "-v",
            "--all-but",
            "--fast-info",  # show a traceback immediately after the test fails
            "--last-slash",
            "--list",
            "--no-parallel",
            "--traceback-highlight",
            "--traditional-verbose",
            "--vanilla",
            "--installed",
            "--tier0",
            "--tier1",
        )
    ]

    tier = None
    if "--tier0" in sys.argv:
        tier = 0
    elif "--tier1" in sys.argv:
        tier = 1

    discovered_tests = discover_tests(
        explicitly_enumerated_tests, "--all-but" in sys.argv, tier=tier)
    if "--list" in sys.argv:
        test_list = tests_from_suite(discovered_tests)
        print("\n".join(sorted(test_list)))
        print("{0} tests found".format(len(test_list)))
        sys.exit()

    tests_to_run = discovered_tests
    tier1_fixtures_cleanup = None
    if tier1_fixtures_needed(tests_to_run):
        tier1_fixtures_cleanup = run_tier1_fixtures(
            run_concurrently=run_concurrently)
    if run_concurrently:
        tests_to_run = ConcurrentTestSuite(
            discovered_tests,
            concurrencytest.fork_for_tests(),
        )

    use_improved_result_class = (
        sys.stdout.isatty()
        and sys.stderr.isatty()
        and "--vanilla" not in sys.argv
    )

    ResultClass = unittest.TextTestResult
    if use_improved_result_class:
        from pcs_test.tools.color_text_runner import get_text_test_result_class
        ResultClass = get_text_test_result_class(
            slash_last_fail_in_overview=("--last-slash" in sys.argv),
            traditional_verbose=(
                "--traditional-verbose" in sys.argv
                # temporary workaround - our verbose writer is not compatible
                # with running tests in parallel, use our traditional writer
                or (run_concurrently and "-v" in sys.argv)
            ),
            traceback_highlight=("--traceback-highlight" in sys.argv),
            fast_info=("--fast-info" in sys.argv),
        )

    test_runner = unittest.TextTestRunner(
        verbosity=2 if "-v" in sys.argv else 1,
        resultclass=ResultClass,
    )
    test_result = test_runner.run(tests_to_run)
    if tier1_fixtures_cleanup:
        tier1_fixtures_cleanup()
    if not test_result.wasSuccessful():
        sys.exit(1)
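`tests_from_suite` backs the `--list` option in the runners above but is not shown. A sketch of what such a helper could look like, assuming it should return the ids of all tests in a possibly nested suite; the name matches the call sites, the body is an assumption.

import unittest

def tests_from_suite(suite):
    # Sketch (assumption): recursively flatten nested TestSuites into a
    # flat list of test ids suitable for sorting and printing.
    if isinstance(suite, unittest.TestCase):
        return [suite.id()]
    test_ids = []
    for item in suite:
        test_ids.extend(tests_from_suite(item))
    return test_ids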
if __name__ == '__main__':
    import time

    class SampleTestCase(unittest.TestCase):
        """Dummy tests that sleep for demo."""

        def test_me_1(self):
            time.sleep(0.5)

        def test_me_2(self):
            time.sleep(0.5)

        def test_me_3(self):
            time.sleep(0.5)

        def test_me_4(self):
            time.sleep(0.5)

    # Load tests from SampleTestCase defined above
    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
    runner = unittest.TextTestRunner()

    # Run tests sequentially
    runner.run(suite)

    # Run same tests across 4 processes
    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
    runner.run(concurrent_suite)
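A variant of the demo above that sizes the worker pool from the host CPU count instead of hard-coding 4. This is a sketch which assumes it lives in the same module as the demo, where `ConcurrentTestSuite` and `fork_for_tests` are already in scope; `run_parallel` is a hypothetical helper name.

import multiprocessing
import unittest

def run_parallel(test_case_class):
    # Sketch (assumption): fork one worker per CPU core and run the given
    # TestCase class's tests concurrently.
    suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
    workers = max(multiprocessing.cpu_count(), 1)
    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(workers))
    return unittest.TextTestRunner().run(concurrent_suite)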
elif "--tier1" in sys.argv: tier = 1 discovered_tests = discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv, tier=tier) if "--list" in sys.argv: test_list = tests_from_suite(discovered_tests) print("\n".join(sorted(test_list))) print("{0} tests found".format(len(test_list))) sys.exit() tests_to_run = discovered_tests if run_concurrently: tests_to_run = ConcurrentTestSuite( discovered_tests, concurrencytest.fork_for_tests(), ) use_improved_result_class = (sys.stdout.isatty() and sys.stderr.isatty() and "--vanilla" not in sys.argv) resultclass = unittest.TextTestResult if use_improved_result_class: from pcs_test.tools.color_text_runner import get_text_test_result_class resultclass = get_text_test_result_class( slash_last_fail_in_overview=("--last-slash" in sys.argv), traditional_verbose=( "--traditional-verbose" in sys.argv or # temporary workaround - our verbose writer is not compatible with # running tests in parallel, use our traditional writer