def test_sql_reporter_sets_discovery_failure_flag(self):
    """A module that fails to import should produce a build row flagged as a discovery failure."""
    test_runner = TestRunner(self.broken_import_module, test_reporters=[self.reporter])
    test_runner.run()
    connection = self.reporter.conn
    # Exactly one build row is expected; strict unpacking enforces that.
    (build_row,) = list(connection.execute(Builds.select()))
    assert_equal(build_row['discovery_failure'], True)
def test_text_test_logger_prints_discovery_failure_message(self):
    """Running against a nonexistent module path should log a discovery failure banner."""
    logger = TextTestLogger(self.options, stream=self.stream)
    test_runner = TestRunner(
        'does.not.exist',
        test_reporters=[logger],
    )
    test_runner.run()
    assert_in('DISCOVERY FAILURE!', self.stream.getvalue())
def test_text_test_logger_prints_discovery_failure_message(self):
    """A broken-import module should make the text logger print a discovery-failure message."""
    logger = TextTestLogger(self.options, stream=self.stream)
    test_runner = TestRunner(
        self.broken_import_module,
        test_reporters=[logger],
    )
    test_runner.run()
    assert_in('Discovery failure!', self.stream.getvalue())
def test_http_reporter_completed_test_case(self):
    """The final result reported over HTTP should be the test case's 'run' method."""
    reporter = HTTPReporter(None, self.connect_addr, 'runner1')
    TestRunner(DummyTestCase, test_reporters=[reporter]).run()
    # Strict unpack: exactly one method result plus the case-level result.
    method_result, case_result = self.results_reported
    assert_equal(case_result['method']['name'], 'run')
def test_sql_reporter_sets_discovery_failure_flag(self):
    """A broken import should flag the build as a discovery failure with zero methods."""
    test_runner = TestRunner(self.broken_import_module, test_reporters=[self.reporter])
    test_runner.run()
    connection = self.reporter.conn
    # Exactly one build row should exist for this run.
    (build_row,) = list(connection.execute(self.reporter.Builds.select()))
    assert_equal(build_row['discovery_failure'], True)
    assert_equal(build_row['method_count'], 0)
def test_http_reporter_class_teardown_exception(self):
    """A class_teardown exception still ends with the case-level 'run' result being reported."""
    reporter = HTTPReporter(None, self.connect_addr, 'runner1')
    test_runner = TestRunner(
        ExceptionInClassFixtureSampleTests.FakeClassTeardownTestCase,
        test_reporters=[reporter],
    )
    test_runner.run()
    # Two test methods, one teardown error result, then the case result.
    (first_result, second_result, teardown_result, case_result) = self.results_reported
    assert_equal(case_result['method']['name'], 'run')
def test_http_reporter_class_teardown_exception(self):
    """Even with a failing class_teardown, the last reported result is the 'run' method."""
    reporter = HTTPReporter(None, self.connect_addr, 'runner1')
    TestRunner(
        ExceptionInClassFixtureSampleTests.FakeClassTeardownTestCase,
        test_reporters=[reporter],
    ).run()
    # Strict 4-way unpack documents the expected number of reported results.
    (result_one, result_two, teardown_result, case_result) = self.results_reported
    assert_equal(case_result['method']['name'], 'run')
def test_http_reporter_reports(self):
    """Smoke test: the HTTPReporter actually delivers a result for a run test."""
    reporter = HTTPReporter(None, self.connect_addr, "runner1")
    TestRunner(DummyTestCase, test_reporters=[reporter]).run()
    # Exactly one result should have arrived.
    (reported,) = self.results_reported
    assert_equal(reported["runner_id"], "runner1")
    assert_equal(reported["method"]["class"], "DummyTestCase")
    assert_equal(reported["method"]["name"], "test")
def test_http_reporter_reports(self):
    """Smoke test that HTTPReporter posts the single result with the expected fields."""
    reporter = HTTPReporter(None, self.connect_addr, 'runner1')
    TestRunner(DummyTestCase, test_reporters=[reporter]).run()
    (reported,) = self.results_reported
    assert_equal(reported['runner_id'], 'runner1')
    assert_equal(reported['method']['class'], 'DummyTestCase')
    assert_equal(reported['method']['name'], 'test')
def test_http_reporter_tries_twice(self):
    """Two 409 responses should make the reporter resend the same result."""
    # Queue two conflict responses so the first report is retried.
    self.status_codes.put(409)
    self.status_codes.put(409)
    reporter = HTTPReporter(None, self.connect_addr, 'tries_twice')
    TestRunner(DummyTestCase, test_reporters=[reporter]).run()
    first_attempt, second_attempt, case_result = self.results_reported
    assert_equal(first_attempt['runner_id'], 'tries_twice')
    # The retry must be byte-for-byte the same payload.
    assert_equal(first_attempt, second_attempt)
def test_http_reporter_reports(self):
    """Smoke test: HTTPReporter reports one result carrying runner id, class, and method name."""
    test_runner = TestRunner(
        DummyTestCase,
        test_reporters=[HTTPReporter(None, self.connect_addr, 'runner1')],
    )
    test_runner.run()
    (reported,) = self.results_reported
    assert_equal(reported['runner_id'], 'runner1')
    assert_equal(reported['method']['class'], 'DummyTestCase')
    assert_equal(reported['method']['name'], 'test')
def _run_test_case(self, test_case):
    """Run test_case under a fresh TextTestLogger and expect a failing exit status.

    Side effect: stores the logger on self.logger for later inspection.
    """
    self.logger = TextTestLogger(self.options, stream=self.stream)
    outcome = TestRunner(test_case, test_reporters=[self.logger]).run()
    assert_equal(outcome, exit.TESTS_FAILED)
def test_teardown(self):
    """A class_teardown exception should surface as an extra failing test result."""
    case = TestReporterExceptionInClassFixtureSampleTests.FakeClassTeardownTestCase
    TestRunner(case, test_reporters=[self.reporter]).run()
    connection = self.reporter.conn
    results = self._get_test_results(connection)
    assert_equal(len(results), 3)
    # The teardown error is appended after the two regular test results.
    teardown_result = results[-1]
    assert_equal(
        teardown_result['failure'],
        True,
        'Unexpected success for %s.%s' % (teardown_result['class_name'], teardown_result['method_name'])
    )
    failure_row = connection.execute(Failures.select()).fetchone()
    assert_in('in class_teardown_raises_exception', failure_row.traceback)
def test_teardown(self):
    """Errors in class_teardown methods manifest as an additional test result."""
    case = ExceptionInClassFixtureSampleTests.FakeClassTeardownTestCase
    TestRunner(case, test_reporters=[self.reporter]).run()
    connection = self.reporter.conn
    results = self._get_test_results(connection)
    assert_equal(len(results), 3)
    # The extra teardown result comes last.
    teardown_result = results[-1]
    assert_equal(
        teardown_result['failure'],
        True,
        'Unexpected success for %s.%s' % (teardown_result['class_name'], teardown_result['method_name'])
    )
    failure_row = connection.execute(self.reporter.Failures.select()).fetchone()
    assert_in('in class_teardown_raises_exception', failure_row.traceback)
def test_setup(self):
    """Errors in class_setup methods manifest as errors in the test case's test methods."""
    TestRunner(
        ExceptionInClassFixtureSampleTests.FakeClassSetupTestCase,
        test_reporters=[self.reporter],
    ).run()
    connection = self.reporter.conn
    results = self._get_test_results(connection)
    assert_equal(len(results), 2)
    # Every test method should be marked failed because class_setup blew up.
    for test_result in results:
        assert_equal(
            test_result['failure'],
            True,
            'Unexpected success for %s.%s' % (test_result['class_name'], test_result['method_name']))
    failure_rows = connection.execute(self.reporter.Failures.select()).fetchall()
    for failure_row in failure_rows:
        assert_in('in class_setup_raises_exception', failure_row.traceback)
def test_setup(self):
    """A class_setup exception should mark every test method in the case as failed."""
    case = ExceptionInClassFixtureSampleTests.FakeClassSetupTestCase
    TestRunner(case, test_reporters=[self.reporter]).run()
    connection = self.reporter.conn
    results = self._get_test_results(connection)
    assert_equal(len(results), 2)
    # Errors in class_setup methods manifest as errors in the test case's
    # test methods.
    for test_result in results:
        assert_equal(
            test_result['failure'],
            True,
            'Unexpected success for %s.%s' % (test_result['class_name'], test_result['method_name'])
        )
    for failure_row in connection.execute(Failures.select()).fetchall():
        assert_in('in class_setup_raises_exception', failure_row.traceback)
def test_integration(self):
    """Run a runner with self.reporter as a test reporter, and verify a bunch of stuff."""
    test_runner = TestRunner(DummyTestCase, test_reporters=[self.reporter])
    connection = self.reporter.conn
    # make_reporter creates a fresh in-memory database, so there are no rows
    # left over from previous tests.
    (build_before,) = list(connection.execute(Builds.select()))
    assert_equal(build_before['buildname'], 'a_build_name')
    assert_equal(build_before['branch'], 'a_branch_name')
    assert_equal(build_before['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
    # method_count is filled in at discovery time (part of running); end_time
    # is filled in when the run finishes — both None beforehand.
    assert_equal(build_before['method_count'], None)
    assert_equal(build_before['end_time'], None)
    assert test_runner.run()
    # Re-read the build row and confirm only the run-time columns changed.
    (build_after,) = list(connection.execute(Builds.select()))
    for column in build_after.keys():
        if column not in ('end_time', 'run_time', 'method_count'):
            assert_equal(build_before[column], build_after[column])
    assert_gt(build_after['run_time'], 0)
    assert_in_range(build_after['end_time'], 0, time.time())
    assert_equal(build_after['method_count'], 2)
    # The discovery_failure column should exist and be False.
    assert 'discovery_failure' in build_before
    assert_equal(build_before['discovery_failure'], False)
    # Join results to tests and verify one pass / one failure on the right methods.
    query = SA.select(
        columns=TestResults.columns + Tests.columns,
        from_obj=TestResults.join(Tests, TestResults.c.test == Tests.c.id),
    )
    all_results = list(connection.execute(query))
    assert_equal(len(all_results), 2)
    (passed,) = [row for row in all_results if not row['failure']]
    (failed,) = [row for row in all_results if row['failure']]
    assert_equal(passed['method_name'], 'test_pass')
    assert_equal(failed['method_name'], 'test_fail')
def test_integration(self):
    """Run a runner with self.reporter as a test reporter, and verify a bunch of stuff."""
    test_runner = TestRunner(DummyTestCase, test_reporters=[self.reporter])
    connection = self.reporter.conn
    # Fresh in-memory database per make_reporter call — no stale rows.
    (initial_build,) = list(connection.execute(Builds.select()))
    assert_equal(initial_build['buildname'], 'a_build_name')
    assert_equal(initial_build['branch'], 'a_branch_name')
    assert_equal(initial_build['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
    # Discovery (part of running) populates method_count; the run populates end_time.
    assert_equal(initial_build['method_count'], None)
    assert_equal(initial_build['end_time'], None)
    assert test_runner.run()
    # After running, fetch the row again: only timing/count columns should differ.
    (final_build,) = list(connection.execute(Builds.select()))
    for column in final_build.keys():
        if column not in ('end_time', 'run_time', 'method_count'):
            assert_equal(initial_build[column], final_build[column])
    assert_gt(final_build['run_time'], 0)
    assert_in_range(final_build['end_time'], 0, time.time())
    assert_equal(final_build['method_count'], 2)
    # The discovery_failure column should exist and be False.
    assert 'discovery_failure' in initial_build
    assert_equal(initial_build['discovery_failure'], False)
    # One pass and one failure, attributed to the correct test methods.
    joined = TestResults.join(Tests, TestResults.c.test == Tests.c.id)
    rows = list(connection.execute(SA.select(
        columns=TestResults.columns + Tests.columns,
        from_obj=joined,
    )))
    assert_equal(len(rows), 2)
    (passing_row,) = [row for row in rows if not row['failure']]
    (failing_row,) = [row for row in rows if row['failure']]
    assert_equal(passing_row['method_name'], 'test_pass')
    assert_equal(failing_row['method_name'], 'test_fail')
def __init__(self, command_line_args=None):
    """Initialize and run the test with the given command_line_args

    command_line_args will be passed to parser.parse_args
    """
    # Fall back to the process arguments when none are supplied.
    command_line_args = command_line_args or sys.argv[1:]
    (runner_action, test_path, test_runner_args, other_opts) = \
        parse_test_runner_command_line_args(command_line_args)
    self.setup_logging(other_opts)
    runner = TestRunner(**test_runner_args)
    runner.discover(
        test_path,
        bucket=other_opts.bucket,
        bucket_count=other_opts.bucket_count,
    )
    # Each action terminates the process; listing actions always exit 0,
    # running exits non-zero when the run reports failure.
    if runner_action == ACTION_LIST_SUITES:
        runner.list_suites()
        sys.exit(0)
    if runner_action == ACTION_LIST_TESTS:
        runner.list_tests()
        sys.exit(0)
    if runner_action == ACTION_RUN_TESTS:
        sys.exit(not runner.run())
def test_http_reporter_completed_test_case(self):
    """The last result reported should be the case-level 'run' result."""
    test_runner = TestRunner(
        DummyTestCase,
        test_reporters=[HTTPReporter(None, self.connect_addr, 'runner1')],
    )
    test_runner.run()
    method_result, case_result = self.results_reported
    assert_equal(case_result['method']['name'], 'run')
def test_integration(self):
    """Run a runner with self.reporter as a test reporter, and verify a bunch of stuff."""
    runner = TestRunner(DummyTestCase, test_reporters=[self.reporter])
    conn = self.reporter.conn
    # We're creating a new in-memory database in make_reporter, so we don't
    # need to worry about rows from previous tests.
    (build, ) = list(conn.execute(self.reporter.Builds.select()))
    assert_equal(build['buildname'], 'a_build_name')
    assert_equal(build['branch'], 'a_branch_name')
    assert_equal(build['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
    assert_equal(build['buildbot_run_id'], self.fake_buildbot_run_id)
    # Method count should be None until we discover (which is part of running)
    assert_equal(build['method_count'], None)
    # End time should be None until we run.
    assert_equal(build['end_time'], None)
    assert runner.run()
    # Now that we've run the tests, get the build row again and check to see
    # that things are updated.
    (updated_build, ) = list(conn.execute(self.reporter.Builds.select()))
    for key in updated_build.keys():
        if key not in ('end_time', 'run_time', 'method_count'):
            assert_equal(build[key], updated_build[key])
    assert_gt(updated_build['run_time'], 0)
    assert_in_range(updated_build['end_time'], 0, time.time())
    assert_equal(updated_build['method_count'], 3)
    # The discovery_failure column should exist and be False.
    assert 'discovery_failure' in build
    assert_equal(build['discovery_failure'], False)
    # Check test results.
    test_results = self._get_test_results(conn)
    assert_equal(len(test_results), 3)
    # Check that we have one failure and one pass, and that they're the right tests.
    (passed_test, ) = [r for r in test_results if not r['failure']]
    (failed_test, failed_test_2) = [r for r in test_results if r['failure']]
    assert_equal(passed_test['method_name'], 'test_pass')
    assert_equal(passed_test.traceback, None)
    assert_equal(passed_test.error, None)
    assert_equal(failed_test['method_name'], 'test_fail')
    assert_equal(
        failed_test.traceback.split('\n'),
        [
            'Traceback (most recent call last):',
            # Raw string: \. and \d are regex escapes, not string escapes
            # (non-raw invalid escapes are a SyntaxWarning and will become errors).
            RegexMatcher(
                r' File "(\./)?test/plugins/sql_reporter_test\.py", line \d+, in test_fail'
            ),
            ' assert False',
            'AssertionError',
            ''  # ends with newline
        ])
    assert_equal(failed_test.error, 'AssertionError')
    assert_equal(failed_test_2['method_name'], 'test_multiline')
    assert_equal(
        failed_test_2.traceback.split('\n'),
        [
            'Traceback (most recent call last):',
            RegexMatcher(
                r' File "(\./)?test/plugins/sql_reporter_test\.py", line \d+, in test_multiline'
            ),
            ' 3""")',
            'Exception: I love lines:',
            ' 1',
            ' 2',
            ' 3',
            ''  # ends with newline
        ])
    assert_equal(
        failed_test_2.error,
        'Exception: I love lines:\n 1\n 2\n 3')
def test_integration(self):
    """Run a runner with self.reporter as a test reporter, and verify a bunch of stuff."""
    runner = TestRunner(DummyTestCase, test_reporters=[self.reporter])
    conn = self.reporter.conn
    # We're creating a new in-memory database in make_reporter, so we don't
    # need to worry about rows from previous tests.
    (build,) = list(conn.execute(self.reporter.Builds.select()))
    assert_equal(build['buildname'], 'a_build_name')
    assert_equal(build['branch'], 'a_branch_name')
    assert_equal(build['revision'], 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
    assert_equal(build['buildbot_run_id'], self.fake_buildbot_run_id)
    # Method count should be None until we discover (which is part of running)
    assert_equal(build['method_count'], None)
    # End time should be None until we run.
    assert_equal(build['end_time'], None)
    assert runner.run()
    # Now that we've run the tests, get the build row again and check to see
    # that things are updated.
    (updated_build,) = list(conn.execute(self.reporter.Builds.select()))
    for key in updated_build.keys():
        if key not in ('end_time', 'run_time', 'method_count'):
            assert_equal(build[key], updated_build[key])
    assert_gt(updated_build['run_time'], 0)
    assert_in_range(updated_build['end_time'], 0, time.time())
    assert_equal(updated_build['method_count'], 3)
    # The discovery_failure column should exist and be False.
    assert 'discovery_failure' in build
    assert_equal(build['discovery_failure'], False)
    # Check test results.
    test_results = self._get_test_results(conn)
    assert_equal(len(test_results), 3)
    # Check that we have one failure and one pass, and that they're the right tests.
    (passed_test,) = [r for r in test_results if not r['failure']]
    (failed_test, failed_test_2) = [r for r in test_results if r['failure']]
    assert_equal(passed_test['method_name'], 'test_pass')
    assert_equal(passed_test.traceback, None)
    assert_equal(passed_test.error, None)
    assert_equal(failed_test['method_name'], 'test_fail')
    assert_equal(failed_test.traceback.split('\n'), [
        'Traceback (most recent call last):',
        # Raw string: \. and \d are regex escapes, not string escapes
        # (non-raw invalid escapes are a SyntaxWarning and will become errors).
        RegexMatcher(r' File "\./test/plugins/sql_reporter_test\.py", line \d+, in test_fail'),
        ' assert False',
        'AssertionError',
        ''  # ends with newline
    ])
    assert_equal(failed_test.error, 'AssertionError')
    assert_equal(failed_test_2['method_name'], 'test_multiline')
    assert_equal(failed_test_2.traceback.split('\n'), [
        'Traceback (most recent call last):',
        RegexMatcher(r' File "\./test/plugins/sql_reporter_test\.py", line \d+, in test_multiline'),
        ' 3""")',
        'Exception: I love lines:',
        ' 1',
        ' 2',
        ' 3',
        ''  # ends with newline
    ])
    assert_equal(failed_test_2.error, 'Exception: I love lines:\n 1\n 2\n 3')