def test_build_driver(self):
    """Verify that _build_driver invokes the right build scripts and reports failure.

    Exercises four scenarios: the default DumpRenderTree build, the
    --webkit-test-runner build (which builds both tools), verbose output
    (simulated via a DEBUG log level), and a failing build.
    """
    output = OutputCapture()
    port = TestWebKitPort()
    # Delay setting _executive to avoid logging during construction
    port._executive = MockExecutive(should_log=True)
    # This should not be necessary, but I think TestWebKitPort is actually
    # reading from disk (and thus detects the current configuration).
    port._options = MockOptions(configuration="Release")
    # Default case: only build-dumprendertree runs, and nothing is logged.
    expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
    self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))

    # Make sure when passed --webkit-test-runner we build the right tool.
    port._options = MockOptions(webkit_test_runner=True, configuration="Release")
    # Both DRT and WebKitTestRunner are built, in that order.
    expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\nMOCK run_command: ['Tools/Scripts/build-webkittestrunner', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
    self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=''))

    # Make sure we show the build log when --verbose is passed, which we
    # simulate by setting the logging level to DEBUG.
    output.set_log_level(logging.DEBUG)
    port._options = MockOptions(configuration="Release")
    expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
    # At DEBUG level the child process's output is echoed into the log.
    expected_logs = "Output of ['Tools/Scripts/build-dumprendertree', '--release']:\nMOCK output of child process\n"
    self.assertTrue(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
    # Restore the default level so the remaining checks aren't affected.
    output.set_log_level(logging.INFO)

    # Make sure that failure to build returns False.
    port._executive = MockExecutive(should_log=True, should_throw=True)
    # Because WK2 currently has to build both webkittestrunner and DRT,
    # if DRT fails, that's the only one it tries.
    expected_stderr = "MOCK run_command: ['Tools/Scripts/build-dumprendertree', '--release'], cwd=/mock-checkout, env={'LC_ALL': 'C', 'MOCK_ENVIRON_COPY': '1'}\n"
    expected_logs = "MOCK ScriptError\n\nMOCK output of child process\n"
    self.assertFalse(output.assert_outputs(self, port._build_driver, expected_stderr=expected_stderr, expected_logs=expected_logs))
def test_paths(self):
    """Check that path arguments resolve to the expected test module names."""
    self.fs.chdir('/foo/bar')
    # Every one of these spellings names the same module relative to the cwd.
    for spelling in ('baz_unittest.py', './baz_unittest.py', '/foo/bar/baz_unittest.py', '.'):
        self.check_names([spelling], ['bar.baz_unittest'])
    self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])

    self.fs.chdir('/')
    self.check_names(['bar'], ['bar.baz_unittest'])
    self.check_names(['/foo/bar/'], ['bar.baz_unittest'])
    # This works 'by accident' since it maps onto a package.
    self.check_names(['bar/'], ['bar.baz_unittest'])

    capture = OutputCapture()
    capture.set_log_level(logging.ERROR)

    # This should log an error, since it's outside the trees.
    capture.capture_output()
    try:
        self.check_names(['/tmp/another_unittest.py'], [])
    finally:
        logs = capture.restore_output()[2]
    self.assertIn('another_unittest.py', logs)

    # Paths that don't exist are errors.
    capture.capture_output()
    try:
        self.check_names(['/foo/bar/notexist_unittest.py'], [])
    finally:
        logs = capture.restore_output()[2]
    self.assertIn('notexist_unittest.py', logs)

    # Names that don't exist are caught later, at load time.
    self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
class OutputCaptureTest(unittest.TestCase):
    """Exercises OutputCapture's stream capture and log-level filtering."""

    def setUp(self):
        self.output = OutputCapture()

    def log_all_levels(self):
        # Emit one record at each severity so the tests can observe
        # which levels the capture lets through.
        _log.info('INFO')
        _log.warning('WARN')
        _log.error('ERROR')
        _log.critical('CRITICAL')

    def assertLogged(self, expected_logs):
        # Nothing should have been written to stdout/stderr; only the
        # captured log text is compared against the expectation.
        captured_stdout, captured_stderr, captured_logs = self.output.restore_output()
        self.assertEqual('', captured_stdout)
        self.assertEqual('', captured_stderr)
        self.assertMultiLineEqual(expected_logs, captured_logs)

    def test_initial_log_level(self):
        # With no explicit level set, INFO and above are captured.
        self.output.capture_output()
        self.log_all_levels()
        self.assertLogged('INFO\nWARN\nERROR\nCRITICAL\n')

    def test_set_log_level(self):
        # The level can be set before capture starts and adjusted mid-capture.
        self.output.set_log_level(logging.ERROR)
        self.output.capture_output()
        self.log_all_levels()
        self.output.set_log_level(logging.WARN)
        self.log_all_levels()
        self.assertLogged('ERROR\nCRITICAL\nWARN\nERROR\nCRITICAL\n')
class OutputCaptureTest(unittest.TestCase):
    """Tests for OutputCapture's log capture and level filtering."""

    def setUp(self):
        self.output = OutputCapture()

    def log_all_levels(self):
        # One message per severity, so level filtering is observable.
        _log.info('INFO')
        _log.warning('WARN')
        _log.error('ERROR')
        _log.critical('CRITICAL')

    def assertLogged(self, expected_logs):
        actual_stdout, actual_stderr, actual_logs = self.output.restore_output()
        self.assertEqual('', actual_stdout)
        self.assertEqual('', actual_stderr)
        # assertMultiLineEqual yields a readable line-by-line diff on failure,
        # unlike assertEqual's single-line repr of the whole transcript; it is
        # also consistent with the other definition of this helper in the file.
        self.assertMultiLineEqual(expected_logs, actual_logs)

    def test_initial_log_level(self):
        # By default, INFO and above are captured.
        self.output.capture_output()
        self.log_all_levels()
        self.assertLogged('INFO\nWARN\nERROR\nCRITICAL\n')

    def test_set_log_level(self):
        # The level may be set before capture and changed while capturing.
        self.output.set_log_level(logging.ERROR)
        self.output.capture_output()
        self.log_all_levels()
        self.output.set_log_level(logging.WARN)
        self.log_all_levels()
        self.assertLogged('ERROR\nCRITICAL\nWARN\nERROR\nCRITICAL\n')