def test_reftest_diff_image(self):
    """write_test_result() must call port.diff_image with tolerance=0 when the
    failure is a FailureReftestMismatch."""
    recorded_tolerances = []

    class ImageDiffTestPort(TestPort):
        def diff_image(self, expected_contents, actual_contents, tolerance=None):
            # Record the tolerance used for each call and pretend the images differ.
            recorded_tolerances.append(tolerance)
            return (True, 1, None)

    host = MockHost()
    port = ImageDiffTestPort(host)
    test_name = 'failures/unexpected/reftest.html'
    test_reference_file = host.filesystem.join(
        port.layout_tests_dir(), 'failures/unexpected/reftest-expected.html')
    driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
    driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
    failures = [test_failures.FailureReftestMismatch(test_reference_file)]
    test_result_writer.write_test_result(
        host.filesystem, ImageDiffTestPort(host), port.results_directory(),
        test_name, driver_output1, driver_output2, failures)
    self.assertEqual([0], recorded_tolerances)
def test_parse_output_with_ignored_stderr(self):
    """Known CoreGraphics noise on stderr must be ignored on the mac-sierra port,
    leaving a clean parse and only the RESULT log line."""
    noise = """
Jan 22 14:09:24 WebKitTestRunner[1296] <Error>: CGContextSetFillColorWithColor: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24 WebKitTestRunner[1296] <Error>: CGContextSetStrokeColorWithColor: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24 WebKitTestRunner[1296] <Error>: CGContextGetCompositeOperation: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24 WebKitTestRunner[1296] <Error>: CGContextSetCompositeOperation: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
Jan 22 14:09:24 WebKitTestRunner[1296] <Error>: CGContextFillRects: invalid context 0x0. If you want to see the backtrace, please set CG_CONTEXT_SHOW_BACKTRACE environmental variable.
"""
    output = DriverOutput(':Time -> [1080, 1120, 1095, 1101, 1104] ms',
                          image=None, image_hash=None, audio=None, error=noise)

    class MockPortWithSierraName(MockPort):
        def name(self):
            return 'mac-sierra'

    with OutputCapture(level=logging.INFO) as captured:
        test = PerfTest(MockPortWithSierraName(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
    self.assertEqual(captured.stdout.getvalue(), '')
    self.assertEqual(captured.stderr.getvalue(), '')
    self.assertEqual(captured.root.log.getvalue(), """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
def test_parse_output_with_subtests(self):
    """Legacy-format driver output containing per-subtest value lines parses
    cleanly and produces no stray output or log entries."""
    output = DriverOutput("""
Running 20 times
some test: [1, 2, 3, 4, 5]
other test = else: [6, 7, 8, 9, 10]
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, '')
def test_parse_output_with_failing_line(self):
    """An unrecognizable line in the driver output makes the run fail and is
    reported via a single ERROR log entry."""
    output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)
some-unrecognizable-line
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertFalse(test._run_with_driver(None, None))
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, 'ERROR: some-unrecognizable-line\n')
def mock_run_signle(drive, path, timeout):
    # Stub standing in for PerfTest.run_single: counts invocations in the
    # enclosing `counter` cell and returns canned output whose test_time is the
    # invocation number. (The "signle" typo is kept — presumably the enclosing
    # test assigns/refers to it by this exact name.)
    counter[0] += 1
    run_index = counter[0]
    return DriverOutput('some output', image=None, image_hash=None, audio=None,
                        test_time=run_index, measurements={})
def run_single(driver, path, time_out_ms):
    # Stub for PerfTest.run_single: bump the shared call counter and hand back
    # fixed output containing a description plus one whole-test Time metric.
    called[0] += 1
    return DriverOutput("""
Description: this is a test description.
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
def test_parse_output_with_description(self):
    """The 'Description:' line is parsed out of the output and exposed through
    test.description()."""
    output = DriverOutput("""
Description: this is a test description.
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    self._assert_results_are_correct(test, output)
    self.assertEqual(test.description(), 'this is a test description.')
def _expected_driver_output(self):
    """Assemble a DriverOutput from the port's expected text/image/checksum/audio
    for this test, resolved for the driver's device type."""
    port = self._port
    name = self._test_name
    device_type = self._driver.host.device_type
    return DriverOutput(
        port.expected_text(name, device_type=device_type),
        port.expected_image(name, device_type=device_type),
        port.expected_checksum(name, device_type=device_type),
        port.expected_audio(name, device_type=device_type))
def run_test(test_input, stop_when_done):
    # Driver stub: drop a .wpr replay file next to the test, then return
    # canned passing results.
    self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
    return DriverOutput('actual text', 'actual image', 'actual checksum',
                        audio=None, crash=False, timeout=False, error=False)
def test_parse_output_with_subtests(self):
    """Subtest lines — including names containing ':', '=', ',' and '[]' — are
    parsed into per-subtest metrics; only the whole-test result is logged.

    Fix: the original compared `map(...)` results against lists, which only
    works on Python 2 (Python 3's map() returns an iterator that never equals
    a list). List comprehensions behave identically on both. Also uses the
    idiomatic `subtests[-1]` instead of `subtests[len(subtests) - 1]`.
    """
    output = DriverOutput("""
Description: this is a test description.
some test:Time -> [1, 2, 3, 4, 5] ms
some other test = else:Time -> [6, 7, 8, 9, 10] ms
some other test = else:Malloc -> [11, 12, 13, 14, 15] bytes
Array Construction, []:Time -> [11, 12, 13, 14, 15] ms
Concat String:Time -> [15163, 15304, 15386, 15608, 15622] ms
jQuery - addClass:Time -> [2785, 2815, 2826, 2841, 2861] ms
Dojo - div:only-child:Time -> [7825, 7910, 7950, 7958, 7970] ms
Dojo - div:nth-child(2n+1):Time -> [3620, 3623, 3633, 3641, 3658] ms
Dojo - div > div:Time -> [10158, 10172, 10180, 10183, 10231] ms
Dojo - div ~ div:Time -> [6673, 6675, 6714, 6848, 6902] ms
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertTrue(test.run(10))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

    subtests = test._metrics
    self.assertEqual([subtest['name'] for subtest in subtests],
        ['some test', 'some other test = else', 'Array Construction, []',
         'Concat String', 'jQuery - addClass', 'Dojo - div:only-child',
         'Dojo - div:nth-child(2n+1)', 'Dojo - div > div', 'Dojo - div ~ div', None])

    some_test_metrics = subtests[0]['metrics']
    self.assertEqual([metric.name() for metric in some_test_metrics], ['Time'])
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'some test'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

    some_other_test_metrics = subtests[1]['metrics']
    self.assertEqual([metric.name() for metric in some_other_test_metrics], ['Time', 'Malloc'])
    self.assertEqual(some_other_test_metrics[0].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[0].flattened_iteration_values(), [6, 7, 8, 9, 10] * 4)
    self.assertEqual(some_other_test_metrics[1].path(), ['some-dir', 'some-test', 'some other test = else'])
    self.assertEqual(some_other_test_metrics[1].flattened_iteration_values(), [11, 12, 13, 14, 15] * 4)

    main_metrics = subtests[-1]['metrics']
    self.assertEqual([metric.name() for metric in main_metrics], ['Time'])
    self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
    self.assertEqual(main_metrics[0].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104] * 4)

    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, """DESCRIPTION: this is a test description.
RESULT some-dir: some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
def _assert_failed_on_line(self, output_text, expected_log):
    """Run a PerfTest over output_text and assert the run fails, emitting
    exactly expected_log (and nothing on stdout/stderr)."""
    output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertFalse(test._run_with_driver(None, None))
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, expected_log)
def _assert_failed_on_line(self, output_text, expected_log):
    """Run a PerfTest over output_text and assert it fails, logging exactly
    expected_log while keeping stdout/stderr clean."""
    output = DriverOutput(output_text, image=None, image_hash=None, audio=None)
    with OutputCapture(level=logging.INFO) as captured:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertFalse(test._run_with_driver(None, None))
    self.assertEqual(captured.stdout.getvalue(), '')
    self.assertEqual(captured.stderr.getvalue(), '')
    self.assertEqual(captured.root.log.getvalue(), expected_log)
def test_parse_output_with_subtests_and_total(self):
    """Metrics suffixed with an aggregator (':Total') record the aggregator,
    and nested subtest names ('EmberJS-TodoMVC/a') split into path components.

    Fix: the original compared `map(...)` results against lists, which only
    works on Python 2 (Python 3's map() returns an iterator that never equals
    a list). List comprehensions behave identically on both.
    """
    output = DriverOutput("""
:Time:Total -> [2324, 2328, 2345, 2314, 2312] ms
EmberJS-TodoMVC:Time:Total -> [1462, 1473, 1490, 1465, 1458] ms
EmberJS-TodoMVC/a:Time -> [1, 2, 3, 4, 5] ms
BackboneJS-TodoMVC:Time -> [862, 855, 855, 849, 854] ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertTrue(test.run(10))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()

    subtests = test._metrics
    self.assertEqual([subtest['name'] for subtest in subtests],
        [None, 'EmberJS-TodoMVC', 'EmberJS-TodoMVC/a', 'BackboneJS-TodoMVC'])

    # Whole-test metric carries the Total aggregator.
    main_metrics = subtests[0]['metrics']
    self.assertEqual([metric.name() for metric in main_metrics], ['Time'])
    self.assertEqual(main_metrics[0].aggregator(), 'Total')
    self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
    self.assertEqual(main_metrics[0].flattened_iteration_values(), [2324, 2328, 2345, 2314, 2312] * 4)

    some_test_metrics = subtests[1]['metrics']
    self.assertEqual([metric.name() for metric in some_test_metrics], ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), 'Total')
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1462, 1473, 1490, 1465, 1458] * 4)

    # Nested subtest: 'EmberJS-TodoMVC/a' contributes an extra path component.
    some_test_metrics = subtests[2]['metrics']
    self.assertEqual([metric.name() for metric in some_test_metrics], ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), None)
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)

    some_test_metrics = subtests[3]['metrics']
    self.assertEqual([metric.name() for metric in some_test_metrics], ['Time'])
    self.assertEqual(some_test_metrics[0].aggregator(), None)
    self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'BackboneJS-TodoMVC'])
    self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [862, 855, 855, 849, 854] * 4)

    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, """RESULT some-dir: some-test: Time= 2324.6 ms
median= 2324.0 ms, stdev= 12.1326007105 ms, min= 2312.0 ms, max= 2345.0 ms
""")
def test_parse_output(self):
    """A single whole-test Time metric line parses and is reported as one
    RESULT log entry."""
    output = DriverOutput("""
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    capture = OutputCapture()
    capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
    finally:
        stdout, stderr, logs = capture.restore_output()
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    self.assertEqual(logs, """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
def run_single(driver, path, time_out_ms):
    # Stub for PerfTest.run_single: bump the shared call counter and return
    # fixed legacy-format output (note: no trailing newline after "max").
    called[0] += 1
    return DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)
def run_test(test_input, stop_when_done):
    # Driver stub: record every page load; only a real (non-GC, non-blank)
    # page produces driver output.
    if test_input.test_name == test.force_gc_test:
        # GC pages are recorded but yield no output.
        loaded_pages.append(test_input)
        return
    if test_input.test_name != "about:blank":
        self.assertEqual(test_input.test_name, 'http://some-test/')
    loaded_pages.append(test_input)
    self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
    return DriverOutput('actual text', 'actual image', 'actual checksum',
                        audio=None, crash=False, timeout=False, error=False,
                        test_time=12345)
def test_ignored_stderr_lines(self):
    """_filter_output() drops known-noise stderr lines and keeps real errors."""
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    noisy_output = DriverOutput('', image=None, image_hash=None, audio=None, error="""
Unknown option: --foo-bar
Should not be ignored
[WARNING:proxy_service.cc] bad moon a-rising
[WARNING:chrome.cc] Something went wrong
[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
[ERROR:main.cc] The sky has fallen""")
    test._filter_output(noisy_output)
    self.assertEqual(noisy_output.error,
                     'Should not be ignored\n'
                     '[WARNING:chrome.cc] Something went wrong\n'
                     '[ERROR:main.cc] The sky has fallen')
def test_parse_output(self):
    """A single whole-test Time metric line parses and logs one RESULT entry,
    with stdout and stderr left untouched."""
    output = DriverOutput("""
:Time -> [1080, 1120, 1095, 1101, 1104] ms
""", image=None, image_hash=None, audio=None)
    with OutputCapture(level=logging.INFO) as captured:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
    self.assertEqual(captured.stdout.getvalue(), '')
    self.assertEqual(captured.stderr.getvalue(), '')
    self.assertEqual(captured.root.log.getvalue(), """RESULT some-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
""")
def output_for_test(self, test_input, is_reftest):
    """Synthesize a DriverOutput for test_input.

    Reftests get fabricated output: matching values normally, deliberately
    mismatching values for *-mismatch.html references. Otherwise pixel output
    comes from the port's stored expectations when pixel tests are enabled.
    """
    port = self._port
    actual_text = port.expected_text(test_input.test_name)
    actual_audio = port.expected_audio(test_input.test_name)
    actual_image = None
    actual_checksum = None
    if is_reftest:
        if test_input.test_name.endswith('-mismatch.html'):
            actual_text = 'not reference text\n'
            actual_checksum = 'not-mock-checksum'
            actual_image = 'not blank'
        else:
            actual_text = 'reference text\n'
            actual_checksum = 'mock-checksum'
            actual_image = 'blank'
    elif self._options.pixel_tests and test_input.image_hash:
        actual_checksum = port.expected_checksum(test_input.test_name)
        actual_image = port.expected_image(test_input.test_name)
    return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
def test_parse_output_with_description(self):
    """Legacy-format output: the 'Description:' line is captured and exposed
    via test.description() while the metrics still parse correctly."""
    output = DriverOutput("""
Description: this is a test description.
Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    self._assert_results_are_correct(test, output)
    self.assertEqual(test.description(), 'this is a test description.')
def run_test(self, driver_input, stop_when_done):
    """Driver stub: choose canned text/timeout/crash results based on the
    suffix of the requested test file name."""
    text = ''
    timeout = False
    crash = False
    name = driver_input.test_name
    if name.endswith('pass.html'):
        text = SomeParserTestData.text
    elif name.endswith('timeout.html'):
        timeout = True
    elif name.endswith('failed.html'):
        # A failed run produces no text at all.
        text = None
    elif name.endswith('tonguey.html'):
        text = 'we are not expecting an output from perf tests but RESULT blablabla'
    elif name.endswith('crash.html'):
        crash = True
    elif name.endswith('event-target-wrapper.html'):
        text = EventTargetWrapperTestData.text
    elif name.endswith('some-parser.html'):
        text = SomeParserTestData.text
    elif name.endswith('memory-test.html'):
        text = MemoryTestData.text
    return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
def _expected_driver_output(self):
    """Assemble a DriverOutput from the port's stored expected text, image,
    checksum, and audio for this test."""
    port = self._port
    name = self._test_name
    return DriverOutput(port.expected_text(name),
                        port.expected_image(name),
                        port.expected_checksum(name),
                        port.expected_audio(name))