def _run_reftest(self):
    test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
    total_test_time = test_output.test_time
    expected_output = None
    test_result = None

    expected_text = self._port.expected_text(self._test_name)
    expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)

    # If the test crashed, or timed out, there's no point in running the reference at all.
    # This can save a lot of execution time if we have a lot of crashes or timeouts.
    if test_output.crash or test_output.timeout:
        test_result = self._compare_output(expected_text_output, test_output)

        if test_output.crash:
            test_result_writer.write_test_result(self._filesystem, self._port,
                                                 self._results_directory, self._test_name,
                                                 test_output, expected_text_output,
                                                 test_result.failures)
        return test_result

    # A reftest can have multiple match references and multiple mismatch references;
    # the test fails if any mismatch matches and all of the matches don't match.
    # To minimize the number of references we have to check, we run all of the
    # mismatches first, then the matches, and short-circuit out as soon as we can.
    # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
    reference_test_names = []
    for expectation, reference_filename in sorted(self._reference_files):
        if self._port.lookup_virtual_test_base(self._test_name):
            args = self._port.lookup_virtual_reference_args(self._test_name)
        else:
            args = self._port.lookup_physical_reference_args(self._test_name)
        reference_test_name = self._port.relative_test_filename(reference_filename)
        reference_test_names.append(reference_test_name)
        driver_input = DriverInput(reference_test_name, self._timeout,
                                   image_hash=test_output.image_hash,
                                   should_run_pixel_test=True, args=args)
        expected_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
        total_test_time += expected_output.test_time
        test_result = self._compare_output_with_reference(
            expected_output, test_output, reference_filename, expectation == '!=')

        if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
            break

    assert expected_output

    if expected_text:
        text_output = DriverOutput(text=test_output.text, image=None, image_hash=None, audio=None)
        test_result.failures.extend(self._compare_output(expected_text_output, text_output).failures)
        expected_output.text = expected_text_output.text

    test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                         self._test_name, test_output, expected_output,
                                         test_result.failures)

    # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set()
    # to reftest_type and only really handle the first of the references in the result.
    reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
    return TestResult(self._test_name, test_result.failures, total_test_time,
                      test_result.has_stderr, reftest_type=reftest_type,
                      pid=test_result.pid, references=reference_test_names)

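# Aside (editor's sketch, not part of the original sources): a minimal standalone
# check of the sorting trick _run_reftest relies on. Reference files are
# (expectation, filename) tuples; tuples sort lexicographically, and '!' precedes
# '=' in ASCII, so a plain sorted() puts every '!=' (mismatch) reference before
# every '==' (match) reference. The file names below are made up for illustration.
def _demo_mismatches_sort_first():
    refs = [('==', 'ref-match.html'), ('!=', 'ref-mismatch.html'), ('==', 'ref-match2.html')]
    assert [expectation for expectation, _ in sorted(refs)] == ['!=', '==', '==']
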
def run_test(self, input):
    value = self._values[self._index]
    self._index += 1
    if isinstance(value, str):
        return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
    else:
        return DriverOutput('some output', image=None, image_hash=None, audio=None,
                            test_time=self._values[self._index - 1])

def test_reftest_diff_image(self):
    """write_test_result() should call port.diff_image with tolerance=0 in the case of FailureReftestMismatch."""
    used_tolerance_values = []

    class ImageDiffTestPort(TestPort):
        def diff_image(self, expected_contents, actual_contents, tolerance=None):
            used_tolerance_values.append(tolerance)
            return (True, 1)

    host = MockHost()
    port = ImageDiffTestPort(host)
    test_name = 'failures/unexpected/reftest.html'
    test_reference_file = host.filesystem.join(port.layout_tests_dir(),
                                               'failures/unexpected/reftest-expected.html')
    driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
    driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
    failures = [test_failures.FailureReftestMismatch(test_reference_file)]
    test_result_writer.write_test_result(host.filesystem, port, test_name,
                                         driver_output1, driver_output2, failures)
    self.assertEqual([0], used_tolerance_values)

def run_test(self, failures=None, files=None):
    failures = failures or []
    host = MockSystemHost()
    host.filesystem.files = files or {}
    port = TestPort(host=host, port_name='test-mac-mac10.11', options=optparse.Values())
    actual_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
    expected_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
    write_test_result(host.filesystem, port, '/tmp', 'foo.html',
                      actual_output, expected_output, failures)
    return host.filesystem.written_files

def run_test(self, input, stop_when_done):
    if input.test_name == self._test.force_gc_test:
        return
    value = self._values[self._index]
    self._index += 1
    if isinstance(value, str):
        return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
    else:
        return DriverOutput('some output', image=None, image_hash=None, audio=None,
                            test_time=self._values[self._index - 1], measurements=self._measurements)

def run_test(test_input):
    if test_input.test_name != "about:blank":
        self.assertEqual(test_input.test_name, 'http://some-test/')
    loaded_pages.append(test_input)
    self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
    return DriverOutput('actual text', 'actual image', 'actual checksum',
                        audio=None, crash=False, timeout=False, error=False)

def test_parse_output_with_subtests(self):
    output = DriverOutput("""
Running 20 times
some test: [1, 2, 3, 4, 5]
other test = else: [6, 7, 8, 9, 10]
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self._assert_results_are_correct(test, output)
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, '')

def test_parse_output_with_failing_line(self):
    output = DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

some-unrecognizable-line

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms
""", image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        test.run_single = lambda driver, path, time_out_ms: output
        self.assertFalse(test._run_with_driver(None, None))
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')

def test_parse_output(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'avg 1100',
        'median 1101',
        'stdev 11',
        'min 1080',
        'max 1120']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest('some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output),
                         {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0,
                                        'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs,
                     'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')

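# Aside (editor's sketch): a hypothetical formatter that reproduces the log lines the
# assertion above expects, shown purely to document the RESULT output format; the real
# logging lives inside PerfTest and may be structured differently.
def format_result_lines(test_name, stats):
    return ('RESULT %s= %s ms\n' % (test_name, stats['avg']) +
            'median= %s ms, stdev= %s ms, min= %s ms, max= %s ms\n' % (
                stats['median'], stats['stdev'], stats['min'], stats['max']))

# format_result_lines('some-test', {'avg': 1100.0, 'median': 1101.0, 'stdev': 11.0,
# 'min': 1080.0, 'max': 1120.0}) yields exactly the two lines asserted above.
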
def mock_run_single(driver, path, timeout):
    counter[0] += 1
    return DriverOutput('some output', image=None, image_hash=None, audio=None,
                        test_time=counter[0], measurements={})

def run_test(test_input, stop_when_done):
    self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
    return DriverOutput('actual text', 'actual image', 'actual checksum',
                        audio=None, crash=False, timeout=False, error=False)

def run_test(self, driver_input):
    text = ''
    timeout = False
    crash = False
    if driver_input.test_name.endswith('pass.html'):
        text = 'RESULT group_name: test_name= 42 ms'
    elif driver_input.test_name.endswith('timeout.html'):
        timeout = True
    elif driver_input.test_name.endswith('failed.html'):
        text = None
    elif driver_input.test_name.endswith('tonguey.html'):
        text = 'we are not expecting an output from perf tests but RESULT blablabla'
    elif driver_input.test_name.endswith('crash.html'):
        crash = True
    elif driver_input.test_name.endswith('event-target-wrapper.html'):
        text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

avg 1489.05
median 1487
stdev 14.46
min 1471
max 1510
"""
    elif driver_input.test_name.endswith('some-parser.html'):
        text = """Running 20 times
Ignoring warm-up run (1115)

avg 1100
median 1101
stdev 11
min 1080
max 1120
"""
    return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

def run_single(driver, path, time_out_ms):
    called[0] += 1
    return DriverOutput("""
Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)

def test_ignored_stderr_lines(self):
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
Unknown option: --foo-bar
Should not be ignored
[WARNING:proxy_service.cc] bad moon a-rising
[WARNING:chrome.cc] Something went wrong
[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
[ERROR:main.cc] The sky has fallen""")
    test._filter_output(output_with_lines_to_ignore)
    self.assertEqual(output_with_lines_to_ignore.error,
                     "Should not be ignored\n"
                     "[WARNING:chrome.cc] Something went wrong\n"
                     "[ERROR:main.cc] The sky has fallen")

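# Aside (editor's sketch): a hypothetical stand-in for PerfTest._filter_output with the
# behavior the test above pins down -- known-noise stderr lines (unknown-option complaints,
# proxy_service warnings, Skia/Android font-config chatter) are dropped, everything else is
# kept. The pattern list and function name are illustrative, not PerfTest's real ones.
import re

_IGNORED_STDERR_PATTERNS = [
    re.compile(r'^Unknown option:'),
    re.compile(r'^\[WARNING:proxy_service\.cc'),
    re.compile(r'^\[INFO:SkFontHost_android\.cpp'),
]

def filter_stderr_lines(error_text):
    kept = [line for line in error_text.splitlines()
            if line and not any(pattern.match(line) for pattern in _IGNORED_STDERR_PATTERNS)]
    return '\n'.join(kept)
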
def run_test(self, driver_input):
    text = ''
    timeout = False
    crash = False
    if driver_input.test_name == 'pass.html':
        text = 'RESULT group_name: test_name= 42 ms'
    elif driver_input.test_name == 'timeout.html':
        timeout = True
    elif driver_input.test_name == 'failed.html':
        text = None
    elif driver_input.test_name == 'tonguey.html':
        text = 'we are not expecting an output from perf tests but RESULT blablabla'
    elif driver_input.test_name == 'crash.html':
        crash = True
    return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

def test_parse_output_with_description(self):
    output = DriverOutput("""
Description: this is a test description.
Running 20 times
Ignoring warm-up run (1115)

Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50862 ms
min 1080 ms
max 1120 ms""", image=None, image_hash=None, audio=None)
    test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
    self._assert_results_are_correct(test, output)
    self.assertEqual(test.description(), 'this is a test description.')

def output_for_test(self, test_input, is_reftest):
    port = self._port
    if self._options.virtual_test_suite_name:
        test_input.test_name = test_input.test_name.replace(
            self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
    actual_text = port.expected_text(test_input.test_name)
    actual_audio = port.expected_audio(test_input.test_name)
    actual_image = None
    actual_checksum = None
    if is_reftest:
        # Make up some output for reftests.
        actual_text = 'reference text\n'
        actual_checksum = 'mock-checksum'
        actual_image = 'blank'
        if test_input.test_name.endswith('-mismatch.html'):
            actual_text = 'not reference text\n'
            actual_checksum = 'not-mock-checksum'
            actual_image = 'not blank'
    elif test_input.should_run_pixel_test and test_input.image_hash:
        actual_checksum = port.expected_checksum(test_input.test_name)
        actual_image = port.expected_image(test_input.test_name)

    if self._options.actual_directory:
        actual_path = port.host.filesystem.join(self._options.actual_directory, test_input.test_name)
        root, _ = port.host.filesystem.splitext(actual_path)
        text_path = root + '-actual.txt'
        if port.host.filesystem.exists(text_path):
            actual_text = port.host.filesystem.read_binary_file(text_path)
        audio_path = root + '-actual.wav'
        if port.host.filesystem.exists(audio_path):
            actual_audio = port.host.filesystem.read_binary_file(audio_path)
        image_path = root + '-actual.png'
        if port.host.filesystem.exists(image_path):
            actual_image = port.host.filesystem.read_binary_file(image_path)
            with port.host.filesystem.open_binary_file_for_reading(image_path) as filehandle:
                actual_checksum = read_checksum_from_png.read_checksum(filehandle)

    return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)

def test_parse_output_with_failing_line(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'some-unrecognizable-line',
        '',
        'avg 1100',
        'median 1101',
        'stdev 11',
        'min 1080',
        'max 1120']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest('some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output), None)
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'some-unrecognizable-line\n')

def output_for_test(self, test_input, is_reftest):
    port = self._port
    actual_text = port.expected_text(test_input.test_name)
    actual_audio = port.expected_audio(test_input.test_name)
    actual_image = None
    actual_checksum = None
    if is_reftest:
        # Make up some output for reftests.
        actual_text = 'reference text\n'
        actual_checksum = 'mock-checksum'
        actual_image = 'blank'
        if test_input.test_name.endswith('-mismatch.html'):
            actual_text = 'not reference text\n'
            actual_checksum = 'not-mock-checksum'
            actual_image = 'not blank'
    elif self._options.pixel_tests and test_input.image_hash:
        actual_checksum = port.expected_checksum(test_input.test_name)
        actual_image = port.expected_image(test_input.test_name)
    return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)

def run_test(self, driver_input, stop_when_done):
    text = ''
    timeout = False
    crash = False
    if driver_input.test_name.endswith('pass.html'):
        text = InspectorPassTestData.text
    elif driver_input.test_name.endswith('timeout.html'):
        timeout = True
    elif driver_input.test_name.endswith('failed.html'):
        text = None
    elif driver_input.test_name.endswith('tonguey.html'):
        text = 'we are not expecting an output from perf tests but RESULT blablabla'
    elif driver_input.test_name.endswith('crash.html'):
        crash = True
    elif driver_input.test_name.endswith('event-target-wrapper.html'):
        text = EventTargetWrapperTestData.text
    elif driver_input.test_name.endswith('some-parser.html'):
        text = SomeParserTestData.text
    elif driver_input.test_name.endswith('memory-test.html'):
        text = MemoryTestData.text
    return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

def test_parse_output_with_subtests(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'some test: [1, 2, 3, 4, 5]',
        'other test = else: [6, 7, 8, 9, 10]',
        '',
        'Time:',
        'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
        'avg 1100 ms',
        'median 1101 ms',
        'stdev 11 ms',
        'min 1080 ms',
        'max 1120 ms']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output),
                         {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0,
                                        'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
                                        'values': [i for i in range(1, 20)]}})
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs,
                     'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')

def test_parse_output_with_failing_line(self):
    output = DriverOutput('\n'.join([
        'Running 20 times',
        'Ignoring warm-up run (1115)',
        '',
        'some-unrecognizable-line',
        '',
        'Time:',
        'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
        'avg 1100 ms',
        'median 1101 ms',
        'stdev 11 ms',
        'min 1080 ms',
        'max 1120 ms']), image=None, image_hash=None, audio=None)
    output_capture = OutputCapture()
    output_capture.capture_output()
    try:
        test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
        self.assertEqual(test.parse_output(output), None)
    finally:
        actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
    self.assertEqual(actual_stdout, '')
    self.assertEqual(actual_stderr, '')
    self.assertEqual(actual_logs, 'some-unrecognizable-line\n')

def run_test(test_input):
    loaded_pages.append(test_input)
    self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
    return DriverOutput('actual text', 'actual image', 'actual checksum',
                        audio=None, crash=False, timeout=False, error='some error')

def _expected_driver_output(self):
    return DriverOutput(self._port.expected_text(self._test_name),
                        self._port.expected_image(self._test_name),
                        self._port.expected_checksum(self._test_name),
                        self._port.expected_audio(self._test_name))

def run_test(self, driver_input, stop_when_done):
    text = ''
    timeout = False
    crash = False
    if driver_input.test_name.endswith('pass.html'):
        text = 'RESULT group_name: test_name= 42 ms'
    elif driver_input.test_name.endswith('timeout.html'):
        timeout = True
    elif driver_input.test_name.endswith('failed.html'):
        text = None
    elif driver_input.test_name.endswith('tonguey.html'):
        text = 'we are not expecting an output from perf tests but RESULT blablabla'
    elif driver_input.test_name.endswith('crash.html'):
        crash = True
    elif driver_input.test_name.endswith('event-target-wrapper.html'):
        text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

Time:
avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
min 1471 ms
max 1510 ms
"""
    elif driver_input.test_name.endswith('some-parser.html'):
        text = """Running 20 times
Ignoring warm-up run (1115)

Time:
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms
"""
    elif driver_input.test_name.endswith('memory-test.html'):
        text = """Running 20 times
Ignoring warm-up run (1115)

Time:
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms

JS Heap:
avg 832000 bytes
median 829000 bytes
stdev 15000 bytes
min 811000 bytes
max 848000 bytes

Malloc:
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
min 511000 bytes
max 548000 bytes
"""
    return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

def run_test(self, driver_input):
    output = []
    error = []
    crash = False
    timeout = False
    actual_uri = None
    actual_checksum = None
    self._clear_output_image()
    start_time = time.time()
    has_audio = False
    has_base64 = False

    uri = self._port.test_to_uri(driver_input.test_name)
    cmd = self._test_shell_command(uri, driver_input.timeout, driver_input.image_hash)
    (line, crash) = self._write_command_and_read_line(input=cmd)

    while not crash and line.rstrip() != "#EOF":
        # Make sure we haven't crashed.
        if line == '' and self.poll() is not None:
            # This is hex code 0xc000013a, which is used for abrupt
            # termination. This happens if we hit ctrl+c from the prompt
            # and we happen to be waiting on DRT.
            # sdoyon: Not sure for which OS and in what circumstances the
            # above code is valid. What works for me under Linux to detect
            # ctrl+c is for the subprocess returncode to be negative
            # SIGINT. And that agrees with the subprocess documentation.
            if (-1073741510 == self._proc.returncode or
                    -signal.SIGINT == self._proc.returncode):
                raise KeyboardInterrupt
            crash = True
            break

        # Don't include #URL lines in our output
        if line.startswith("#URL:"):
            actual_uri = line.rstrip()[5:]
            if uri != actual_uri:
                # GURL capitalizes the drive letter of a file URL.
                if (not re.search("^file:///[a-z]:", uri) or
                        uri.lower() != actual_uri.lower()):
                    _log.fatal("Test got out of sync:\n|%s|\n|%s|" % (uri, actual_uri))
                    raise AssertionError("test out of sync")
        elif line.startswith("#MD5:"):
            actual_checksum = line.rstrip()[5:]
        elif line.startswith("#TEST_TIMED_OUT"):
            timeout = True
            # Test timed out, but we still need to read until #EOF.
        elif line.startswith("Content-Type: audio/wav"):
            has_audio = True
        elif line.startswith("Content-Transfer-Encoding: base64"):
            has_base64 = True
        elif line.startswith("Content-Length:"):
            pass
        elif actual_uri:
            output.append(line)
        else:
            error.append(line)

        (line, crash) = self._write_command_and_read_line(input=None)

    run_time = time.time() - start_time
    output_image = self._output_image_with_retry()

    audio_bytes = None
    text = None
    if has_audio:
        if has_base64:
            audio_bytes = base64.b64decode(''.join(output))
        else:
            audio_bytes = ''.join(output).rstrip()
    else:
        text = ''.join(output)
        if not text:
            text = None

    error = ''.join(error)
    # Currently the stacktrace is in the text output, not error, so append the two
    # together so that we can see the stack in the output. See http://webkit.org/b/66806
    # FIXME: We really should properly handle the stderr output separately.
    if crash:
        error = error + str(text)

    return DriverOutput(text, output_image, actual_checksum, audio=audio_bytes,
                        crash=crash, test_time=run_time, timeout=timeout, error=error)

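# Aside (editor's sketch): a hypothetical transcript of the test_shell stream the loop
# above consumes, shown only to make the wire format concrete. Marker lines ('#URL:',
# '#MD5:', '#TEST_TIMED_OUT') are consumed by the parser; lines after '#URL:' and before
# '#EOF' become the test's text output; anything before '#URL:' is treated as stderr.
_SAMPLE_TEST_SHELL_STREAM = (
    'some stderr noise before the URL marker\n'
    '#URL:file:///tmp/LayoutTests/fast/example.html\n'
    'PASS example text output\n'
    '#MD5:d41d8cd98f00b204e9800998ecf8427e\n'
    '#EOF\n'
)
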
def run_test(self, driver_input, stop_when_done):
    if not self.started:
        self.started = True
        self.pid = TestDriver.next_pid
        TestDriver.next_pid += 1

    start_time = time.time()
    test_name = driver_input.test_name
    test_args = driver_input.args or []
    test = self._port._tests[test_name]
    if test.keyboard:
        raise KeyboardInterrupt
    if test.exception:
        raise ValueError('exception from ' + test_name)
    if test.device_failure:
        raise DeviceFailure('device failure in ' + test_name)

    audio = None
    actual_text = test.actual_text
    crash = test.crash
    web_process_crash = test.web_process_crash

    if 'flaky/text.html' in test_name and test_name not in self._port._flakes:
        self._port._flakes.add(test_name)
        actual_text = 'flaky text failure'

    if 'crash_then_text.html' in test_name:
        if test_name in self._port._flakes:
            actual_text = 'text failure'
        else:
            self._port._flakes.add(test_name)
            crashed_process_name = self._port.driver_name()
            crashed_pid = 1
            crash = True

    if 'text_then_crash.html' in test_name:
        if test_name in self._port._flakes:
            crashed_process_name = self._port.driver_name()
            crashed_pid = 1
            crash = True
        else:
            self._port._flakes.add(test_name)
            actual_text = 'text failure'

    if actual_text and test_args and test_name == 'passes/args.html':
        actual_text = actual_text + ' ' + ' '.join(test_args)

    if test.actual_audio:
        audio = base64.b64decode(test.actual_audio)
    crashed_process_name = None
    crashed_pid = None
    if crash:
        crashed_process_name = self._port.driver_name()
        crashed_pid = 1
    elif web_process_crash:
        crashed_process_name = 'WebProcess'
        crashed_pid = 2

    crash_log = ''
    if crashed_process_name:
        crash_logs = CrashLogs(self._port.host)
        crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''

    if 'crash-reftest.html' in test_name:
        crashed_process_name = self._port.driver_name()
        crashed_pid = 3
        crash = True
        crash_log = 'reftest crash log'
    if stop_when_done:
        self.stop()

    if test.actual_checksum == driver_input.image_hash:
        image = None
    else:
        image = test.actual_image
    return DriverOutput(actual_text, image, test.actual_checksum, audio,
                        crash=(crash or web_process_crash),
                        crashed_process_name=crashed_process_name,
                        crashed_pid=crashed_pid, crash_log=crash_log,
                        test_time=time.time() - start_time, timeout=test.timeout,
                        error=test.error, pid=self.pid, leak=test.leak)