def _look_for_new_crash_logs(self, result_summary, start_time):
    """Do a late second scan for crash logs once the whole run has finished.

    Since crash logs can take a long time to be written out if the system
    is under stress, a single pass during the run can miss them.

    result_summary: the results of the test run
    start_time: time the tests started at. We're looking for crash logs after that time.
    """
    # (test, process name, pid) triples for every unexpected crash, so the
    # port can match late-arriving logs back to the test that produced them.
    crashed_processes = [
        [name, crash.process_name, crash.pid]
        for name, result in result_summary.unexpected_results.iteritems()
        if result.type == test_expectations.CRASH
        for crash in result.failures
        if isinstance(crash, test_failures.FailureCrash)]
    crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
    # look_for_new_crash_logs may return None; treat that the same as "none found".
    for name, log in (crash_logs or {}).iteritems():
        TestResultWriter(self._port._filesystem, self._port,
                         self._port.results_directory(), name).write_crash_log(log)
def run_single(self, driver, url, time_out_ms, record=False):
    """Run (or record) one replayed page load and capture its pixel output.

    Starts the web-page-replay server for this test's archive, loads
    self._url through it (note: the `url` argument is not used; the replay
    URL comes from the test itself), and writes the resulting image next to
    the archive — as the expected image when recording, as the actual image
    when replaying. Returns the driver output, or None on any failure.
    """
    replay_server = self._start_replay_server(self._archive_path, record)
    if not replay_server:
        _log.error("Web page replay didn't start.")
        return None
    try:
        _log.debug("Waiting for Web page replay to start.")
        if not replay_server.wait_until_ready():
            _log.error("Web page replay didn't start.")
            return None
        _log.debug("Web page replay started. Loading the page.")
        # Force a pixel test so we always get image results to archive.
        output = super(ReplayPerfTest, self).run_single(
            driver, self._url, time_out_ms, should_run_pixel_test=True)
        if self.run_failed(output):
            return None
        if not output.image:
            _log.error("Loading the page did not generate image results")
            _log.error(output.text)
            return None
        fs = self._port.host.filesystem
        archive_dir, archive_name = fs.split(self._archive_path)
        writer = TestResultWriter(fs, self._port, archive_dir, archive_name)
        if record:
            writer.write_image_files(actual_image=None, expected_image=output.image)
        else:
            writer.write_image_files(actual_image=output.image, expected_image=None)
        return output
    finally:
        # Always shut the replay server down, even on early returns.
        replay_server.stop()
def _look_for_new_crash_logs(self, result_summary, start_time):
    """Since crash logs can take a long time to be written out if the system
    is under stress do a second pass at the end of the test run.

    result_summary: the results of the test run
    start_time: time the tests started at. We're looking for crash logs after that time.
    """
    # Gather [test, process_name, pid] for every unexpectedly-crashed test so
    # the port can match late-arriving crash logs back to their tests.
    crashed_processes = []
    for test, result in result_summary.unexpected_results.iteritems():
        if (result.type != test_expectations.CRASH):
            continue
        for failure in result.failures:
            if not isinstance(failure, test_failures.FailureCrash):
                continue
            crashed_processes.append(
                [test, failure.process_name, failure.pid])
    crash_logs = self._port.look_for_new_crash_logs(
        crashed_processes, start_time)
    if crash_logs:
        # Write each newly-found crash log into the per-test results directory.
        for test, crash_log in crash_logs.iteritems():
            writer = TestResultWriter(self._port._filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.write_crash_log(crash_log)
def _update_for_platform_specific_bot(self, bot_name):
    """Sync platform-specific expected files for one bot.

    For each failing test on the bot: if the bot's expected content matches
    the generic expectation, any platform-specific override is redundant and
    is removed; otherwise the platform-specific file is (re)written when its
    current content differs from what the bot produced.
    """
    fs = self.filesystem
    platform_name = self._platform_name(bot_name)
    for test_name, expected_content in self._tests_to_update(bot_name).items():
        platform_path = fs.join(
            self.layout_test_repository,
            TestResultWriter.expected_filename(test_name, fs, platform_name))
        generic_path = fs.join(
            self.layout_test_repository,
            TestResultWriter.expected_filename(test_name, fs))
        matches_generic = expected_content == self._file_content_if_exists(generic_path)
        if matches_generic:
            # Generic expectation already covers this bot; drop any override.
            if fs.exists(platform_path):
                _log.info("Updating " + test_name + " for " + bot_name +
                          " ( REMOVED: " + platform_path + ")")
                fs.remove(platform_path)
        elif expected_content != self._file_content_if_exists(platform_path):
            _log.info("Updating " + test_name + " for " + bot_name +
                      " (" + platform_path + ")")
            fs.maybe_make_directory(fs.dirname(platform_path))
            fs.write_text_file(platform_path, expected_content)
def _look_for_new_crash_logs(self, run_results, start_time):
    """Looks for and writes new crash logs, at the end of the test run.

    Since crash logs can take a long time to be written out if the system is
    under stress, do a second pass at the end of the test run.

    Args:
        run_results: The results of the test run.
        start_time: Time the tests started at. We're looking for crash logs after that time.
    """
    def writer_for(test_name):
        # Each artifact goes into the per-test results directory.
        return TestResultWriter(self._filesystem, self._port,
                                self._port.results_directory(), test_name)

    # Crashes that already have a log need no second look (has_log check).
    crashed_processes = [
        [test_name, failure.process_name, failure.pid]
        for test_name, result in run_results.unexpected_results_by_name.iteritems()
        if result.type == test_expectations.CRASH
        for failure in result.failures
        if isinstance(failure, test_failures.FailureCrash) and not failure.has_log]

    sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
    for test_name, sample_file in (sample_files or {}).iteritems():
        writer_for(test_name).copy_sample_file(sample_file)

    crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
    for test_name, crash_log in (crash_logs or {}).iteritems():
        writer_for(test_name).write_crash_log(crash_log)
def _update_from_generic_attachment(self):
    """Refresh generic expected files from the generic bot's attachment.

    Rewrites each test's generic expected file only when its current
    on-disk content differs from what the attachment produced.
    """
    fs = self.filesystem
    updates = self._tests_to_update(self.generic_attachment)
    for test_name, expected_content in updates.items():
        target = fs.join(self.layout_test_repository,
                         TestResultWriter.expected_filename(test_name, fs))
        if self._file_content_if_exists(target) == expected_content:
            continue  # Already up to date; avoid a no-op write.
        _log.info("Updating " + test_name + " (" + target + ")")
        fs.write_text_file(target, expected_content)
def _look_for_new_crash_logs(self, run_results, start_time):
    """Looks for and writes new crash logs, at the end of the test run.

    Since crash logs can take a long time to be written out if the system is
    under stress, do a second pass at the end of the test run.

    Args:
        run_results: The results of the test run.
        start_time: Time the tests started at. We're looking for crash logs after that time.
    """
    # Gather [test, process_name, pid] for crashes that don't already have a
    # log; crashes with a log were fully handled during the run.
    crashed_processes = []
    for test, result in run_results.unexpected_results_by_name.iteritems():
        if result.type != test_expectations.CRASH:
            continue
        for failure in result.failures:
            if not isinstance(failure, test_failures.FailureCrash):
                continue
            if failure.has_log:
                continue
            crashed_processes.append([test, failure.process_name, failure.pid])
    # Copy any sample files (e.g. spindump-style samples) found for those crashes.
    sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
    if sample_files:
        for test, sample_file in sample_files.iteritems():
            writer = TestResultWriter(self._filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.copy_sample_file(sample_file)
    # Write any crash logs that showed up late into the per-test results dir.
    crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
    if crash_logs:
        for test, crash_log in crash_logs.iteritems():
            writer = TestResultWriter(self._filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.write_crash_log(crash_log)
def run_single(self, driver, url, time_out_ms, record=False):
    """Run (or record) a single replayed page load.

    Starts the web-page-replay server for this test's archive, loads
    self._url through it (the `url` argument is not used here), and writes
    the resulting image alongside the archive: as the expected image when
    recording, as the actual image when replaying. Returns the driver
    output, or None on any failure.
    """
    server = self._start_replay_server(self._archive_path, record)
    if not server:
        _log.error("Web page replay didn't start.")
        return None
    try:
        _log.debug("Waiting for Web page replay to start.")
        if not server.wait_until_ready():
            _log.error("Web page replay didn't start.")
            return None
        _log.debug("Web page replay started. Loading the page.")
        # Force a pixel test so the run always produces image results.
        output = super(ReplayPerfTest, self).run_single(driver, self._url, time_out_ms,
                                                        should_run_pixel_test=True)
        if self.run_failed(output):
            return None
        if not output.image:
            _log.error("Loading the page did not generate image results")
            _log.error(output.text)
            return None
        # The image files are written next to the archive, named after it.
        filesystem = self._port.host.filesystem
        dirname = filesystem.dirname(self._archive_path)
        filename = filesystem.split(self._archive_path)[1]
        writer = TestResultWriter(filesystem, self._port, dirname, filename)
        if record:
            writer.write_image_files(actual_image=None, expected_image=output.image)
        else:
            writer.write_image_files(actual_image=output.image, expected_image=None)
        return output
    finally:
        # Always stop the replay server, even on early returns.
        server.stop()
def _tests_to_update(self, attachment, bot_type=None):
    """Map failing text-test names to their actual output from an attachment.

    Unzips the bug attachment, parses its full_results.json, and returns a
    dict of test name -> actual output bytes for every test that failed with
    a TEXT diff. `bot_type` is accepted for interface compatibility but
    unused here.
    """
    _log.info("Processing attachment " + str(attachment.id()))
    zip_file = self.unzip(attachment.contents())
    results = LayoutTestResults.results_from_string(
        zip_file.read("full_results.json"))
    tests = {}
    for failing in results.failing_test_results():
        if failing.type != test_expectations.TEXT:
            continue
        actual_name = TestResultWriter.actual_filename(failing.test_name,
                                                       self.filesystem)
        tests[failing.test_name] = zip_file.read(actual_name)
    return tests
def _look_for_new_crash_logs(self, run_results, start_time):
    """Since crash logs can take a long time to be written out if the system is
    under stress do a second pass at the end of the test run.

    run_results: the results of the test run
    start_time: time the tests started at. We're looking for crash logs after that time.
    """
    # (test, process name, pid) for every unexpected crash seen during the run.
    crashed_processes = [
        [test_name, failure.process_name, failure.pid]
        for test_name, result in run_results.unexpected_results_by_name.iteritems()
        if result.type == test_expectations.CRASH
        for failure in result.failures
        if isinstance(failure, test_failures.FailureCrash)]

    def writer_for(test_name):
        # Artifacts go into the per-test results directory.
        return TestResultWriter(self._port._filesystem, self._port,
                                self._port.results_directory(), test_name)

    sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
    for test_name, sample_file in (sample_files or {}).iteritems():
        writer_for(test_name).copy_sample_file(sample_file)

    known_crashers = set(process[0] for process in crashed_processes)
    crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
    for test_name, crash_log in (crash_logs or {}).iteritems():
        writer_for(test_name).write_crash_log(crash_log)
        # Check if this crashing 'test' is already in list of crashed_processes, if not add it to the run_results
        if test_name not in known_crashers:
            result = test_results.TestResult(test_name)
            result.type = test_expectations.CRASH
            result.is_other_crash = True
            run_results.add(result, expected=False, test_is_slow=False)
            _log.debug("Adding results for other crash: " + str(test_name))
def _tests_to_update(self, bot_name):
    """Map failing test names to their actual output from a bot's EWS archive.

    Downloads the bot's layout-tests archive, parses its full_results.json,
    and returns a dict of test name -> actual output bytes for every test
    that failed with a TEXT or MISSING result.
    """
    bot_results = self.ews_results[bot_name]
    archive_url = bot_results['layout-tests-archive-url']
    _log.info("{} archive: {}".format(bot_name, archive_url))
    _log.info("{} status: {}".format(
        bot_name, bot_results['layout-tests-results-string']))
    zip_file = self.unzip(requests.get(archive_url).content)
    results = LayoutTestResults.results_from_string(
        zip_file.read("full_results.json"))
    wanted_types = (test_expectations.TEXT, test_expectations.MISSING)
    updates = {}
    for failing in results.failing_test_results():
        if failing.type in wanted_types:
            updates[failing.test_name] = zip_file.read(
                TestResultWriter.actual_filename(failing.test_name, self.filesystem))
    return updates
def _look_for_new_crash_logs(self, run_results, start_time):
    """Since crash logs can take a long time to be written out if the system is
    under stress do a second pass at the end of the test run.

    run_results: the results of the test run
    start_time: time the tests started at. We're looking for crash logs after that time.
    """
    # Gather [test, process_name, pid] for every unexpectedly-crashed test so
    # the port can match late-arriving logs/samples back to their tests.
    crashed_processes = []
    for test, result in run_results.unexpected_results_by_name.iteritems():
        if (result.type != test_expectations.CRASH):
            continue
        for failure in result.failures:
            if not isinstance(failure, test_failures.FailureCrash):
                continue
            crashed_processes.append([test, failure.process_name, failure.pid])
    # Copy any sample files found for those crashes into the results dir.
    sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
    if sample_files:
        for test, sample_file in sample_files.iteritems():
            writer = TestResultWriter(self._port._filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.copy_sample_file(sample_file)
    crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
    if crash_logs:
        for test, crash_log in crash_logs.iteritems():
            writer = TestResultWriter(self._port._filesystem, self._port,
                                      self._port.results_directory(), test)
            writer.write_crash_log(crash_log)
            # Check if this crashing 'test' is already in list of crashed_processes, if not add it to the run_results
            if not any(process[0] == test for process in crashed_processes):
                # A log for a test we never recorded as crashed means some
                # other process crashed; surface it as an extra CRASH result.
                result = test_results.TestResult(test)
                result.type = test_expectations.CRASH
                result.is_other_crash = True
                run_results.add(result, expected=False, test_is_slow=False)
                _log.debug("Adding results for other crash: " + str(test))