def _failures_from_row(cls, row, table_title):
    """Map a results-table title to the failure objects it represents.

    Args:
        row: The parsed results-table row (only consulted for the FAIL table).
        table_title: Title of the table the row came from.

    Returns:
        A list of test_failures.* instances, or None when the title is not
        one this class knows how to interpret.
    """
    # The FAIL table needs the row contents; everything else depends only
    # on the title, so it is handled by the lazy dispatch table below.
    if table_title == cls.fail_key:
        return cls._failures_from_fail_row(row)
    # Built per call so the *_key attributes are read from cls at call time.
    # Lambdas keep failure construction lazy (only the matching entry runs).
    factories = {
        cls.crash_key:
            lambda: [test_failures.FailureCrash()],
        cls.webprocess_crash_key:
            lambda: [test_failures.FailureCrash(process_name="WebProcess")],
        cls.timeout_key:
            lambda: [test_failures.FailureTimeout()],
        cls.missing_key:
            lambda: [test_failures.FailureMissingResult(),
                     test_failures.FailureMissingImageHash(),
                     test_failures.FailureMissingImage()],
    }
    factory = factories.get(table_title)
    return factory() if factory is not None else None
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for test_name carrying the failures implied by result_type.

    Only TIMEOUT and CRASH translate into failure objects here; any other
    result_type (including the default PASS) produces an empty failure list.
    """
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    else:
        failures = []
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
        driver_output: The output from the driver.
        reference_filename: The full path to the reference file which
            produced the driver_output.  This arg is optional and should be
            used only in reftests until we have a better way to know which
            html file is used for producing the driver_output.
    """
    # A non-None reference_filename marks this as a reftest run; the flag is
    # threaded into each failure object.
    is_reftest = bool(reference_filename)
    testname = (self._port.relative_test_filename(reference_filename)
                if reference_filename else self._test_name)
    failures = []
    if driver_output.timeout:
        failures.append(test_failures.FailureTimeout(is_reftest))
    if driver_output.crash:
        failures.append(test_failures.FailureCrash(is_reftest,
                                                   driver_output.crashed_process_name,
                                                   driver_output.crashed_pid))
        if driver_output.error:
            _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
        else:
            _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
    # Dump stderr line by line; this also prints the lines announced by the
    # "crashed, (stderr lines):" header above.
    for line in driver_output.error.splitlines():
        _log.debug("  {}".format(string_utils.decode(line, target_type=str)))
    return failures
def _failure_types_from_actual_result(self, actual):
    """Translate an 'actual' result constant into a list of failure objects.

    Returns an empty list for PASS, and for unrecognized values (after
    logging a warning).
    """
    # FIXME: There doesn't seem to be a full list of all possible values of
    # 'actual' anywhere. However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
    # is a useful reference as that's for "old" style results.json files
    if actual == test_expectations.PASS:
        return []
    elif actual == test_expectations.TEXT:
        return [test_failures.FailureTextMismatch()]
    elif actual == test_expectations.IMAGE:
        return [test_failures.FailureImageHashMismatch()]
    elif actual == test_expectations.IMAGE_PLUS_TEXT:
        return [
            test_failures.FailureImageHashMismatch(),
            test_failures.FailureTextMismatch()
        ]
    elif actual == test_expectations.AUDIO:
        return [test_failures.FailureAudioMismatch()]
    elif actual == test_expectations.TIMEOUT:
        return [test_failures.FailureTimeout()]
    elif actual == test_expectations.CRASH:
        # NOTE: We don't know what process crashed from the json, just that a process crashed.
        return [test_failures.FailureCrash()]
    elif actual == test_expectations.MISSING:
        return [
            test_failures.FailureMissingResult(),
            test_failures.FailureMissingImageHash(),
            test_failures.FailureMissingImage()
        ]
    else:
        # BUG FIX: was a bare `log(...)` call (undefined name); use the
        # module logger as the sibling implementations of this method do.
        _log.warning("Failed to handle: %s" % self._result_dict['actual'])
        return []
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
        driver_output: The output from the driver.
        reference_filename: The full path to the reference file which
            produced the driver_output.  This arg is optional and should be
            used only in reftests until we have a better way to know which
            html file is used for producing the driver_output.
    """
    # BUG FIX: removed `fs = self._port._filesystem`, an unused local
    # (also reached into the port's private attribute for no reason).
    failures = []
    if driver_output.timeout:
        failures.append(
            test_failures.FailureTimeout(bool(reference_filename)))
    # When this is a reftest, report the failure against the reference file.
    if reference_filename:
        testname = self._port.relative_test_filename(reference_filename)
    else:
        testname = self._test_name
    if driver_output.crash:
        failures.append(
            test_failures.FailureCrash(bool(reference_filename)))
        _log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, testname,
                                                  driver_output.error))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:\n%s" % (self._worker_name, testname,
                                                       driver_output.error))
    return failures
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for test_name carrying the failures implied by result_type.

    TIMEOUT, AUDIO, CRASH, and LEAK map to a single corresponding failure
    object; anything else (including the default PASS) yields no failures.
    """
    # Classes, not instances: the matching failure is constructed only when
    # its result_type is actually requested.
    failure_factories = {
        test_expectations.TIMEOUT: test_failures.FailureTimeout,
        test_expectations.AUDIO: test_failures.FailureAudioMismatch,
        test_expectations.CRASH: test_failures.FailureCrash,
        test_expectations.LEAK: test_failures.FailureLeak,
    }
    factory = failure_factories.get(result_type)
    failures = [factory()] if factory is not None else []
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for test_name carrying the failures implied by result_type.

    TIMEOUT, AUDIO, CRASH, and LEAK map to a corresponding failure list;
    anything else (including the default PASS) yields no failures.
    """
    # Lambdas keep construction lazy: only the matching entry ever runs.
    failure_factories = {
        test_expectations.TIMEOUT:
            lambda: [test_failures.FailureTimeout()],
        test_expectations.AUDIO:
            lambda: [test_failures.FailureAudioMismatch()],
        test_expectations.CRASH:
            lambda: [test_failures.FailureCrash()],
        test_expectations.LEAK:
            lambda: [test_failures.FailureDocumentLeak(
                ['http://localhost:8000/failures/expected/leak.html'])],
    }
    make_failures = failure_factories.get(result_type, lambda: [])
    return test_results.TestResult(test_name, failures=make_failures(), test_run_time=run_time)
def _handle_error(self, driver_output, reference_filename=None):
    """Returns test failures if some unusual errors happen in driver's run.

    Args:
        driver_output: The output from the driver.
        reference_filename: The full path to the reference file which
            produced the driver_output.  This arg is optional and should be
            used only in reftests until we have a better way to know which
            html file is used for producing the driver_output.
    """
    # BUG FIX: removed `fs = self._filesystem`, an unused local.
    failures = []
    if driver_output.timeout:
        failures.append(
            test_failures.FailureTimeout(bool(reference_filename)))
    # When this is a reftest, report the failure against the reference file.
    if reference_filename:
        testname = self._port.relative_test_filename(reference_filename)
    else:
        testname = self._test_name
    if driver_output.crash:
        failures.append(
            test_failures.FailureCrash(
                bool(reference_filename),
                driver_output.crashed_process_name,
                driver_output.crashed_pid,
                self._port.output_contains_sanitizer_messages(
                    driver_output.crash_log)))
        if driver_output.error:
            _log.debug("%s %s crashed, (stderr lines):" %
                       (self._worker_name, testname))
        else:
            _log.debug("%s %s crashed, (no stderr)" %
                       (self._worker_name, testname))
    elif driver_output.leak:
        failures.append(
            test_failures.FailureLeak(bool(reference_filename),
                                      driver_output.leak_log))
        _log.debug("%s %s leaked" % (self._worker_name, testname))
    elif driver_output.error:
        _log.debug("%s %s output stderr lines:" %
                   (self._worker_name, testname))
    # Dump stderr line by line; this also prints the lines announced by the
    # "crashed, (stderr lines):" header above.
    for line in driver_output.error.splitlines():
        _log.debug("  %s" % line)
    return failures
def _failure_types_from_actual_result(self, actual):
    """Translate an 'actual' result constant into a list of failure objects.

    Returns an empty list for PASS, and for unrecognized values (after
    logging a warning).
    """
    # FIXME: There doesn't seem to be a full list of all possible values of
    # 'actual' anywhere. However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
    # is a useful reference as that's for "old" style results.json files
    #
    # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
    # now so that we can parse old results.json files.
    if actual == test_expectations.PASS:
        return []
    # LEAK is the only value that needs extra data from the result dict.
    if actual == test_expectations.LEAK:
        urls = [url_dict['document']
                for url_dict in self._result_dict['leaks']]
        return [test_failures.FailureDocumentLeak(urls)]
    # Lazy factories: only the matching entry constructs failure objects.
    factories = {
        test_expectations.FAIL:
            lambda: [test_failures.FailureTextMismatch(),
                     test_failures.FailureImageHashMismatch(),
                     test_failures.FailureAudioMismatch()],
        test_expectations.TEXT:
            lambda: [test_failures.FailureTextMismatch()],
        test_expectations.IMAGE:
            lambda: [test_failures.FailureImageHashMismatch()],
        test_expectations.IMAGE_PLUS_TEXT:
            lambda: [test_failures.FailureImageHashMismatch(),
                     test_failures.FailureTextMismatch()],
        test_expectations.AUDIO:
            lambda: [test_failures.FailureAudioMismatch()],
        test_expectations.TIMEOUT:
            lambda: [test_failures.FailureTimeout()],
        # NOTE: We don't know what process crashed from the json, just that a process crashed.
        test_expectations.CRASH:
            lambda: [test_failures.FailureCrash()],
        test_expectations.MISSING:
            lambda: [test_failures.FailureMissingResult(),
                     test_failures.FailureMissingImageHash(),
                     test_failures.FailureMissingImage()],
    }
    factory = factories.get(actual)
    if factory is None:
        _log.warning("Failed to handle: %s" % self._result_dict['actual'])
        return []
    return factory()
def _failure_types_from_actual_result(self, actual):
    """Translate an 'actual' result constant into a list of failure objects.

    Returns an empty list for PASS, and for unrecognized values (after
    logging a warning).
    """
    # FIXME: There doesn't seem to be a full list of all possible values of
    # 'actual' anywhere.
    #
    # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
    # now so that we can parse old results.json files.
    if actual == test_expectations.PASS:
        return []
    # Lazy factories: only the matching entry constructs failure objects.
    factories = {
        test_expectations.FAIL:
            lambda: [test_failures.FailureTextMismatch(),
                     test_failures.FailureImageHashMismatch(),
                     test_failures.FailureAudioMismatch()],
        test_expectations.TEXT:
            lambda: [test_failures.FailureTextMismatch()],
        test_expectations.IMAGE:
            lambda: [test_failures.FailureImageHashMismatch()],
        test_expectations.IMAGE_PLUS_TEXT:
            lambda: [test_failures.FailureImageHashMismatch(),
                     test_failures.FailureTextMismatch()],
        test_expectations.AUDIO:
            lambda: [test_failures.FailureAudioMismatch()],
        test_expectations.TIMEOUT:
            lambda: [test_failures.FailureTimeout()],
        # NOTE: We don't know what process crashed from the json, just that a process crashed.
        test_expectations.CRASH:
            lambda: [test_failures.FailureCrash()],
        test_expectations.MISSING:
            lambda: [test_failures.FailureMissingResult(),
                     test_failures.FailureMissingImageHash(),
                     test_failures.FailureMissingImage()],
    }
    factory = factories.get(actual)
    if factory is None:
        _log.warning("Failed to handle: %s" % self._result_dict['actual'])
        return []
    return factory()