Ejemplo n.º 1
0
    def _handle_error(self, driver_output, reference_filename=None):
        """Collect test failures for unusual errors in the driver's run.

        Args:
          driver_output: The output from the driver.
          reference_filename: Full path to the reference file that produced
              driver_output. Optional; should be used only in reftests until
              we have a better way to know which html file produced the output.
        Returns:
          A list of test_failures objects (possibly empty).
        """
        is_reftest = bool(reference_filename)
        failures = []
        if driver_output.timeout:
            failures.append(test_failures.FailureTimeout(is_reftest))

        # Reftests are reported against the reference file, not the test file.
        testname = (self._port.relative_test_filename(reference_filename)
                    if reference_filename else self._test_name)

        if driver_output.crash:
            failures.append(test_failures.FailureCrash(is_reftest,
                                                       driver_output.crashed_process_name,
                                                       driver_output.crashed_pid))
            if driver_output.error:
                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
            else:
                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
        # Stderr is echoed to the debug log regardless of crash/timeout state.
        for stderr_line in driver_output.error.splitlines():
            _log.debug("  {}".format(string_utils.decode(stderr_line, target_type=str)))
        return failures
Ejemplo n.º 2
0
    def _handle_error(self, driver_output, reference_filename=None):
        """Returns test failures if some unusual errors happen in driver's run.

        Args:
          driver_output: The output from the driver.
          reference_filename: The full path to the reference file which produced the driver_output.
              This arg is optional and should be used only in reftests until we have a better way to know
              which html file is used for producing the driver_output.
        Returns:
          A list of test_failures objects (possibly empty).
        """
        # Fix: removed the unused local `fs = self._port._filesystem`;
        # nothing in this method touches the filesystem.
        failures = []
        if driver_output.timeout:
            failures.append(
                test_failures.FailureTimeout(bool(reference_filename)))

        # Reftests are reported against the reference file, not the test file.
        if reference_filename:
            testname = self._port.relative_test_filename(reference_filename)
        else:
            testname = self._test_name

        if driver_output.crash:
            failures.append(
                test_failures.FailureCrash(bool(reference_filename)))
            _log.debug("%s Stacktrace for %s:\n%s" %
                       (self._worker_name, testname, driver_output.error))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:\n%s" %
                       (self._worker_name, testname, driver_output.error))
        return failures
Ejemplo n.º 3
0
 def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
     """Build a TestResult for *test_name* whose failures match *result_type*."""
     if result_type == test_expectations.TIMEOUT:
         failures = [test_failures.FailureTimeout()]
     elif result_type == test_expectations.CRASH:
         failures = [test_failures.FailureCrash()]
     else:
         # PASS and anything unrecognized carry no failures.
         failures = []
     return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Ejemplo n.º 4
0
 def _failure_types_from_actual_result(self, actual):
     """Translate an 'actual' value from results.json into failure objects.

     Returns a list of test_failures instances; empty for PASS or for values
     we do not recognize.
     """
     # FIXME: There doesn't seem to be a full list of all possible values of
     # 'actual' anywhere.  However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
     # is a useful reference as that's for "old" style results.json files
     if actual == test_expectations.PASS:
         return []
     elif actual == test_expectations.TEXT:
         return [test_failures.FailureTextMismatch()]
     elif actual == test_expectations.IMAGE:
         return [test_failures.FailureImageHashMismatch()]
     elif actual == test_expectations.IMAGE_PLUS_TEXT:
         return [
             test_failures.FailureImageHashMismatch(),
             test_failures.FailureTextMismatch()
         ]
     elif actual == test_expectations.AUDIO:
         return [test_failures.FailureAudioMismatch()]
     elif actual == test_expectations.TIMEOUT:
         return [test_failures.FailureTimeout()]
     elif actual == test_expectations.CRASH:
         # NOTE: We don't know what process crashed from the json, just that a process crashed.
         return [test_failures.FailureCrash()]
     elif actual == test_expectations.MISSING:
         return [
             test_failures.FailureMissingResult(),
             test_failures.FailureMissingImageHash(),
             test_failures.FailureMissingImage()
         ]
     else:
         # Fix: the original called a bare, undefined `log(...)` here (a
         # NameError at runtime); use the module logger like the sibling
         # implementations of this method do.
         _log.warning("Failed to handle: %s" % self._result_dict['actual'])
         return []
Ejemplo n.º 5
0
    def _run_test_in_another_thread(self, test_input, thread_timeout_sec,
                                    stop_when_done):
        """Run a test in a separate thread, enforcing a hard time limit.

        Since we can only detect the termination of a thread, not any internal
        state or progress, we can only run per-test timeouts when running test
        files singly.

        Args:
          test_input: Object containing the test filename and timeout
          thread_timeout_sec: time to wait before killing the driver process.
          stop_when_done: forwarded unchanged to _run_single_test.
        Returns:
          A TestResult
        """
        # Bind self under a distinct name so the nested thread class below can
        # close over it without shadowing its own `self`.
        worker = self

        # NOTE(review): the worker number appears to be derived from the last
        # '/'-separated component of the TaskPool process name -- confirm.
        driver = self._port.create_driver(
            int((TaskPool.Process.name).split('/')[-1]),
            self._port.get_option('no_timeout'))

        class SingleTestThread(threading.Thread):
            def __init__(self):
                threading.Thread.__init__(self)
                # Set by run(); stays None if the thread never completes.
                self.result = None

            def run(self):
                self.result = worker._run_single_test(driver, test_input,
                                                      stop_when_done)

        thread = SingleTestThread()
        thread.start()
        # join() returns after thread_timeout_sec even if the thread is still
        # running; is_alive() below distinguishes completion from a hang.
        thread.join(thread_timeout_sec)
        result = thread.result
        failures = []
        if thread.is_alive():
            # If join() returned with the thread still running, the
            # DumpRenderTree is completely hung and there's nothing
            # more we can do with it.  We have to kill all the
            # DumpRenderTrees to free it up. If we're running more than
            # one DumpRenderTree thread, we'll end up killing the other
            # DumpRenderTrees too, introducing spurious crashes. We accept
            # that tradeoff in order to avoid losing the rest of this
            # thread's results.
            _log.error('Test thread hung: killing all DumpRenderTrees')
            failures = [test_failures.FailureTimeout()]
        else:
            # NOTE(review): if the thread finished without setting a result,
            # `result` is None and result.test_name below would raise --
            # presumably _run_single_test always returns a TestResult; confirm.
            failure_results = self._do_post_tests_work(driver)
            for failure_result in failure_results:
                if failure_result.test_name == result.test_name:
                    result.convert_to_failure(failure_result)

        driver.stop()

        if not result:
            # Hung run: synthesize a result carrying the timeout failure
            # recorded above.
            result = test_results.TestResult(test_input,
                                             failures=failures,
                                             test_run_time=0)
        return result
Ejemplo n.º 6
0
 def _failures_from_row(cls, row, table_title):
     """Return the failure objects implied by *table_title*, or None if unknown."""
     # Fail rows carry per-row detail and need their own parser.
     if table_title == cls.fail_key:
         return cls._failures_from_fail_row(row)
     # The remaining titles map directly onto fixed failure lists; build
     # lazily so nothing is constructed unless its title matches.
     builders = {
         cls.crash_key: lambda: [test_failures.FailureCrash()],
         cls.webprocess_crash_key: lambda: [test_failures.FailureCrash(process_name="WebProcess")],
         cls.timeout_key: lambda: [test_failures.FailureTimeout()],
         cls.missing_key: lambda: [test_failures.FailureMissingResult(),
                                   test_failures.FailureMissingImageHash(),
                                   test_failures.FailureMissingImage()],
     }
     builder = builders.get(table_title)
     return builder() if builder else None
Ejemplo n.º 7
0
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for *test_name* whose failures match *result_type*."""
    # Each recognized result type maps to exactly one failure class;
    # anything else (notably PASS) yields no failures.
    failure_classes = {
        test_expectations.TIMEOUT: test_failures.FailureTimeout,
        test_expectations.AUDIO: test_failures.FailureAudioMismatch,
        test_expectations.CRASH: test_failures.FailureCrash,
        test_expectations.LEAK: test_failures.FailureLeak,
    }
    if result_type in failure_classes:
        failures = [failure_classes[result_type]()]
    else:
        failures = []
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Ejemplo n.º 8
0
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    """Build a TestResult for *test_name* whose failures match *result_type*."""
    failures = []
    if result_type == test_expectations.LEAK:
        # Leaks carry the leaking document's URL.
        failures = [
            test_failures.FailureDocumentLeak(
                ['http://localhost:8000/failures/expected/leak.html'])
        ]
    elif result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout()]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch()]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash()]
    return test_results.TestResult(
        test_name, failures=failures, test_run_time=run_time)
Ejemplo n.º 9
0
    def _handle_error(self, driver_output, reference_filename=None):
        """Returns test failures if some unusual errors happen in driver's run.

        Args:
          driver_output: The output from the driver.
          reference_filename: The full path to the reference file which produced the driver_output.
              This arg is optional and should be used only in reftests until we have a better way to know
              which html file is used for producing the driver_output.
        Returns:
          A list of test_failures objects (possibly empty).
        """
        # Fix: removed the unused local `fs = self._filesystem`; nothing in
        # this method touches the filesystem.
        failures = []
        if driver_output.timeout:
            failures.append(
                test_failures.FailureTimeout(bool(reference_filename)))

        # Reftests are reported against the reference file, not the test file.
        if reference_filename:
            testname = self._port.relative_test_filename(reference_filename)
        else:
            testname = self._test_name

        if driver_output.crash:
            failures.append(
                test_failures.FailureCrash(
                    bool(reference_filename),
                    driver_output.crashed_process_name,
                    driver_output.crashed_pid,
                    self._port.output_contains_sanitizer_messages(
                        driver_output.crash_log)))
            if driver_output.error:
                _log.debug("%s %s crashed, (stderr lines):" %
                           (self._worker_name, testname))
            else:
                _log.debug("%s %s crashed, (no stderr)" %
                           (self._worker_name, testname))
        elif driver_output.leak:
            failures.append(
                test_failures.FailureLeak(bool(reference_filename),
                                          driver_output.leak_log))
            _log.debug("%s %s leaked" % (self._worker_name, testname))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:" %
                       (self._worker_name, testname))
        # Stderr is echoed to the debug log regardless of which branch ran.
        for line in driver_output.error.splitlines():
            _log.debug("  %s" % line)
        return failures
Ejemplo n.º 10
0
 def _failure_types_from_actual_result(self, actual):
     """Translate an 'actual' value from results.json into failure objects.

     Returns a list of test_failures instances; empty for PASS or for
     values we do not recognize (which are logged as warnings).
     """
     # FIXME: There doesn't seem to be a full list of all possible values of
     # 'actual' anywhere.  However JSONLayoutResultsGenerator.FAILURE_TO_CHAR
     # is a useful reference as that's for "old" style results.json files
     #
     # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
     # now so that we can parse old results.json files.
     if actual == test_expectations.PASS:
         return []
     if actual == test_expectations.LEAK:
         # Leak entries carry the URLs of the leaked documents.
         documents = [entry['document'] for entry in self._result_dict['leaks']]
         return [test_failures.FailureDocumentLeak(documents)]
     # Every other recognized value maps to a fixed tuple of failure
     # classes, instantiated only when the value matches.
     failure_classes = {
         test_expectations.FAIL: (test_failures.FailureTextMismatch,
                                  test_failures.FailureImageHashMismatch,
                                  test_failures.FailureAudioMismatch),
         test_expectations.TEXT: (test_failures.FailureTextMismatch,),
         test_expectations.IMAGE: (test_failures.FailureImageHashMismatch,),
         test_expectations.IMAGE_PLUS_TEXT: (test_failures.FailureImageHashMismatch,
                                             test_failures.FailureTextMismatch),
         test_expectations.AUDIO: (test_failures.FailureAudioMismatch,),
         test_expectations.TIMEOUT: (test_failures.FailureTimeout,),
         # NOTE: We don't know what process crashed from the json, just that a process crashed.
         test_expectations.CRASH: (test_failures.FailureCrash,),
         test_expectations.MISSING: (test_failures.FailureMissingResult,
                                     test_failures.FailureMissingImageHash,
                                     test_failures.FailureMissingImage),
     }
     if actual in failure_classes:
         return [failure_class() for failure_class in failure_classes[actual]]
     _log.warning("Failed to handle: %s" % self._result_dict['actual'])
     return []
 def _failure_types_from_actual_result(self, actual):
     """Translate an 'actual' value from results.json into failure objects.

     Returns a list of test_failures instances; empty for PASS or for
     values we do not recognize (which are logged as warnings).
     """
     # FIXME: There doesn't seem to be a full list of all possible values of
     # 'actual' anywhere.
     #
     # FIXME: TEXT, IMAGE_PLUS_TEXT, and AUDIO are obsolete but we keep them for
     # now so that we can parse old results.json files.
     if actual == test_expectations.PASS:
         return []
     # Every other recognized value maps to a fixed tuple of failure
     # classes, instantiated only when the value matches.
     failure_classes = {
         test_expectations.FAIL: (test_failures.FailureTextMismatch,
                                  test_failures.FailureImageHashMismatch,
                                  test_failures.FailureAudioMismatch),
         test_expectations.TEXT: (test_failures.FailureTextMismatch,),
         test_expectations.IMAGE: (test_failures.FailureImageHashMismatch,),
         test_expectations.IMAGE_PLUS_TEXT: (test_failures.FailureImageHashMismatch,
                                             test_failures.FailureTextMismatch),
         test_expectations.AUDIO: (test_failures.FailureAudioMismatch,),
         test_expectations.TIMEOUT: (test_failures.FailureTimeout,),
         # NOTE: We don't know what process crashed from the json, just that a process crashed.
         test_expectations.CRASH: (test_failures.FailureCrash,),
         test_expectations.MISSING: (test_failures.FailureMissingResult,
                                     test_failures.FailureMissingImageHash,
                                     test_failures.FailureMissingImage),
     }
     if actual in failure_classes:
         return [failure_class() for failure_class in failure_classes[actual]]
     _log.warning("Failed to handle: %s" % self._result_dict['actual'])
     return []