Code example #1
    def _run_rebaseline(self):
        """Similar to _run_compare_test(), but has the side effect of updating
        or adding baselines. This is called when --reset-results and/or
        --copy-baselines are specified in the command line. If --reset-results,
        in the returned result we treat baseline mismatch as success."""
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()

        all_failures = self._compare_output(expected_driver_output,
                                            driver_output)

        if self._options.reset_results:
            # Only report leaks, timeouts, and crashes; treat all other
            # failures as successes.
            reported_failures = self._handle_error(driver_output)
        else:
            # Report comparison failures between the baseline and the actual
            # output, as well as leaks, timeouts, and crashes.
            reported_failures = all_failures

        self._update_or_add_new_baselines(driver_output, all_failures)

        return build_test_result(driver_output,
                                 self._test_name,
                                 retry_attempt=self._retry_attempt,
                                 failures=reported_failures,
                                 test_run_time=driver_output.test_time,
                                 pid=driver_output.pid,
                                 crash_site=driver_output.crash_site)
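
The --reset-results branch above keeps only failures that rewriting the baselines cannot fix (crashes, timeouts, leaks) and treats every comparison mismatch as a success. A minimal standalone sketch of that filtering idea, using placeholder failure classes that merely stand in for the real failure types produced by _handle_error and _compare_output:

# Placeholder failure classes for illustration only; the real test harness
# has its own failure model.
class FailureCrash: pass
class FailureTimeout: pass
class FailureLeak: pass
class FailureTextMismatch: pass

def failures_to_report(all_failures, reset_results):
    """Under --reset-results, report only crash/timeout/leak failures;
    baseline mismatches are dropped because the baselines get rewritten."""
    if not reset_results:
        return all_failures
    fatal = (FailureCrash, FailureTimeout, FailureLeak)
    return [f for f in all_failures if isinstance(f, fatal)]

failures = [FailureTextMismatch(), FailureTimeout()]
print([type(f).__name__ for f in failures_to_report(failures, reset_results=True)])
# Prints: ['FailureTimeout']
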
Code example #2
    def _run_compare_test(self):
        """Runs the signle test and returns test result."""
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()
        failures = self._compare_output(expected_driver_output, driver_output)

        return build_test_result(
            driver_output, self._test_name, retry_attempt=self._retry_attempt,
            failures=failures, test_run_time=driver_output.test_time,
            pid=driver_output.pid, crash_site=driver_output.crash_site)
Code example #3
    def _run_sanitized_test(self):
        # Running a sanitized test means that we ignore the actual test output
        # and only look for timeouts and crashes (real or forced by the
        # driver). Most crashes should indicate problems found by a sanitizer
        # (ASan, LSan, etc.), but other crashes and timeouts are reported as
        # well in order to detect at least *some* basic failures.
        driver_output = self._driver.run_test(self._driver_input())
        expected_driver_output = self._expected_driver_output()
        failures = self._handle_error(driver_output)

        test_result = build_test_result(
            driver_output, self._test_name, retry_attempt=self._retry_attempt,
            failures=failures, test_run_time=driver_output.test_time,
            pid=driver_output.pid, crash_site=driver_output.crash_site)
        return test_result
Code example #4
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = test_output.test_time

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)
        # This _compare_output compares text if expected text exists, ignores
        # image, checks for extra baselines, and generates crash or timeout
        # failures if needed.
        compare_text_failures = self._compare_output(expected_text_output,
                                                     test_output)
        # If the test crashed, timed out, or leaked, there's no point in
        # running the reference at all. This can save a lot of execution time
        # when there are many crashes or timeouts.
        if test_output.crash or test_output.timeout or test_output.leak:
            return build_test_result(test_output,
                                     self._test_name,
                                     retry_attempt=self._retry_attempt,
                                     failures=compare_text_failures,
                                     test_run_time=test_output.test_time,
                                     pid=test_output.pid,
                                     crash_site=test_output.crash_site)

        # A reftest can have multiple match references and multiple mismatch
        # references; the test fails if any mismatch reference matches or if
        # none of the match references match. To minimize the number of
        # references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can. Note that
        # sorting by the expectation sorts "!=" before "==", so this is easy
        # to do.
        expected_output = None
        reference_test_names = []
        reftest_failures = []
        args = self._port.args_for_test(self._test_name)
        # Sort self._reference_files so that mismatch references come first.
        for expectation, reference_filename in sorted(self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       args=args)
            expected_output = self._driver.run_test(driver_input)
            total_test_time += expected_output.test_time
            reftest_failures = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if ((expectation == '!=' and reftest_failures)
                    or (expectation == '==' and not reftest_failures)):
                break

        assert expected_output

        # Carry over the expected text and combine the text comparison
        # failures with the reftest failures.
        expected_output.text = expected_text_output.text
        failures = reftest_failures + compare_text_failures

        # FIXME: We don't really deal with a mix of reftest types properly. We
        # pass in a set() to reftest_type and only really handle the first of
        # the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))

        return build_test_result(test_output,
                                 self._test_name,
                                 retry_attempt=self._retry_attempt,
                                 failures=failures,
                                 test_run_time=total_test_time,
                                 reftest_type=reftest_type,
                                 pid=test_output.pid,
                                 crash_site=test_output.crash_site,
                                 references=reference_test_names)
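
The loop in this example relies on Python's element-wise tuple ordering: because '!' (0x21) sorts before '=' (0x3D) in ASCII, sorted() naturally places the '!=' (mismatch) references ahead of the '==' (match) references, which is what enables the early short-circuit. A quick standalone check (the reference file names are made up for illustration):

# Tuples compare element-wise, so the expectation string decides the order:
# '!' < '=' in ASCII, hence the '!=' entries come first.
reference_files = [
    ('==', 'fast/example-expected.html'),
    ('!=', 'fast/example-expected-mismatch.html'),
]
print(sorted(reference_files))
# [('!=', 'fast/example-expected-mismatch.html'),
#  ('==', 'fast/example-expected.html')]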