def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline.  (Otherwise, an image from a
        # previous run will be copied into the baseline.)
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)

        args = self._port.args_for_test(self._test_name)
        test_name = self._port.name_for_test(self._test_name)
        return DriverInput(test_name, self._timeout_ms, image_hash, args)
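For reference, these snippets construct DriverInput objects with slightly different signatures (this one passes args as the fourth positional argument; the later ones pass should_run_pixel_test there), because they come from different revisions of the harness. A minimal sketch of such a value object, reconstructed only from the constructor calls shown here; the field names and defaults below are assumptions, not the upstream blinkpy definition:

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class DriverInput:
    # Hypothetical reconstruction based solely on the calls in these examples;
    # the real class may define more fields.
    test_name: str
    timeout_ms: int
    image_hash: Optional[str] = None
    should_run_pixel_test: bool = False
    args: List[str] = field(default_factory=list)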
Example #2
    def input_from_line(self, line):
        # Each line is either "<uri>" or "<uri>'<checksum>".
        vals = line.strip().split(b"'")
        uri = vals[0].decode('utf-8')
        checksum = None
        if len(vals) == 2:
            checksum = vals[1]
        elif len(vals) != 1:
            raise NotImplementedError

        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)

        return DriverInput(test_name, 0, checksum, args=[])
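The two line shapes this parser accepts are easiest to see in a small stand-alone rework, with the self._driver / self._port name resolution stubbed out (the helper below is hypothetical, not part of blinkpy):

# Hypothetical stand-alone version of the parsing above; note that the
# checksum is left as bytes while the URI is decoded to str.
def parse_line(line):
    vals = line.strip().split(b"'")
    uri = vals[0].decode('utf-8')
    checksum = vals[1] if len(vals) == 2 else None
    return uri, checksum

assert parse_line(b"fast/css/test.html\n") == ('fast/css/test.html', None)
assert parse_line(b"fast/css/test.html'0123abcd\n") == ('fast/css/test.html', b'0123abcd')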
Example #3
    def input_from_line(self, line):
        # Each line is "<uri>", "<uri>'--pixel-test", or
        # "<uri>'--pixel-test'<checksum>".
        vals = line.strip().split("'")
        uri = vals[0]
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            raise NotImplementedError

        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)

        return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
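Example #3 extends the line protocol with a --pixel-test token between the URI and the optional checksum. A stand-alone rework of the same parsing, showing the three accepted line shapes (the driver/port name resolution is omitted; the helper name is made up for illustration):

# Hypothetical stand-alone version of the Example #3 parsing.
def parse_pixel_test_line(line):
    vals = line.strip().split("'")
    uri, checksum, should_run_pixel_tests = vals[0], None, False
    if len(vals) == 2 and vals[1] == '--pixel-test':
        should_run_pixel_tests = True
    elif len(vals) == 3 and vals[1] == '--pixel-test':
        should_run_pixel_tests = True
        checksum = vals[2]
    elif len(vals) != 1:
        raise NotImplementedError
    return uri, should_run_pixel_tests, checksum

assert parse_pixel_test_line("fast/css/test.html'--pixel-test'0123abcd\n") == (
    'fast/css/test.html', True, '0123abcd')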
Example #4
    def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline.  (Otherwise, an image from a
        # previous run will be copied into the baseline.)
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)

        test_base = self._port.lookup_virtual_test_base(self._test_name)
        if test_base:
            # If the file actually exists under the virtual dir, we want to use it (largely for virtual references),
            # but we want to use the extra command line args either way.
            if self._filesystem.exists(
                    self._port.abspath_for_test(self._test_name)):
                test_name = self._test_name
            else:
                test_name = test_base
            args = self._port.lookup_virtual_test_args(self._test_name)
        else:
            test_name = self._test_name
            args = self._port.lookup_physical_test_args(self._test_name)
        return DriverInput(test_name, self._timeout_ms, image_hash,
                           self._should_run_pixel_test, args)
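This variant resolves virtual tests: lookup_virtual_test_base maps a name like virtual/<suite>/<base-test> back to <base-test>, and the virtual path is only kept when a real file exists under the virtual directory. A rough illustration of that mapping, assuming the usual virtual/<suite>/... naming convention for web tests; the helper below is hypothetical and not the port API:

# Hypothetical illustration of the virtual-name resolution assumed above.
def resolve_virtual_name(test_name, virtual_file_exists):
    parts = test_name.split('/')
    if parts[0] != 'virtual' or len(parts) < 3:
        return test_name            # already a physical test name
    if virtual_file_exists:
        return test_name            # keep the file under virtual/ (e.g. a virtual-only reference)
    return '/'.join(parts[2:])      # strip the "virtual/<suite>/" prefix

assert resolve_virtual_name('virtual/gpu/fast/canvas/a.html', False) == 'fast/canvas/a.html'
assert resolve_virtual_name('virtual/gpu/fast/canvas/a.html', True) == 'virtual/gpu/fast/canvas/a.html'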
Example #5
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = test_output.test_time

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)
        # This _compare_output compares text if expected text exists, ignores
        # image, checks for extra baselines, and generates crash or timeout
        # failures if needed.
        compare_text_failures = self._compare_output(expected_text_output,
                                                     test_output)
        # If the test crashed, timed out, or a leak was detected, there's no point
        # in running the reference at all. This can save a lot of execution time if we
        # have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout or test_output.leak:
            return build_test_result(test_output,
                                     self._test_name,
                                     retry_attempt=self._retry_attempt,
                                     failures=compare_text_failures,
                                     test_run_time=test_output.test_time,
                                     pid=test_output.pid,
                                     crash_site=test_output.crash_site)

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches or none of the match
        # references match. To minimize the number of references we have to check,
        # we run all of the mismatches first, then the matches, and short-circuit
        # out as soon as we can. Note that sorting by the expectation sorts "!="
        # before "==" so this is easy to do.
        expected_output = None
        reference_test_names = []
        reftest_failures = []
        args = self._port.args_for_test(self._test_name)
        # sort self._reference_files to put mismatch tests first
        for expectation, reference_filename in sorted(self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       args=args)
            expected_output = self._driver.run_test(driver_input)
            total_test_time += expected_output.test_time
            reftest_failures = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if ((expectation == '!=' and reftest_failures)
                    or (expectation == '==' and not reftest_failures)):
                break

        assert expected_output

        # Combine the reftest failures with the text-comparison failures.
        expected_output.text = expected_text_output.text
        failures = reftest_failures + compare_text_failures

        # FIXME: We don't really deal with a mix of reftest types properly. We
        # pass in a set() to reftest_type and only really handle the first of
        # the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))

        return build_test_result(test_output,
                                 self._test_name,
                                 retry_attempt=self._retry_attempt,
                                 failures=failures,
                                 test_run_time=total_test_time,
                                 reftest_type=reftest_type,
                                 pid=test_output.pid,
                                 crash_site=test_output.crash_site,
                                 references=reference_test_names)
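The short-circuit in the loop above relies on sorted() placing every mismatch reference before every match reference, which holds because '!' precedes '=' in ASCII, so ('!=', ...) tuples compare smaller than ('==', ...) tuples:

# Minimal check of the ordering the reftest loop depends on.
references = [('==', 'a-expected.html'),
              ('!=', 'a-expected-mismatch.html'),
              ('==', 'b-expected.html')]
assert [expectation for expectation, _ in sorted(references)] == ['!=', '==', '==']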
Example #6
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(),
                                            self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)

        # If the test crashed or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output,
                                               test_output)

            if test_output.crash:
                test_result_writer.write_test_result(
                    self._filesystem, self._port, self._results_directory,
                    self._test_name, test_output, expected_text_output,
                    test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches or none of the match references match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        # sorted() puts all of the "!=" (mismatch) references before the "=="
        # (match) references.
        reference_test_names = []
        for expectation, reference_filename in sorted(self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(
                    self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(
                    self._test_name)
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       should_run_pixel_test=True,
                                       args=args)
            expected_output = self._reference_driver.run_test(
                driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break

        assert expected_output

        if expected_text:
            text_output = DriverOutput(text=test_output.text,
                                       image=None,
                                       image_hash=None,
                                       audio=None)
            text_compare_result = self._compare_output(expected_text_output,
                                                       text_output)
            test_result.failures.extend(text_compare_result.failures)
            test_result.has_repaint_overlay = text_compare_result.has_repaint_overlay
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port,
                                             self._results_directory,
                                             self._test_name, test_output,
                                             expected_output,
                                             test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))
        return TestResult(self._test_name,
                          test_result.failures,
                          total_test_time,
                          test_result.has_stderr,
                          reftest_type=reftest_type,
                          pid=test_result.pid,
                          crash_site=test_result.crash_site,
                          references=reference_test_names,
                          has_repaint_overlay=test_result.has_repaint_overlay)
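The FIXME in both versions refers to how reftest_type is built: the per-reference expectations are deduplicated into a flat list, so the pairing between each expectation and its reference file is lost when a test mixes '==' and '!=' references. A tiny illustration of that collapse:

# Illustrates the reftest_type collapse mentioned in the FIXME comments.
reference_files = [('==', 'a-expected.html'),
                   ('!=', 'a-expected-mismatch.html')]
reftest_type = list(set([ref[0] for ref in reference_files]))
assert sorted(reftest_type) == ['!=', '==']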