Example #1
    def _run_single_test(self, test, driver):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(
            DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' %
                                test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' %
                                test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            got_a_result = False
            for line in output.text.split('\n'):
                if self._result_regex.match(line):
                    self._buildbot_output.write("%s\n" % line)
                    got_a_result = True
                elif line:
                    test_failed = True
                    self._printer.write("%s" % line)
            test_failed = test_failed or not got_a_result

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        return test_failed, driver_need_restart
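A note on the path slicing above: test[self._webkit_base_dir_len + 1:] strips the checkout root, plus its trailing path separator (hence the + 1), from the absolute test path before printing. A quick illustration with a hypothetical base directory:

    webkit_base_dir = '/src/WebKit'  # Hypothetical checkout root.
    webkit_base_dir_len = len(webkit_base_dir)

    test = '/src/WebKit/fast/dom/test.html'
    print(test[webkit_base_dir_len + 1:])  # Prints: fast/dom/test.html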
Example #2
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text, image=None, image_hash=None, audio=None)

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output, test_output)

            if test_output.crash:
                test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                                     self._test_name, test_output, expected_text_output, test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches, or if none of the match references match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(self._test_name)
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name, self._timeout,
                                       image_hash=test_output.image_hash, should_run_pixel_test=True, args=args)
            expected_output = self._reference_driver.run_test(driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break

        assert expected_output

        if expected_text:
            text_output = DriverOutput(text=test_output.text, image=None, image_hash=None, audio=None)
            test_result.failures.extend(self._compare_output(expected_text_output, text_output).failures)
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory,
                                             self._test_name, test_output, expected_output, test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
        return TestResult(self._test_name, test_result.failures, total_test_time,
                          test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
                          references=reference_test_names)
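A note on the putAllMismatchBeforeMatch = sorted alias above: plain lexicographic sorting on the (expectation, filename) pairs happens to order '!=' before '==' because '!' (0x21) precedes '=' (0x3D) in ASCII, so all mismatch references run first. A minimal sketch with illustrative pairs:

    # Illustrative data only; real entries come from self._reference_files.
    reference_files = [
        ('==', 'ref-match.html'),
        ('!=', 'ref-mismatch.html'),
    ]

    # Tuples compare element-wise, and '!=' < '==' lexicographically.
    for expectation, filename in sorted(reference_files):
        print(expectation, filename)
    # Prints:
    # != ref-mismatch.html
    # == ref-match.html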
Example #3
    def _run_single_test(self, test, driver, is_chromium_style):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(
            DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' %
                                test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' %
                                test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(
                    test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return test_failed, driver_need_restart
Example #4
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches, or if none of the match references match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
            reference_test_name = self._port.relative_test_filename(reference_filename)
            reference_test_names.append(reference_test_name)
            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
            test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
        reftest_type = set([reference_file[0] for reference_file in self._reference_files])
        return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
Example #5
    def _run_reftest(self):
        driver_output1 = self._driver.run_test(self._driver_input())
        reference_test_name = self._port.relative_test_filename(self._reference_filename)
        driver_output2 = self._driver.run_test(DriverInput(reference_test_name, self._timeout, driver_output1.image_hash))
        test_result = self._compare_output_with_reference(driver_output1, driver_output2)

        test_result_writer.write_test_result(self._port, self._test_name, driver_output1, driver_output2, test_result.failures)
        return test_result
Example #6
    def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline.  (Otherwise, an image from a
        # previous run will be copied into the baseline.)
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)
        return DriverInput(self._test_name, self._timeout, image_hash)
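The comment above explains why image_hash is threaded through DriverInput: the driver can skip an expensive pixel dump when the freshly rendered image already matches the expected checksum. A minimal sketch of that driver-side early-out, with hashlib.md5 standing in as a hypothetical checksum function (the real one lives inside the driver):

    import hashlib

    def maybe_dump_image(rendered_pixels, expected_hash):
        # Hypothetical stand-in for the driver-side logic: only produce
        # an image dump when the checksum differs from the expected one.
        actual_hash = hashlib.md5(rendered_pixels).hexdigest()
        if expected_hash is not None and actual_hash == expected_hash:
            return None  # Checksums match: skip the dump.
        return rendered_pixels

When generating a new baseline, _driver_input() deliberately leaves image_hash as None, so this early-out can never fire and a fresh image is always produced rather than a stale one being copied into the baseline.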
Example #7
    def run_single(self,
                   driver,
                   path_or_url,
                   time_out_ms,
                   should_run_pixel_test=False):
        return driver.run_test(
            DriverInput(path_or_url,
                        time_out_ms,
                        image_hash=None,
                        should_run_pixel_test=should_run_pixel_test))
Example #8
    def input_from_line(self, line):
        vals = line.strip().split()
        if len(vals) == 3:
            uri, timeout, checksum = vals
        else:
            uri, timeout = vals
            checksum = None

        test_name = self._driver.uri_to_test(uri)
        return DriverInput(test_name, timeout, checksum, self._options.pixel_tests)
Example #9
    def test_crashed_process_name(self):
        self.driver._proc = Mock()

        # Simulate a crash by having stdout close unexpectedly.
        def mock_readline():
            raise IOError
        self.driver._proc.stdout.readline = mock_readline

        self.driver.test_to_uri = lambda test: 'mocktesturi'
        driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, is_reftest=False))
        self.assertEqual(self.driver._port.driver_name(), driver_output.crashed_process_name)
Example #10
    def run_single(self,
                   driver,
                   test_path,
                   time_out_ms,
                   should_run_pixel_test=False):
        return driver.run_test(
            DriverInput(test_path,
                        time_out_ms,
                        image_hash=None,
                        should_run_pixel_test=should_run_pixel_test),
            stop_when_done=False)
Example #11
    def input_from_line(self, line):
        vals = line.strip().split("'")
        if len(vals) == 1:
            uri = vals[0]
            checksum = None
        else:
            uri = vals[0]
            checksum = vals[1]
        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)

        return DriverInput(test_name, 0, checksum, self._options.pixel_tests)
Example #12
    def test_crash_log(self):
        self.driver._proc = Mock()

        # Simulate a crash by having stdout close unexpectedly.
        def mock_readline():
            raise IOError
        self.driver._proc.stdout.readline = mock_readline
        self.driver._proc.pid = 1234

        self.driver.test_to_uri = lambda test: 'mocktesturi'
        self.driver._port.driver_name = lambda: 'mockdriver'
        self.driver._port._get_crash_log = lambda name, pid, out, err, newer_than: 'mockcrashlog'
        driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, should_run_pixel_test=False))
        self.assertTrue(driver_output.crash)
        self.assertEqual(driver_output.crashed_process_name, 'mockdriver')
        self.assertEqual(driver_output.crashed_pid, 1234)
        self.assertEqual(driver_output.crash_log, 'mockcrashlog')
Example #13
    def input_from_line(self, line):
        vals = line.strip().split("'")
        uri = vals[0]
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            raise NotImplementedError

        if uri.startswith('http://') or uri.startswith('https://'):
            test_name = self._driver.uri_to_test(uri)
        else:
            test_name = self._port.relative_test_filename(uri)

        return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
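Unlike the whitespace-delimited variant in Example #8, this parser expects fields separated by single quotes. A standalone re-implementation of just the splitting logic, exercised on the three accepted line shapes (illustrative inputs):

    def parse_line(line):
        # Mirrors input_from_line() above: fields are quote-delimited,
        # e.g. "fast/dom/test.html'--pixel-test'abc123".
        vals = line.strip().split("'")
        checksum = None
        should_run_pixel_tests = False
        if len(vals) == 2 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
        elif len(vals) == 3 and vals[1] == '--pixel-test':
            should_run_pixel_tests = True
            checksum = vals[2]
        elif len(vals) != 1:
            raise NotImplementedError
        return vals[0], should_run_pixel_tests, checksum

    print(parse_line("fast/dom/test.html"))
    # ('fast/dom/test.html', False, None)
    print(parse_line("fast/dom/test.html'--pixel-test"))
    # ('fast/dom/test.html', True, None)
    print(parse_line("fast/dom/test.html'--pixel-test'abc123"))
    # ('fast/dom/test.html', True, 'abc123')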
Example #14
    def run(self, driver, timeout_ms):
        test_times = []

        # Run the test 20 times, discarding the first run's time as a warm-up.
        for i in range(20):
            output = driver.run_test(
                DriverInput(self.path_or_url(), timeout_ms, None, False))
            if self.run_failed(output):
                return None
            if i == 0:
                continue
            test_times.append(output.test_time * 1000)

        test_times = sorted(test_times)

        # Compute the mean and variance using Welford's numerically stable
        # online algorithm; squareSum accumulates the sum of squared
        # deviations (M2) from the running mean.
        squareSum = 0
        mean = 0
        for i, time in enumerate(test_times):
            delta = time - mean
            sweep = i + 1.0
            mean += delta / sweep
            squareSum += delta * delta * (i / sweep)

        middle = len(test_times) // 2
        results = {
            'avg': mean,
            'min': min(test_times),
            'max': max(test_times),
            'median': test_times[middle] if len(test_times) % 2 else
                      (test_times[middle - 1] + test_times[middle]) / 2,
            # Sample standard deviation: squareSum holds the sum of squared
            # deviations, so divide by (n - 1) before the square root.
            'stdev': math.sqrt(squareSum / (len(test_times) - 1)),
            'unit': 'ms',
        }
        self.output_statistics(self.test_name(), results)
        return {self.test_name(): results}
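The accumulation loop above is Welford's online algorithm: after processing n values, squareSum holds the sum of squared deviations from the mean (often called M2), so the sample standard deviation is sqrt(squareSum / (n - 1)). A quick self-contained check against Python's statistics module, on illustrative timings:

    import math
    import statistics

    times = [12.0, 15.5, 11.8, 14.2, 13.1]  # Illustrative timings in ms.

    mean = 0.0
    square_sum = 0.0  # Running sum of squared deviations (Welford's M2).
    for i, t in enumerate(times):
        delta = t - mean
        sweep = i + 1.0
        mean += delta / sweep
        square_sum += delta * delta * (i / sweep)

    assert math.isclose(mean, statistics.mean(times))
    assert math.isclose(math.sqrt(square_sum / (len(times) - 1)),
                        statistics.stdev(times))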
Example #15
    def _driver_input(self):
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline.  (Otherwise, an image from a
        # previous run will be copied into the baseline.)
        image_hash = None
        if self._should_fetch_expected_checksum():
            image_hash = self._port.expected_checksum(self._test_name)

        test_base = self._port.lookup_virtual_test_base(self._test_name)
        if test_base:
            # If the file actually exists under the virtual dir, we want to use it (largely for virtual references),
            # but we want to use the extra command line args either way.
            if self._filesystem.exists(self._port.abspath_for_test(self._test_name)):
                test_name = self._test_name
            else:
                test_name = test_base
            args = self._port.lookup_virtual_test_args(self._test_name)
        else:
            test_name = self._test_name
            args = self._port.lookup_physical_test_args(self._test_name)
        return DriverInput(test_name, self._timeout, image_hash, self._should_run_pixel_test, args)
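The branch above decides which name the driver actually loads: a virtual test falls back to its physical base test unless a file really exists under the virtual directory, while the extra command-line args always come from the virtual suite. A condensed restatement of just that resolution, for readability:

    def resolve_test_name(port, filesystem, test_name):
        # Mirrors the branch in _driver_input() above.
        test_base = port.lookup_virtual_test_base(test_name)
        if not test_base:
            return test_name  # Plain physical test.
        if filesystem.exists(port.abspath_for_test(test_name)):
            return test_name  # Real file under virtual/ (e.g. a reference).
        return test_base      # Fall back to the physical base test.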
Example #16
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = 0
        reference_output = None
        test_result = None

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch reference matches, or if none of the match references match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        for expectation, reference_filename in putAllMismatchBeforeMatch(
                self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_output = self._driver.run_test(
                DriverInput(reference_test_name,
                            self._timeout,
                            test_output.image_hash,
                            is_reftest=True))
            test_result = self._compare_output_with_reference(
                test_output, reference_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break
            total_test_time += test_result.test_run_time

        assert reference_output
        test_result_writer.write_test_result(self._port, self._test_name,
                                             test_output, reference_output,
                                             test_result.failures)
        return TestResult(self._test_name, test_result.failures,
                          total_test_time + test_result.test_run_time,
                          test_result.has_stderr)
Example #17
    def run(self, driver, timeout_ms):
        output = driver.run_test(
            DriverInput(self.path_or_url(), timeout_ms, None, False))
        if self.run_failed(output):
            return None
        return self.parse_output(output)