Example #1
def setUp(self):
    self._actual_output = DriverOutput(text=None,
                                       image=None,
                                       image_hash=None,
                                       audio=None)
    self._expected_output = DriverOutput(text=None,
                                         image=None,
                                         image_hash=None,
                                         audio=None)
Example #2
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(None, None, None, None)
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == test_expectations.FAIL:
        failures = [test_failures.TestFailure(dummy_1, dummy_2)]
    return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
Example #3
def get_result(test_name, result_type=ResultType.Pass, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(
        None, None, None, None)
    if result_type == ResultType.Timeout:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == ResultType.Crash:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == ResultType.Failure:
        failures = [test_failures.TestFailure(dummy_1, dummy_2)]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
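A minimal usage sketch of the ResultType variant above. The test name is hypothetical, and the assertions assume only that TestResult exposes its constructor arguments as attributes:

# Hypothetical usage of get_result(); the test name is made up.
result = get_result('failures/expected/crash.html', ResultType.Crash)
assert result.test_name == 'failures/expected/crash.html'
assert len(result.failures) == 1  # the single FailureCrash built above
assert result.test_run_time == 0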
Example #4
    def output_for_test(self, test_input, is_reftest):
        port = self._port
        actual_text = port.expected_text(test_input.test_name)
        actual_audio = port.expected_audio(test_input.test_name)
        actual_image = None
        actual_checksum = None
        if is_reftest:
            # Make up some output for reftests.
            actual_text = 'reference text\n'
            actual_checksum = 'mock-checksum'
            actual_image = 'blank'
            if test_input.test_name.endswith('-mismatch.html'):
                actual_text = 'not reference text\n'
                actual_checksum = 'not-mock-checksum'
                actual_image = 'not blank'
        elif test_input.image_hash:
            actual_checksum = port.expected_checksum(test_input.test_name)
            actual_image = port.expected_image(test_input.test_name)

        if self._options.actual_directory:
            actual_path = port.host.filesystem.join(self._options.actual_directory, test_input.test_name)
            root, _ = port.host.filesystem.splitext(actual_path)
            text_path = root + '-actual.txt'
            if port.host.filesystem.exists(text_path):
                actual_text = port.host.filesystem.read_binary_file(text_path)
            audio_path = root + '-actual.wav'
            if port.host.filesystem.exists(audio_path):
                actual_audio = port.host.filesystem.read_binary_file(audio_path)
            image_path = root + '-actual.png'
            if port.host.filesystem.exists(image_path):
                actual_image = port.host.filesystem.read_binary_file(image_path)
                with port.host.filesystem.open_binary_file_for_reading(image_path) as filehandle:
                    actual_checksum = read_checksum_from_png.read_checksum(filehandle)

        return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
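When --actual-directory is set, output_for_test() looks for per-test artifact files using the '-actual' suffixes above. A hypothetical layout for tests passes/text.html, passes/audio.html, and passes/image.html, assuming actual_directory='/actual', expressed as the files dict a mock filesystem could be seeded with:

# Hypothetical --actual-directory contents; the PNG's checksum is re-read
# via read_checksum_from_png in output_for_test() above.
files = {
    '/actual/passes/text-actual.txt': b'overridden text output',
    '/actual/passes/audio-actual.wav': b'RIFF fake wav bytes',
    '/actual/passes/image-actual.png': b'\x89PNG fake png bytes',
}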
Example #5
def run_test(self, failures=None, files=None, filename='foo.html'):
    failures = failures or []
    host = MockSystemHost()
    host.filesystem.files = files or {}
    port = TestPort(host=host,
                    port_name='test-mac-mac10.11',
                    options=optparse.Values())
    actual_output = DriverOutput(text='',
                                 image=None,
                                 image_hash=None,
                                 audio=None)
    expected_output = DriverOutput(text='',
                                   image=None,
                                   image_hash=None,
                                   audio=None)
    write_test_result(host.filesystem, port, '/tmp', filename,
                      actual_output, expected_output, failures)
    return host.filesystem
Example #6
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
    failures = []
    dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(
        None, None, None, None)
    if result_type == test_expectations.TIMEOUT:
        failures = [test_failures.FailureTimeout(dummy_1)]
    elif result_type == test_expectations.AUDIO:
        failures = [test_failures.FailureAudioMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.TEXT:
        failures = [test_failures.FailureTextMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.IMAGE:
        failures = [test_failures.FailureImageHashMismatch(dummy_1, dummy_2)]
    elif result_type == test_expectations.CRASH:
        failures = [test_failures.FailureCrash(dummy_1)]
    elif result_type == test_expectations.LEAK:
        failures = [test_failures.FailureLeak(dummy_1)]
    return test_results.TestResult(test_name,
                                   failures=failures,
                                   test_run_time=run_time)
Example #7
    def test_repeated_test_artifacts(self):
        host = MockSystemHost()
        port = Port(host, 'baseport')
        artifacts = Artifacts('/dir', host.filesystem, repeat_tests=True)

        def init_test_failure(test_failure):
            test_failure.port = port
            test_failure.filesystem = host.filesystem
            test_failure.test_name = 'foo.html'
            test_failure.result_directory = '/dir'

        pass_with_stderr = PassWithStderr(
            DriverOutput(None, None, None, None, error=b'pass with stderr'))
        init_test_failure(pass_with_stderr)
        crash = FailureCrash(
            DriverOutput(None,
                         None,
                         None,
                         None,
                         crash=True,
                         error=b'crash stderr'))
        init_test_failure(crash)
        timeout = FailureTimeout(
            DriverOutput(None, None, None, None, error=b'timeout with stderr'))
        init_test_failure(timeout)

        pass_with_stderr.create_artifacts(artifacts)
        self.assertEqual('pass with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        crash.create_artifacts(artifacts)
        self.assertEqual('crash stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        timeout.create_artifacts(artifacts)
        self.assertEqual('timeout with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))

        pass_with_stderr.create_artifacts(artifacts)
        self.assertEqual('timeout with stderr',
                         host.filesystem.read_text_file('/dir/foo-stderr.txt'))
Example #8
def test_results_multiple(self):
    driver_output = DriverOutput(None, None, None, None)
    failure_crash = [
        test_failures.FailureCrash(driver_output, None),
        test_failures.TestFailure(driver_output, None)
    ]
    failure_timeout = [
        test_failures.FailureTimeout(driver_output, None),
        test_failures.TestFailure(driver_output, None)
    ]
    failure_early_exit = [
        test_failures.FailureEarlyExit(driver_output, None),
        test_failures.TestFailure(driver_output, None)
    ]
    # Should not raise an exception for CRASH and FAIL.
    TestResult('foo', failures=failure_crash)
    # Should not raise an exception for TIMEOUT and FAIL.
    TestResult('foo', failures=failure_timeout)
    with self.assertRaises(AssertionError):
        TestResult('foo', failures=failure_early_exit)
Example #9
    def run_test(self, driver_input):
        if not self.started:
            self.started = True
            self.pid = TestDriver.next_pid
            TestDriver.next_pid += 1

        start_time = time.time()
        test_name = driver_input.test_name
        test_args = driver_input.args or []
        test = self._port._tests[test_name]
        if test.keyboard:
            raise KeyboardInterrupt
        if test.exception:
            raise ValueError('exception from ' + test_name)
        if test.device_failure:
            raise DeviceFailure('device failure in ' + test_name)

        audio = None
        actual_text = test.actual_text
        crash = test.crash
        web_process_crash = test.web_process_crash
        leak = test.leak
        # Initialize crash bookkeeping up front so the flaky-crash branches
        # below can set these fields without being clobbered later.
        crashed_process_name = None
        crashed_pid = None

        if 'flaky/text.html' in test_name and test_name not in self._port._flakes:
            self._port._flakes.add(test_name)
            actual_text = 'flaky text failure'

        if 'crash_then_text.html' in test_name:
            if test_name in self._port._flakes:
                actual_text = 'text failure'
            else:
                self._port._flakes.add(test_name)
                crashed_process_name = self._port.driver_name()
                crashed_pid = 1
                crash = True

        if 'text_then_crash.html' in test_name:
            if test_name in self._port._flakes:
                crashed_process_name = self._port.driver_name()
                crashed_pid = 1
                crash = True
            else:
                self._port._flakes.add(test_name)
                actual_text = 'text failure'

        if actual_text and test_args and test_name == 'passes/args.html':
            actual_text = actual_text + ' ' + ' '.join(test_args)

        if test.actual_audio:
            audio = base64.b64decode(test.actual_audio)

        leak_log = ''
        if leak:
            leak_log = 'leak detected'

        crash_log = ''
        if crash:
            crashed_process_name = self._port.driver_name()
            crashed_pid = 1
            crash_log = 'crash log'
        elif web_process_crash:
            crashed_process_name = 'WebProcess'
            crashed_pid = 2
            crash_log = 'web process crash log'

        if crashed_process_name:
            crash_logs = CrashLogs(self._port.host)
            crash_log = crash_logs.find_newest_log(crashed_process_name,
                                                   None) or crash_log

        if 'crash-reftest.html' in test_name:
            crashed_process_name = self._port.driver_name()
            crashed_pid = 3
            crash = True
            crash_log = 'reftest crash log'
        if test.actual_checksum == driver_input.image_hash:
            image = None
        else:
            image = test.actual_image
        return DriverOutput(actual_text,
                            image,
                            test.actual_checksum,
                            audio,
                            crash=(crash or web_process_crash),
                            crashed_process_name=crashed_process_name,
                            crashed_pid=crashed_pid,
                            crash_log=crash_log,
                            test_time=time.time() - start_time,
                            timeout=test.timeout,
                            error=test.error,
                            pid=self.pid,
                            leak=test.leak,
                            leak_log=leak_log)
Example #10
def _expected_driver_output(self):
    return DriverOutput(self._port.expected_text(self._test_name),
                        self._port.expected_image(self._test_name),
                        self._port.expected_checksum(self._test_name),
                        self._port.expected_audio(self._test_name))
Example #11
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input())
        total_test_time = test_output.test_time

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)
        # This _compare_output compares text if expected text exists, ignores
        # image, checks for extra baselines, and generates crash or timeout
        # failures if needed.
        compare_text_failures = self._compare_output(expected_text_output,
                                                     test_output)
        # If the test crashed, timed out, or leaked, there's no point in
        # running the reference at all; this can save a lot of execution time
        # when there are many crashes or timeouts.
        if test_output.crash or test_output.timeout or test_output.leak:
            return build_test_result(test_output,
                                     self._test_name,
                                     retry_attempt=self._retry_attempt,
                                     failures=compare_text_failures,
                                     test_run_time=test_output.test_time,
                                     pid=test_output.pid,
                                     crash_site=test_output.crash_site)

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the
        # mismatches first, then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy
        # to do.
        expected_output = None
        reference_test_names = []
        reftest_failures = []
        args = self._port.args_for_test(self._test_name)
        # sort self._reference_files to put mismatch tests first
        for expectation, reference_filename in sorted(self._reference_files):
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       args=args)
            expected_output = self._driver.run_test(driver_input)
            total_test_time += expected_output.test_time
            reftest_failures = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if ((expectation == '!=' and reftest_failures)
                    or (expectation == '==' and not reftest_failures)):
                break

        assert expected_output

        # Combine the text-comparison failures with the reftest failures.
        expected_output.text = expected_text_output.text
        failures = reftest_failures + compare_text_failures

        # FIXME: We don't really deal with a mix of reftest types properly. We
        # pass in a set() to reftest_type and only really handle the first of
        # the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))

        return build_test_result(test_output,
                                 self._test_name,
                                 retry_attempt=self._retry_attempt,
                                 failures=failures,
                                 test_run_time=total_test_time,
                                 reftest_type=reftest_type,
                                 pid=test_output.pid,
                                 crash_site=test_output.crash_site,
                                 references=reference_test_names)
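The mismatch-first ordering above relies on plain tuple sorting: '!' precedes '=' in ASCII, so sorted() places '!=' references before '==' ones. A worked example with hypothetical file names:

# sorted() orders mismatch ('!=') references ahead of match ('==') ones.
reference_files = [('==', '/t/foo-expected.html'),
                   ('!=', '/t/foo-expected-mismatch.html')]
assert sorted(reference_files) == [('!=', '/t/foo-expected-mismatch.html'),
                                   ('==', '/t/foo-expected.html')]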
Example #12
def setUp(self):
    host = MockHost()
    self.port = host.port_factory.get(port_name='test')
    self._actual_output = DriverOutput(None, None, None, None)
    self._expected_output = DriverOutput(None, None, None, None)
Example #13
def test_results_has_repaint_overlay(self):
    driver_output = DriverOutput('"invalidations": [', None, None, None)
    failures = [test_failures.FailureTextMismatch(driver_output, None)]
    result = TestResult('foo', failures=failures)
    self.assertTrue(result.has_repaint_overlay)
Example #14
def test_results_has_stderr(self):
    driver_output = DriverOutput(None, None, None, None, error='error')
    failures = [test_failures.FailureCrash(driver_output, None)]
    result = TestResult('foo', failures=failures)
    self.assertTrue(result.has_stderr)
Example #15
    def _run_reftest(self):
        test_output = self._driver.run_test(self._driver_input(),
                                            self._stop_when_done)
        total_test_time = test_output.test_time
        expected_output = None
        test_result = None

        expected_text = self._port.expected_text(self._test_name)
        expected_text_output = DriverOutput(text=expected_text,
                                            image=None,
                                            image_hash=None,
                                            audio=None)

        # If the test crashed, or timed out, there's no point in running the reference at all.
        # This can save a lot of execution time if we have a lot of crashes or timeouts.
        if test_output.crash or test_output.timeout:
            test_result = self._compare_output(expected_text_output,
                                               test_output)

            if test_output.crash:
                test_result_writer.write_test_result(
                    self._filesystem, self._port, self._results_directory,
                    self._test_name, test_output, expected_text_output,
                    test_result.failures)
            return test_result

        # A reftest can have multiple match references and multiple mismatch references;
        # the test fails if any mismatch matches and all of the matches don't match.
        # To minimize the number of references we have to check, we run all of the mismatches first,
        # then the matches, and short-circuit out as soon as we can.
        # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.

        putAllMismatchBeforeMatch = sorted
        reference_test_names = []
        for expectation, reference_filename in putAllMismatchBeforeMatch(
                self._reference_files):
            if self._port.lookup_virtual_test_base(self._test_name):
                args = self._port.lookup_virtual_reference_args(
                    self._test_name)
            else:
                args = self._port.lookup_physical_reference_args(
                    self._test_name)
            reference_test_name = self._port.relative_test_filename(
                reference_filename)
            reference_test_names.append(reference_test_name)
            driver_input = DriverInput(reference_test_name,
                                       self._timeout_ms,
                                       image_hash=test_output.image_hash,
                                       should_run_pixel_test=True,
                                       args=args)
            expected_output = self._reference_driver.run_test(
                driver_input, self._stop_when_done)
            total_test_time += expected_output.test_time
            test_result = self._compare_output_with_reference(
                expected_output, test_output, reference_filename,
                expectation == '!=')

            if (expectation == '!=' and test_result.failures) or (
                    expectation == '==' and not test_result.failures):
                break

        assert expected_output

        if expected_text:
            text_output = DriverOutput(text=test_output.text,
                                       image=None,
                                       image_hash=None,
                                       audio=None)
            text_compare_result = self._compare_output(expected_text_output,
                                                       text_output)
            test_result.failures.extend(text_compare_result.failures)
            test_result.has_repaint_overlay = text_compare_result.has_repaint_overlay
            expected_output.text = expected_text_output.text

        test_result_writer.write_test_result(self._filesystem, self._port,
                                             self._results_directory,
                                             self._test_name, test_output,
                                             expected_output,
                                             test_result.failures)

        # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
        # and only really handle the first of the references in the result.
        reftest_type = list(
            set([
                reference_file[0] for reference_file in self._reference_files
            ]))
        return TestResult(self._test_name,
                          test_result.failures,
                          total_test_time,
                          test_result.has_stderr,
                          reftest_type=reftest_type,
                          pid=test_result.pid,
                          crash_site=test_result.crash_site,
                          references=reference_test_names,
                          has_repaint_overlay=test_result.has_repaint_overlay)
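As the FIXME above notes, reftest_type only records the deduplicated set of reference expectations, so a test mixing '==' and '!=' references yields both markers in unspecified order. A worked example with hypothetical file names:

# reftest_type keeps one entry per distinct expectation, in set order.
reference_files = [('==', '/t/a-expected.html'),
                   ('!=', '/t/a-expected-mismatch.html'),
                   ('==', '/t/a-expected-alt.html')]
reftest_type = list(set([ref[0] for ref in reference_files]))
assert sorted(reftest_type) == ['!=', '==']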