Example #1
def test_expected_subtest_failure(capfd):
    # Tests that an expected subtest failure does not cause the test to fail

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some expected subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", expected="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1", status="TIMEOUT", expected="TIMEOUT", subtest="t1_c",
                       message="t1_c_message")

    # The subtest failures are all expected so this test should not be added to
    # the set of tests with subtest failures.
    assert "t1" not in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_log = test_obj["artifacts"]["log"]
    assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
                     "[PASS] t1_b: t1_b_message\n" \
                     "[TIMEOUT] t1_c: t1_c_message\n"
    # The status of the test in the output is a pass because the subtest
    # failures were all expected.
    assert test_obj["actual"] == "PASS"
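The test snippets in this listing omit their imports. A minimal sketch of what they presumably rely on; the module paths are assumptions based on the usual mozlog and wptrunner layouts, not taken from the excerpts themselves:

import json
from io import StringIO
from unittest import mock

from mozlog import handlers, structuredlog
# The Chromium and wptreport formatters live in wptrunner rather than mozlog;
# these paths are a guess and may need adjusting to the local package layout.
from wptrunner.formatters import chromium, wptreport
from wptrunner.formatters.chromium import ChromiumFormatter
from wptrunner.formatters.wptreport import WptreportFormatter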
Example #2
    def create_log(self, *args, **kwargs):
        logger = structuredlog.StructuredLogger("expected_test")
        data = StringIO()
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)

        log_entries = ([("suite_start", {
            "tests": [],
            "run_info": kwargs.get("run_info", {})
        })] + list(args) + [("suite_end", {})])

        for item in log_entries:
            action, action_kwargs = item
            getattr(logger, action)(**action_kwargs)
        logger.remove_handler(handler)
        data.seek(0)
        return data
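A hedged usage sketch for the helper above; the method name, event payloads and assertion are illustrative, not taken from the surrounding test suite:

    def test_create_log_usage_sketch(self):
        # Hypothetical call: each positional argument is an (action, kwargs)
        # pair that create_log replays on the logger between suite_start and
        # suite_end.
        data = self.create_log(
            ("test_start", {"test": "/a.html"}),
            ("test_end", {"test": "/a.html", "status": "PASS"}),
            run_info={"os": "linux"})
        # The JSONFormatter emits one JSON object per line:
        # suite_start, test_start, test_end, suite_end.
        assert len(data.getvalue().splitlines()) == 4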
Example #3
def test_reftest_screenshots(capfd):
    # reftest_screenshots, if present, should be plumbed into artifacts.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a reftest with reftest_screenshots.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="FAIL",
                    expected="PASS",
                    extra={
                        "reftest_screenshots": [
                            {
                                "url": "foo.html",
                                "hash": "HASH1",
                                "screenshot": "DATA1"
                            },
                            "!=",
                            {
                                "url": "foo-ref.html",
                                "hash": "HASH2",
                                "screenshot": "DATA2"
                            },
                        ]
                    })
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["screenshots"] == [
        "foo.html: DATA1",
        "foo-ref.html: DATA2",
    ]
Example #4
def test_unexpected_subtest_pass(capfd):
    # A subtest that unexpectedly passes is considered a failure condition.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with a subtest that is expected to fail but passes.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="PASS", expected="FAIL", subtest="t1_a",
                       message="t1_a_message")

    # Since the subtest behaviour is unexpected, it's considered a failure, so
    # the test should be added to the set of tests with subtest failures.
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["log"] == "[PASS expected FAIL] t1_a: t1_a_message\n"
    assert t1_artifacts["wpt_subtest_failure"] == "true"
    # Since the subtest status is unexpected, we fail the test. But we report
    # wpt_actual_status as an artifact
    assert t1_artifacts["wpt_actual_status"] == "PASS"
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails
Example #5
def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Currently, subtests with empty messages will be ignored
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2", status="TIMEOUT", expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
    assert t1_log == "subtest_failure\n" \
                     "[FAIL expected PASS] t1_a: t1_a_message\n" \
                     "[PASS] t1_b: t1_b_message\n"

    t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
    assert t2_log == "[TIMEOUT expected PASS] t2_message\n"
Example #6
def test_update_pickle():
    logger = structuredlog.StructuredLogger("expected_test")
    args = {
        "test_paths": {
            "/": {
                "tests_path":
                os.path.abspath(
                    os.path.join(here, os.pardir, os.pardir, os.pardir,
                                 os.pardir))
            },
        },
        "abort": False,
        "continue": False,
        "sync": False,
    }
    args2 = args.copy()
    args2["abort"] = True
    wptupdate = WPTUpdate(logger, **args2)
    wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)
    wptupdate.run()
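This snippet also leaves its surrounding context implicit; a hedged guess at what it assumes (the import paths and the definition of here are assumptions, not copied from the source file):

import os

from mozlog import structuredlog
# WPTUpdate and UpdateRunner come from wptrunner's update machinery; the exact
# module paths below are a guess.
from wptrunner.update import WPTUpdate
from wptrunner.update.update import UpdateRunner

here = os.path.dirname(os.path.abspath(__file__))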
Example #7
def test_chromium_test_name_trie(capfd):
    # Ensure test names are broken into directories and stored in a trie with
    # test results at the leaves.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(
        handlers.StreamHandler(output, chromium.ChromiumFormatter()))

    # output a bunch of stuff
    logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"],
                       run_info={},
                       time=123)
    logger.test_start("/foo/bar/test-id-1")
    logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
    logger.test_start("/foo/test-id-2")
    logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Ensure that the test names are broken up by directory name and that the
    # results are stored at the leaves.
    test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
    assert test_obj["actual"] == "TIMEOUT"
    assert test_obj["expected"] == "FAIL"

    test_obj = output_obj["tests"]["foo"]["test-id-2"]
    # The ERROR status is mapped to FAIL for Chromium
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "TIMEOUT"
Example #8
File: testrunner.py  Project: svgeesus/wpt
    def __init__(self,
                 suite_name,
                 size,
                 test_source_cls,
                 test_source_kwargs,
                 browser_cls,
                 browser_kwargs,
                 executor_cls,
                 executor_kwargs,
                 rerun=1,
                 pause_after_test=False,
                 pause_on_unexpected=False,
                 restart_on_unexpected=True,
                 debug_info=None,
                 capture_stdio=True,
                 restart_on_new_group=True,
                 recording=None):
        self.suite_name = suite_name
        self.size = size
        self.test_source_cls = test_source_cls
        self.test_source_kwargs = test_source_kwargs
        self.browser_cls = browser_cls
        self.browser_kwargs = browser_kwargs
        self.executor_cls = executor_cls
        self.executor_kwargs = executor_kwargs
        self.pause_after_test = pause_after_test
        self.pause_on_unexpected = pause_on_unexpected
        self.restart_on_unexpected = restart_on_unexpected
        self.debug_info = debug_info
        self.rerun = rerun
        self.capture_stdio = capture_stdio
        self.restart_on_new_group = restart_on_new_group
        self.recording = recording
        assert recording is not None

        self.pool = set()
        # Event that is polled by threads so that they can gracefully exit in the face
        # of sigint
        self.stop_flag = threading.Event()
        self.logger = structuredlog.StructuredLogger(suite_name)
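A hedged construction sketch for this initializer (which looks like ManagerGroup in testrunner.py). The import path and the stub recorder are assumptions, and None stands in for the real test-source/browser/executor classes, which __init__ merely stores:

from wptrunner.testrunner import ManagerGroup  # import path is a guess


class _StubRecorder(object):
    """Hypothetical stand-in; __init__ only requires that recording is not None."""
    def set(self, names):
        pass


group = ManagerGroup("web-platform-tests", 1,
                     test_source_cls=None, test_source_kwargs={},
                     browser_cls=None, browser_kwargs={},
                     executor_cls=None, executor_kwargs={},
                     recording=_StubRecorder())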
Example #9
def test_wptreport_run_info_optional(capfd):
    """per the mozlog docs, run_info is optional; check we work without it"""
    # setup the logger
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    # output a bunch of stuff
    logger.suite_start(["test-id-1"])  # no run_info arg!
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", "PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    assert "run_info" not in output_obj or output_obj["run_info"] == {}
Example #10
def test_flaky_test_unexpected(capfd):
    # Check that a flaky test with multiple possible statuses is seen as
    # unexpected if its actual status is NOT one of the possible ones.

    # set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test that is known to be flaky
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="ERROR",
                    expected="OK",
                    known_intermittent=["TIMEOUT"])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
    assert test_obj["actual"] == "FAIL"
    # All the possible statuses are merged and sorted together into expected.
    assert test_obj["expected"] == "PASS TIMEOUT"
    # ...this is a regression and unexpected because the actual status is not
    # one of the expected ones
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True
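For contrast, a hedged companion sketch of the expected-flake case. The test name and assertions are illustrative, based on how the formatter presumably treats a status that appears in known_intermittent; they are not copied from the real test suite:

def test_flaky_test_expected_sketch():
    # Hypothetical companion to the test above: the actual status IS one of the
    # known_intermittent values, so the run should not count as a regression.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="TIMEOUT", expected="OK",
                    known_intermittent=["TIMEOUT"])
    logger.suite_end()

    output.seek(0)
    test_obj = json.load(output)["tests"]["t1"]
    # TIMEOUT keeps its name; OK still maps to PASS and merges with the
    # intermittent statuses.
    assert test_obj["actual"] == "TIMEOUT"
    assert test_obj["expected"] == "PASS TIMEOUT"
    # Presumably not a regression, since TIMEOUT was listed as intermittent.
    assert test_obj.get("is_regression") is not True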
Example #11
    def test_filter_component(self):
        component_logger = structuredlog.StructuredLogger(
            self.logger.name, "test_component")
        component_logger.component_filter = handlers.LogLevelFilter(
            lambda x: x, "info")

        self.logger.debug("Test")
        self.assertFalse(self.handler.empty)
        self.assert_log_equals({
            "action": "log",
            "level": "DEBUG",
            "message": "Test"
        })
        self.assertTrue(self.handler.empty)

        component_logger.info("Test 1")
        self.assertFalse(self.handler.empty)
        self.assert_log_equals({
            "action": "log",
            "level": "INFO",
            "message": "Test 1",
            "component": "test_component"
        })

        component_logger.debug("Test 2")
        self.assertTrue(self.handler.empty)

        component_logger.component_filter = None

        component_logger.debug("Test 3")
        self.assertFalse(self.handler.empty)
        self.assert_log_equals({
            "action": "log",
            "level": "DEBUG",
            "message": "Test 3",
            "component": "test_component"
        })
Example #12
def test_wptreport_lone_surrogate_ucs2(capfd):
    # Since UCS4 is a superset of UCS2 we can meaningfully test the UCS2 code on a
    # UCS4 build, but not the reverse. However UCS2 is harder to handle and UCS4 is
    # the commonest (and sensible) configuration, so that's OK.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    with mock.patch.object(wptreport,
                           'surrogate_replacement',
                           wptreport.SurrogateReplacementUcs2()):
        # output a bunch of stuff
        logger.suite_start(["test-id-1"])  # no run_info arg!
        logger.test_start("test-id-1")
        logger.test_status("test-id-1",
                           subtest=u"Name with surrogate\uD800",
                           status="FAIL",
                           message=u"\U0001F601 \uDE0A\uD83D \uD83D\uDE0A")
        logger.test_end("test-id-1",
                        status="PASS",
                        message=u"\uDE0A\uD83D \uD83D\uDE0A \U0001F601")
        logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    test = output_obj["results"][0]
    assert test["message"] == u"U+de0aU+d83d \U0001f60a \U0001F601"
    subtest = test["subtests"][0]
    assert subtest["name"] == u"Name with surrogateU+d800"
    assert subtest["message"] == u"\U0001F601 U+de0aU+d83d \U0001f60a"
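The expected strings above come from wptreport's surrogate handling. A rough, hedged sketch of that kind of normalization, written only to illustrate the transformation the assertions check; this is not the actual SurrogateReplacementUcs2 implementation:

def replace_lone_surrogates(text):
    # Rough sketch: combine a valid high/low surrogate pair into the astral
    # code point and escape lone surrogates as "U+xxxx", matching the shape
    # of the expected messages above.
    out = []
    i = 0
    while i < len(text):
        code = ord(text[i])
        is_high = 0xD800 <= code <= 0xDBFF
        next_is_low = (i + 1 < len(text) and
                       0xDC00 <= ord(text[i + 1]) <= 0xDFFF)
        if is_high and next_is_low:
            combined = (0x10000 + ((code - 0xD800) << 10) +
                        (ord(text[i + 1]) - 0xDC00))
            out.append(chr(combined))
            i += 2
        elif 0xD800 <= code <= 0xDFFF:
            out.append("U+%x" % code)
            i += 1
        else:
            out.append(text[i])
            i += 1
    return "".join(out)

For instance, replace_lone_surrogates(u"\uDE0A\uD83D \uD83D\uDE0A \U0001F601") yields u"U+de0aU+d83d \U0001F60A \U0001F601", the same shape as the test's expected message.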
Example #13
    def setUp(self):
        self.logger = structuredlog.StructuredLogger("test")
        self.handler = TestHandler()
        self.logger.add_handler(self.handler)
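TestHandler here is the small in-memory handler used by these unit tests. A minimal sketch of what such a helper might look like; the attribute names are assumptions inferred from the other snippets (e.g. self.handler.empty in Example #11), not the verbatim mozlog test helper:

class TestHandler(object):
    # Hypothetical stand-in: collect structured log messages so tests can
    # inspect them one at a time.
    def __init__(self):
        self.items = []

    def __call__(self, data):
        self.items.append(data)

    @property
    def empty(self):
        return not self.items

    def get(self):
        # Return the oldest buffered message, if any.
        return self.items.pop(0) if self.items else None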
Example #14
    def setUp(self):
        self.logger = structuredlog.StructuredLogger("testBuffer")
        self.handler = handlers.BufferHandler(TestHandler(), message_limit=4)
        self.logger.add_handler(self.handler)
Example #15
from collections import defaultdict, namedtuple

from mozlog import structuredlog
from six import ensure_str, ensure_text, iteritems, iterkeys, itervalues, text_type
from six.moves import intern, range

from . import manifestupdate
from . import testloader
from . import wptmanifest
from . import wpttest
from .expected import expected_path
from .vcs import git
manifest = None  # Module that will be imported relative to test_root
manifestitem = None

logger = structuredlog.StructuredLogger("web-platform-tests")

try:
    import ujson as json
except ImportError:
    import json


class RunInfo(object):
    """A wrapper around RunInfo dicts so that they can be hashed by identity"""
    def __init__(self, dict_value):
        self.data = dict_value
        self.canonical_repr = tuple(
            tuple(item) for item in sorted(dict_value.items()))

    def __getitem__(self, key):
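        # The excerpt is cut off here. Presumably the remaining methods just
        # delegate to data/canonical_repr so a RunInfo can be used as a set
        # member or dict key; a hedged sketch, not the verbatim source:
        return self.data[key]

    def __hash__(self):
        return hash(self.canonical_repr)

    def __eq__(self, other):
        return self.canonical_repr == other.canonical_repr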
Example #16
    def run(self):
        """Main loop for the TestRunnerManager.

        TestRunnerManagers generally receive commands from their
        TestRunner updating them on the status of a test. They
        may also have a stop flag set by the main thread indicating
        that the manager should shut down the next time the event loop
        spins."""
        self.logger = structuredlog.StructuredLogger(self.suite_name)
        with self.browser_cls(self.logger, **self.browser_kwargs) as browser:
            self.browser = BrowserManager(self.logger,
                                          browser,
                                          self.command_queue,
                                          no_timeout=self.debug_info
                                          is not None)
            dispatch = {
                RunnerManagerState.before_init: self.start_init,
                RunnerManagerState.initializing: self.init,
                RunnerManagerState.running: self.run_test,
                RunnerManagerState.restarting: self.restart_runner
            }

            self.state = RunnerManagerState.before_init()
            end_states = (RunnerManagerState.stop, RunnerManagerState.error)

            try:
                while not isinstance(self.state, end_states):
                    f = dispatch.get(self.state.__class__)
                    while f:
                        self.logger.debug("Dispatch %s" % f.__name__)
                        if self.should_stop():
                            return
                        new_state = f()
                        if new_state is None:
                            break
                        self.state = new_state
                        self.logger.debug("new state: %s" %
                                          self.state.__class__.__name__)
                        if isinstance(self.state, end_states):
                            return
                        f = dispatch.get(self.state.__class__)

                    new_state = None
                    while new_state is None:
                        new_state = self.wait_event()
                        if self.should_stop():
                            return
                    self.state = new_state
                    self.logger.debug("new state: %s" %
                                      self.state.__class__.__name__)
            except Exception:
                self.logger.error(traceback.format_exc())
                raise
            finally:
                self.logger.debug(
                    "TestRunnerManager main loop terminating, starting cleanup"
                )
                clean = isinstance(self.state, RunnerManagerState.stop)
                self.stop_runner(force=not clean)
                self.teardown()
        self.logger.debug("TestRunnerManager main loop terminated")
Example #17
def test_subtest_failure(capfd):
    # Tests that a test fails if a subtest fails

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1",
                       status="PASS",
                       subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1",
                       status="TIMEOUT",
                       subtest="t1_c",
                       message="t1_c_message")

    # Make sure the test name was added to the set of tests with subtest fails
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1",
                    status="PASS",
                    expected="PASS",
                    message="top_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: FAIL\n",
        "  [t1_b]\n    expected: PASS\n",
        "  [t1_c]\n    expected: TIMEOUT\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "Harness: top_message\n",
        "t1_a: t1_a_message\n",
        "t1_b: t1_b_message\n",
        "t1_c: t1_c_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    # The status of the test in the output is a failure because subtests failed,
    # despite the harness reporting that the test passed. But the harness status
    # is logged as an artifact.
    assert t1_artifacts["wpt_actual_status"] == ["PASS"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails
Example #18
    def run(self):
        """Main loop for the TestManager.

        TestManagers generally receive commands from their
        TestRunner updating them on the status of a test. They
        may also have a stop flag set by the main thread indicating
        that the manager should shut down the next time the event loop
        spins."""
        self.logger = structuredlog.StructuredLogger(self.suite_name)
        with self.browser_cls(
                self.logger,
                **self.browser_kwargs) as browser, self.test_source_cls(
                    self.test_queue) as test_source:
            self.browser = browser
            self.test_source = test_source
            try:
                if self.init() is Stop:
                    return
                while True:
                    commands = {
                        "init_succeeded": self.init_succeeded,
                        "init_failed": self.init_failed,
                        "test_start": self.test_start,
                        "test_ended": self.test_ended,
                        "after_test_ended": self.after_test_ended,
                        "restart_runner": self.restart_runner,
                        "runner_teardown": self.runner_teardown,
                        "log": self.log,
                        "error": self.error
                    }
                    try:
                        command, data = self.command_queue.get(True, 1)
                    except IOError:
                        if not self.should_stop():
                            self.logger.error("Got IOError from poll")
                            self.restart_count += 1
                            if self.restart_runner() is Stop:
                                break
                    except Empty:
                        command = None

                    if self.should_stop():
                        self.logger.debug("A flag was set; stopping")
                        break

                    if command is not None:
                        self.restart_count = 0
                        if commands[command](*data) is Stop:
                            break
                    else:
                        if (self.debug_info and self.debug_info.interactive
                                and self.browser_started
                                and not browser.is_alive()):
                            self.logger.debug("Debugger exited")
                            break
                        if not self.test_runner_proc.is_alive():
                            if not self.command_queue.empty():
                                # We got a new message so process that
                                continue

                            # If we got to here the runner presumably shut down
                            # unexpectedly
                            self.logger.info("Test runner process shut down")

                            if self.test is not None:
                                # This could happen if the test runner crashed for some other
                                # reason
                                # Need to consider the unlikely case where one test causes the
                                # runner process to repeatedly die
                                self.logger.critical(
                                    "Last test did not complete")
                                break
                            self.logger.warning(
                                "More tests found, but runner process died, restarting"
                            )
                            self.restart_count += 1
                            if self.restart_runner() is Stop:
                                break
            finally:
                self.logger.debug(
                    "TestRunnerManager main loop terminating, starting cleanup"
                )
                self.stop_runner()
                self.teardown()
                self.logger.debug("TestRunnerManager main loop terminated")