Code example #1
File: test_chromium.py (project: zachlewis2020/wpt)
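The snippets below are shown without their import blocks. A minimal sketch of what they appear to rely on is given here; the wptrunner module paths are assumptions inferred from the identifiers used, not something stated in the snippets. Note that several examples receive a `logger` pytest fixture as a parameter instead of constructing a StructuredLogger inline.

# Imports assumed by most of the examples below; the wptrunner paths are
# assumptions, only the standard-library and mozlog imports are certain.
import json
from io import BytesIO, StringIO

import pytest  # only needed for pytest.approx in the timing example

import mozfile  # used by the gecko TbplFormatter excerpt
from mozlog import formatters, handlers, structuredlog

# Assumed location of the Chromium formatter inside the wpt checkout:
from wptrunner.formatters import chromium
from wptrunner.formatters.chromium import ChromiumFormatter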
def test_subtest_failure(capfd):
    # Tests that a test fails if a subtest fails

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1",
                       status="PASS",
                       subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1",
                       status="TIMEOUT",
                       subtest="t1_c",
                       message="t1_c_message")

    # Make sure the test name was added to the set of tests with subtest fails
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["log"] == [
        "[t1_a] [FAIL expected PASS] t1_a_message",
        "[t1_b] [PASS expected PASS] t1_b_message",
        "[t1_c] [TIMEOUT expected PASS] t1_c_message",
        "[] [FAIL expected PASS] ",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    # The status of the test in the output is a failure because subtests failed,
    # despite the harness reporting that the test passed. But the harness status
    # is logged as an artifact.
    assert t1_artifacts["wpt_actual_status"] == ["PASS"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails
Code example #2
def test_unexpected_test_fail(capfd):
    # Check that an unexpected test-level failure is marked as unexpected and
    # as a regression.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a single test that fails unexpectedly at the test (harness) level.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's actual and expected status should be mapped, ERROR->FAIL and
    # OK->PASS
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # ...and this test should be both a regression and unexpected
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True
Code example #3
def test_precondition_failed(capfd):
    # Check that a failed precondition gets properly handled.

    # set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test with a precondition failure
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="PRECONDITION_FAILED", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The precondition failure should map to FAIL status, but we should also
    # have an artifact containing the original PRECONDITION_FAILED status.
    assert test_obj["actual"] == "FAIL"
    assert test_obj["artifacts"]["wpt_actual_status"] == [
        "PRECONDITION_FAILED"
    ]
    # ...this is an unexpected regression because we expected a pass but failed
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True
Code example #4
def test_num_failures_by_type(capfd):
    # Test that the number of failures by status type is correctly calculated.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run some tests with different statuses: 3 passes, 1 timeout
    logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    logger.test_end("t2", status="PASS", expected="PASS")
    logger.test_start("t3")
    logger.test_end("t3", status="PASS", expected="FAIL")
    logger.test_start("t4")
    logger.test_end("t4", status="TIMEOUT", expected="CRASH")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    num_failures_by_type = json.load(output)["num_failures_by_type"]

    # We expect 3 passes and 1 timeout, nothing else.
    assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
    assert num_failures_by_type["PASS"] == 3
    assert num_failures_by_type["TIMEOUT"] == 1
Code example #5
def test_chromium_required_fields(capfd):
    # Test that the test results contain a handful of required fields.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # output a bunch of stuff
    logger.suite_start(["test-id-1"], run_info={}, time=123)
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Check for existence of required fields
    assert "interrupted" in output_obj
    assert "path_delimiter" in output_obj
    assert "version" in output_obj
    assert "num_failures_by_type" in output_obj
    assert "tests" in output_obj

    test_obj = output_obj["tests"]["test-id-1"]
    assert "actual" in test_obj
    assert "expected" in test_obj
Code example #6
File: test_chromium.py (project: clopez/wpt)
def test_known_intermittent_duplicate(logger, capfd):
    # We don't want to have duplicate statuses in the final "expected" field.

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # There are two duplications in this input:
    # 1. known_intermittent already contains expected;
    # 2. both statuses in known_intermittent map to FAIL in Chromium.
    # In the end, we should only get one FAIL in Chromium "expected".
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="ERROR",
                    expected="ERROR",
                    known_intermittent=["FAIL", "ERROR"])
    logger.suite_end()
    logger.shutdown()

    # Check nothing got output to stdout/stderr.
    # (Note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # Check the actual output of the formatter.
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["actual"] == "FAIL"
    # No duplicate "FAIL" in "expected".
    assert test_obj["expected"] == "FAIL"
Code example #7
def test_flaky_test_unexpected(capfd):
    # Check that a flaky test with multiple possible statuses is seen as
    # unexpected if its actual status is NOT one of the possible ones.

    # set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test that is known to be flaky
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["TIMEOUT"])
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
    assert test_obj["actual"] == "FAIL"
    # All the possible statuses are concatenated together into expected.
    assert test_obj["expected"] == "PASS TIMEOUT"
    # ...this is a regression and unexpected because the actual status is not
    # one of the expected ones
    assert test_obj["is_regression"] is True
    assert test_obj["is_unexpected"] is True
Code example #8
File: test_chromium.py (project: clopez/wpt)
def test_known_intermittent_empty(logger, capfd):
    # If the known_intermittent list is empty, we want to ensure we don't append
    # any extraneous characters to the output.

    # set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test and include an empty known_intermittent list
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="OK", expected="OK", known_intermittent=[])
    logger.suite_end()
    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # Both actual and expected statuses get mapped to Pass. No extra whitespace
    # anywhere.
    assert test_obj["actual"] == "PASS"
    assert test_obj["expected"] == "PASS"
Code example #9
File: test_chromium.py (project: clopez/wpt)
def test_expected_test_fail(logger, capfd):
    # Check that an expected test-level failure is treated as a Pass

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a single test with an expected test-level failure.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1", status="ERROR", expected="ERROR")
    logger.suite_end()
    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    # The test's actual and expected status should map from "ERROR" to "FAIL"
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "FAIL"
    # ...and this test should be neither a regression nor unexpected
    assert "is_regression" not in test_obj
    assert "is_unexpected" not in test_obj
Code example #10
File: test_chromium.py (project: graycatya/qtwebkit)
def test_expected_subtest_failure(capfd):
    # Tests that an expected subtest failure does not cause the test to fail

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some expected subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="FAIL",
                       expected="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1",
                       status="PASS",
                       subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1",
                       status="TIMEOUT",
                       expected="TIMEOUT",
                       subtest="t1_c",
                       message="t1_c_message")

    # The subtest failures are all expected so this test should not be added to
    # the set of tests with subtest failures.
    assert "t1" not in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="OK", expected="OK")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_log = test_obj["artifacts"]["log"]
    print("Lpz t1log=%s" % t1_log)
    assert t1_log == [
        "[t1]\n  expected: OK\n",
        "  [t1_a]\n    expected: FAIL\n    message: t1_a_message\n",
        "  [t1_b]\n    expected: PASS\n    message: t1_b_message\n",
        "  [t1_c]\n    expected: TIMEOUT\n    message: t1_c_message\n",
    ]
    # The status of the test in the output is a pass because the subtest
    # failures were all expected.
    assert test_obj["actual"] == "PASS"
    assert test_obj["expected"] == "PASS"
Code example #11
def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1",
                       status="PASS",
                       subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Subtests with empty messages should not be ignored.
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2",
                    status="TIMEOUT",
                    expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_artifacts = output_json["tests"]["t1"]["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: FAIL\n",
        "  [t1_b]\n    expected: PASS\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "t1_a: t1_a_message\n",
        "t1_b: t1_b_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    t2_artifacts = output_json["tests"]["t2"]["artifacts"]
    assert t2_artifacts["wpt_actual_metadata"] == [
        "[t2]\n  expected: TIMEOUT\n",
        "  [t2_a]\n    expected: PASS\n",
    ]
    assert t2_artifacts["wpt_log"] == ["Harness: t2_message\n"]
    assert "wpt_subtest_failure" not in t2_artifacts.keys()
Code example #12
File: test_structured.py (project: joegana/gecko-dev)
    def setUp(self):
        self.position = 0
        self.logger = structuredlog.StructuredLogger(
            "test_%s" % type(self).__name__)
        self.output_file = StringIO()
        self.handler = handlers.StreamHandler(
            self.output_file, self.get_formatter())
        self.logger.add_handler(self.handler)
Code example #13
def test_process_output_crashing_test(capfd):
    """Test that chromedriver logs are preserved for crashing tests"""

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    logger.suite_start(["t1", "t2", "t3"], run_info={}, time=123)

    logger.test_start("t1")
    logger.process_output(100, "This message should be recorded",
                          "/some/path/to/chromedriver --some-flag")
    logger.process_output(101, "This message should not be recorded",
                          "/some/other/process --another-flag")
    logger.process_output(100, "This message should also be recorded",
                          "/some/path/to/chromedriver --some-flag")
    logger.test_end("t1", status="CRASH", expected="CRASH")

    logger.test_start("t2")
    logger.process_output(100, "Another message for the second test",
                          "/some/path/to/chromedriver --some-flag")
    logger.test_end("t2", status="CRASH", expected="PASS")

    logger.test_start("t3")
    logger.process_output(100, "This test fails",
                          "/some/path/to/chromedriver --some-flag")
    logger.process_output(100, "But the output should not be captured",
                          "/some/path/to/chromedriver --some-flag")
    logger.process_output(100, "Because it does not crash",
                          "/some/path/to/chromedriver --some-flag")
    logger.test_end("t3", status="FAIL", expected="PASS")

    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["wpt_crash_log"] == [
        "This message should be recorded",
        "This message should also be recorded"
    ]

    test_obj = output_json["tests"]["t2"]
    assert test_obj["artifacts"]["wpt_crash_log"] == [
        "Another message for the second test"
    ]

    test_obj = output_json["tests"]["t3"]
    assert "wpt_crash_log" not in test_obj["artifacts"]
Code example #14
def test_unexpected_subtest_pass(capfd):
    # A subtest that unexpectedly passes is considered a failure condition.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with a subtest that is expected to fail but passes.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="PASS",
                       expected="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")

    # Since the subtest behaviour is unexpected, it's considered a failure, so
    # the test should be added to the set of tests with subtest failures.
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_artifacts = test_obj["artifacts"]
    assert t1_artifacts["wpt_actual_metadata"] == [
        "[t1]\n  expected: PASS\n",
        "  [t1_a]\n    expected: PASS\n",
    ]
    assert t1_artifacts["wpt_log"] == [
        "t1_a: t1_a_message\n",
    ]
    assert t1_artifacts["wpt_subtest_failure"] == ["true"]
    # Since the subtest status is unexpected, we fail the test. But we report
    # wpt_actual_status as an artifact
    assert t1_artifacts["wpt_actual_status"] == ["PASS"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails
Code example #15
    def test_utf8str_write(self):
        with mozfile.NamedTemporaryFile() as logfile:
            _fmt = formatters.TbplFormatter()
            _handler = handlers.StreamHandler(logfile, _fmt)
            self.logger.add_handler(_handler)
            self.logger.suite_start([])
            self.logger.info("☺")
            logfile.seek(0)
            data = logfile.readlines()[-1].strip()
            self.assertEquals(data, "☺")
            self.logger.suite_end()
Code example #16
def create_log(entries):
    # Build an in-memory mozlog JSON log from (action, kwargs) entries and
    # return the stream rewound to the start.
    logger = structuredlog.StructuredLogger("expected_test")
    data = BytesIO()
    handler = handlers.StreamHandler(data, formatters.JSONFormatter())
    logger.add_handler(handler)

    for item in entries:
        action, kwargs = item
        getattr(logger, action)(**kwargs)
    logger.remove_handler(handler)
    data.seek(0)
    return data
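For illustration, a hypothetical call to the create_log helper above could look like the following; the (action, kwargs) tuples mirror the StructuredLogger methods used throughout these examples:

# Hypothetical usage of create_log (not part of the original snippet).
entries = [
    ("suite_start", {"tests": ["t1"], "run_info": {}}),
    ("test_start", {"test": "t1"}),
    ("test_end", {"test": "t1", "status": "PASS", "expected": "PASS"}),
    ("suite_end", {}),
]
log = create_log(entries)
# mozlog's JSONFormatter emits one JSON object per action, so the log can be
# read back line by line.
first_entry = json.loads(log.readline())
assert first_entry["action"] == "suite_start"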
Code example #17
    def create_log(self, *args, **kwargs):
        # Wrap the given log entries in suite_start/suite_end events and
        # return the resulting in-memory JSON log.
        logger = structuredlog.StructuredLogger("expected_test")
        data = StringIO.StringIO()
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)

        log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] +
                       list(args) +
                       [("suite_end", {})])

        for item in log_entries:
            action, kwargs = item
            getattr(logger, action)(**kwargs)
        logger.remove_handler(handler)
        data.seek(0)
        return data
Code example #18
def test_subtest_messages(capfd):
    # Tests accumulation of test output

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run two tests with subtest messages. The subtest name should be included
    # in the output. We should also tolerate missing messages.
    logger.suite_start(["t1", "t2"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1",
                       status="FAIL",
                       subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1",
                       status="PASS",
                       subtest="t1_b",
                       message="t1_b_message")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_start("t2")
    # Currently, subtests with empty messages will be ignored
    logger.test_status("t2", status="PASS", subtest="t2_a")
    # A test-level message will also be appended
    logger.test_end("t2",
                    status="TIMEOUT",
                    expected="PASS",
                    message="t2_message")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
    assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
                     "[PASS] t1_b: t1_b_message\n"

    t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
    assert t2_log == "[TIMEOUT] t2_message\n"
Code example #19
File: test_chromium.py (project: svgeesus/wpt)
def test_repeated_test_statuses(logger, capfd):
    # Check that the logger outputs all statuses from multiple runs of a test.

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test suite for the first time.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="PASS",
                    expected="PASS",
                    known_intermittent=[])
    logger.suite_end()

    # Run the test suite for the second time.
    logger.suite_start(["t1"], run_info={}, time=456)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="FAIL",
                    expected="PASS",
                    known_intermittent=[])
    logger.suite_end()

    # Write the final results.
    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    status_totals = output_json["num_failures_by_type"]
    assert status_totals["PASS"] == 1
    # A missing result type is the same as being present and set to zero (0).
    assert status_totals.get("FAIL", 0) == 0

    # The actual statuses are accumulated in an ordered, space-separated list.
    test_obj = output_json["tests"]["t1"]
    assert test_obj["actual"] == "PASS FAIL"
    assert test_obj["expected"] == "PASS"
Code example #20
def test_reftest_screenshots(capfd):
    # reftest_screenshots, if present, should be plumbed into artifacts.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a reftest with reftest_screenshots.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="FAIL",
                    expected="PASS",
                    extra={
                        "reftest_screenshots": [
                            {
                                "url": "foo.html",
                                "hash": "HASH1",
                                "screenshot": "DATA1"
                            },
                            "!=",
                            {
                                "url": "foo-ref.html",
                                "hash": "HASH2",
                                "screenshot": "DATA2"
                            },
                        ]
                    })
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    assert test_obj["artifacts"]["screenshots"] == [
        "foo.html: DATA1",
        "foo-ref.html: DATA2",
    ]
Code example #21
File: test_chromium.py (project: svgeesus/wpt)
def test_time_per_test(logger, capfd):
    # Test that the formatter measures time per test correctly.

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    logger.suite_start(["test-id-1", "test-id-2"], run_info={}, time=50)
    logger.test_start("test-id-1", time=100)
    logger.test_start("test-id-2", time=200)
    logger.test_end("test-id-1", status="PASS", expected="PASS", time=300)
    logger.test_end("test-id-2", status="PASS", expected="PASS", time=199)
    logger.suite_end()

    logger.suite_start(["test-id-1"], run_info={}, time=400)
    logger.test_start("test-id-1", time=500)
    logger.test_end("test-id-1", status="PASS", expected="PASS", time=600)
    logger.suite_end()

    # Write the final results.
    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    test1_obj = output_obj["tests"]["test-id-1"]
    test2_obj = output_obj["tests"]["test-id-2"]
    # Test 1 run 1: 300ms - 100ms = 0.2s
    # Test 1 run 2: 600ms - 500ms = 0.1s
    assert test1_obj["time"] == pytest.approx(0.2)
    assert len(test1_obj["times"]) == 2
    assert test1_obj["times"][0] == pytest.approx(0.2)
    assert test1_obj["times"][1] == pytest.approx(0.1)
    assert "time" not in test2_obj
    assert "times" not in test2_obj
Code example #22
File: test_chromium.py (project: haaami01/wpt)
def test_repeated_runs(logger, capfd):
    # Check that the logger outputs one report after multiple test suite runs.

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    # Run a test suite for the first time.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="PASS",
                    expected="PASS",
                    known_intermittent=[])
    logger.suite_end()

    # Run the test suite for the second time.
    logger.suite_start(["t1"], run_info={}, time=456)
    logger.test_start("t1")
    logger.test_end("t1",
                    status="FAIL",
                    expected="PASS",
                    known_intermittent=[])
    logger.suite_end()

    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    # The latest status (FAIL) replaces the earlier one (PASS).
    test_obj = output_json["tests"]["t1"]
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "PASS"
Code example #23
def test_chromium_test_name_trie(capfd):
    # Ensure test names are broken into directories and stored in a trie with
    # test results at the leaves.

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(
        handlers.StreamHandler(output, chromium.ChromiumFormatter()))

    # output a bunch of stuff
    logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"],
                       run_info={},
                       time=123)
    logger.test_start("/foo/bar/test-id-1")
    logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
    logger.test_start("/foo/test-id-2")
    logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)

    # Ensure that the test names are broken up by directory name and that the
    # results are stored at the leaves.
    test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
    assert test_obj["actual"] == "TIMEOUT"
    assert test_obj["expected"] == "FAIL"

    test_obj = output_obj["tests"]["foo"]["test-id-2"]
    # The ERROR status is mapped to FAIL for Chromium
    assert test_obj["actual"] == "FAIL"
    assert test_obj["expected"] == "TIMEOUT"
Code example #24
File: test_chromium.py (project: svgeesus/wpt)
def test_flaky_test_detection(logger, capfd):
    # Check that the logger detects flakiness for a test run multiple times.

    # Set up the handler.
    output = StringIO()
    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))

    logger.suite_start(["t1", "t2"], run_info={})
    logger.test_start("t1")
    logger.test_start("t2")
    logger.test_end("t1", status="FAIL", expected="PASS")
    logger.test_end("t2", status="FAIL", expected="FAIL")
    logger.suite_end()

    logger.suite_start(["t1", "t2"], run_info={})
    logger.test_start("t1")
    logger.test_start("t2")
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.test_end("t2", status="FAIL", expected="FAIL")
    logger.suite_end()

    # Write the final results.
    logger.shutdown()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    # We consider a test flaky if it runs multiple times and produces more than
    # one kind of result.
    test1_obj = output_json["tests"]["t1"]
    test2_obj = output_json["tests"]["t2"]
    assert test1_obj["is_flaky"] is True
    assert "is_flaky" not in test2_obj
Code example #25
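The final two examples exercise the wptreport formatter rather than the Chromium one; a sketch of the additional imports they appear to rely on follows (module paths are again assumptions):

# Extra imports assumed by the wptreport examples; the wptrunner paths are
# assumptions inferred from the identifiers used.
from unittest import mock

from wptrunner.formatters import wptreport
from wptrunner.formatters.wptreport import WptreportFormatter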
def test_wptreport_run_info_optional(capfd):
    """per the mozlog docs, run_info is optional; check we work without it"""
    # setup the logger
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    # output a bunch of stuff
    logger.suite_start(["test-id-1"])  # no run_info arg!
    logger.test_start("test-id-1")
    logger.test_end("test-id-1", "PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    assert "run_info" not in output_obj or output_obj["run_info"] == {}
Code example #26
def test_wptreport_lone_surrogate_ucs2(capfd):
    # Since UCS4 is a superset of UCS2 we can meaningfully test the UCS2 code on a
    # UCS4 build, but not the reverse. However UCS2 is harder to handle and UCS4 is
    # the commonest (and sensible) configuration, so that's OK.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))

    with mock.patch.object(wptreport,
                           'surrogate_replacement',
                           wptreport.SurrogateReplacementUcs2()):
        # output a bunch of stuff
        logger.suite_start(["test-id-1"])  # no run_info arg!
        logger.test_start("test-id-1")
        logger.test_status("test-id-1",
                           subtest=u"Name with surrogate\uD800",
                           status="FAIL",
                           message=u"\U0001F601 \uDE0A\uD83D \uD83D\uDE0A")
        logger.test_end("test-id-1",
                        status="PASS",
                        message=u"\uDE0A\uD83D \uD83D\uDE0A \U0001F601")
        logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_obj = json.load(output)
    test = output_obj["results"][0]
    assert test["message"] == u"U+de0aU+d83d \U0001f60a \U0001F601"
    subtest = test["subtests"][0]
    assert subtest["name"] == u"Name with surrogateU+d800"
    assert subtest["message"] == u"\U0001F601 U+de0aU+d83d \U0001f60a"