Example 1
def test_runner_01(mocker, tmp_path):
    """test Runner()"""
    fake_time = mocker.patch("grizzly.common.runner.time", autospec=True)
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    runner = Runner(server, target)
    assert runner._idle is None
    serv_files = ["a.bin", "/another/file.bin"]
    testcase = mocker.Mock(spec=TestCase,
                           landing_page=serv_files[0],
                           optional=[])
    # all files served
    fake_time.side_effect = (1, 2)
    server.serve_path.return_value = (SERVED_ALL, serv_files)
    result = runner.run([], ServerMap(), testcase)
    assert result.duration == 1
    assert result.status == RunResult.COMPLETE
    assert result.served == serv_files
    assert not result.timeout
    assert target.close.call_count == 0
    assert target.dump_coverage.call_count == 0
    assert testcase.dump.call_count == 1
    # some files served
    fake_time.side_effect = (1, 2)
    server.serve_path.return_value = (SERVED_REQUEST, serv_files)
    result = runner.run([], ServerMap(), testcase, coverage=True)
    assert result.status == RunResult.COMPLETE
    assert result.served == serv_files
    assert not result.timeout
    assert target.close.call_count == 0
    assert target.dump_coverage.call_count == 1
    # existing test path
    fake_time.side_effect = (1, 2)
    testcase.reset_mock()
    tc_path = (tmp_path / "tc")
    tc_path.mkdir()
    server.serve_path.return_value = (SERVED_ALL, serv_files)
    result = runner.run([], ServerMap(), testcase, test_path=str(tc_path))
    assert result.status == RunResult.COMPLETE
    assert target.close.call_count == 0
    assert testcase.dump.call_count == 0
    assert tc_path.is_dir()
Example 2
def test_runner_03(mocker):
    """test reporting timeout"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    serv_files = ["a.bin", "/another/file.bin"]
    server.serve_testcase.return_value = (SERVED_TIMEOUT, serv_files)
    runner = Runner(server, target)
    target.detect_failure.return_value = target.RESULT_FAILURE
    runner.run([], ServerMap(), mocker.Mock(spec=TestCase, landing_page="x"))
    assert runner.result == runner.FAILED
    assert runner.served == serv_files
    assert runner.timeout
Example 3
def test_runner_02(mocker):
    """test Runner() errors"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    testcase = mocker.Mock(spec=TestCase, landing_page="x")
    runner = Runner(server, target)
    # no files served
    server.serve_testcase.return_value = (SERVED_NONE, [])
    target.detect_failure.return_value = target.RESULT_NONE
    runner.run([], ServerMap(), testcase)
    assert runner.result == runner.ERROR
    assert not runner.served
    assert not runner.timeout
    assert target.close.call_count == 1
    target.reset_mock()
    # landing page not served
    server.serve_testcase.return_value = (SERVED_REQUEST, ["harness"])
    runner.run([], ServerMap(), testcase)
    assert runner.result == runner.ERROR
    assert runner.served
    assert target.close.call_count == 1
Example 4
def test_runner_09(mocker, tmp_path):
    """test Runner.run() adding includes to testcase"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    runner = Runner(server, target)
    # create test files
    inc_path1 = (tmp_path / "include")
    inc_path1.mkdir()
    inc1 = (inc_path1 / "inc_file.bin")
    inc1.write_bytes(b"a")
    (inc_path1 / "nested").mkdir()
    inc2 = (inc_path1 / "nested" / "nested_inc.bin")
    inc2.write_bytes(b"a")
    inc_path2 = (tmp_path / "include2")
    inc_path2.mkdir()
    inc3 = (inc_path2 / "inc_file3.txt")
    inc3.write_bytes(b"a")
    # build server map
    smap = ServerMap()
    smap.set_include("/", str(inc_path1))
    smap.set_include("/test", str(inc_path2))
    serv_files = ["a.b", str(inc1), str(inc2), str(inc3)]
    server.serve_testcase.return_value = (SERVED_ALL, serv_files)
    with TestCase("a.b", "x", "x") as tcase:
        runner.run([], smap, tcase)
        assert runner.result == runner.COMPLETE
        assert "inc_file.bin" in tcase._existing_paths
        assert pathjoin("nested", "nested_inc.bin") in tcase._existing_paths
        assert pathjoin("test", "inc_file3.txt") in tcase._existing_paths
Example 5
def test_runner_10(mocker, tmp_path):
    """test Runner.run() adding includes to testcase"""
    server = mocker.Mock(spec_set=Sapphire)
    target = mocker.Mock(spec_set=Target)
    target.check_result.return_value = Result.NONE
    runner = Runner(server, target, relaunch=10)
    # create test files
    inc_path1 = tmp_path / "include"
    inc_path1.mkdir()
    inc1 = inc_path1 / "inc_file.bin"
    inc1.write_bytes(b"a")
    (inc_path1 / "nested").mkdir()
    inc2 = inc_path1 / "nested" / "nested_inc.bin"
    inc2.write_bytes(b"a")
    inc_path2 = tmp_path / "include2"
    inc_path2.mkdir()
    inc3 = inc_path2 / "inc_file3.txt"
    inc3.write_bytes(b"a")
    # build server map
    smap = ServerMap()
    smap.set_include("/", str(inc_path1))
    smap.set_include("/test", str(inc_path2))
    serv_files = ["a.b", str(inc1), str(inc2), str(inc3)]
    server.serve_path.return_value = (Served.ALL, serv_files)
    with TestCase("a.b", "x", "x") as tcase:
        result = runner.run([], smap, tcase)
        assert result.attempted
        assert result.status == Result.NONE
        assert "inc_file.bin" in tcase.contents
        assert "nested/nested_inc.bin" in tcase.contents
        assert "test/inc_file3.txt" in tcase.contents
Example 6
def test_runner_05(mocker):
    """test Runner() with idle checking"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    serv_files = ["/fake/file", "/another/file.bin"]
    server.serve_testcase.return_value = (SERVED_REQUEST, serv_files)
    runner = Runner(server, target, idle_threshold=0.01, idle_delay=0.01)
    assert runner._idle is not None
    runner.run([], ServerMap(),
               mocker.Mock(spec=TestCase, landing_page=serv_files[0]))
    assert runner.result == runner.COMPLETE
    assert target.close.call_count == 0
Example 7
def test_runner_05(mocker):
    """test reporting failures"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target, launch_timeout=10)
    target.monitor.is_healthy.return_value = False
    serv_files = ["file.bin"]
    server.serve_path.return_value = (SERVED_REQUEST, serv_files)
    testcase = mocker.Mock(spec=TestCase,
                           landing_page=serv_files[0],
                           optional=[])
    runner = Runner(server, target)
    # test FAILURE
    target.detect_failure.return_value = target.RESULT_FAILURE
    runner.launch("http://a/")
    result = runner.run([], ServerMap(), testcase)
    assert result.attempted
    assert result.status == RunResult.FAILED
    assert result.served == serv_files
    assert not result.timeout
    # test IGNORED
    target.detect_failure.return_value = target.RESULT_IGNORED
    runner.launch("http://a/")
    result = runner.run([], ServerMap(), testcase)
    assert result.attempted
    assert result.status == RunResult.IGNORED
    assert result.served == serv_files
    assert not result.timeout
    # failure before serving landing page
    server.serve_path.return_value = (SERVED_REQUEST, ["harness"])
    target.detect_failure.return_value = target.RESULT_FAILURE
    runner.launch("http://a/")
    result = runner.run([], ServerMap(), testcase)
    assert not result.attempted
    assert result.status == RunResult.FAILED
    assert result.served
    assert not result.timeout
Example 8
def test_runner_03(mocker, srv_result, served):
    """test Runner() errors"""
    server = mocker.Mock(spec=Sapphire)
    server.serve_path.return_value = (srv_result, served)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    testcase = mocker.Mock(spec=TestCase, landing_page="x", optional=[])
    runner = Runner(server, target)
    result = runner.run([], ServerMap(), testcase)
    assert result.status is None
    assert not result.attempted
    assert result.initial
    assert set(result.served) == set(served)
    assert not result.timeout
    assert target.close.call_count == 1
Example 9
def test_runner_06(mocker):
    """test Runner() with idle checking"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    serv_files = ["/fake/file", "/another/file.bin"]
    server.serve_path.return_value = (SERVED_ALL, serv_files)
    runner = Runner(server, target, idle_threshold=0.01, idle_delay=0.01, relaunch=10)
    assert runner._idle is not None
    result = runner.run(
        [],
        ServerMap(),
        mocker.Mock(spec=TestCase, landing_page=serv_files[0], optional=[]),
    )
    assert result.status is None
    assert result.attempted
    assert target.close.call_count == 0
Example 10
def test_session_06(tmp_path, mocker):
    """test Session.generate_testcase()"""
    Status.PATH = str(tmp_path)
    mocker.patch("grizzly.session.TestFile", autospec=True)
    fake_adapter = mocker.Mock(spec=Adapter)
    fake_adapter.NAME = "fake_adapter"
    fake_iomgr = mocker.Mock(spec=IOManager, server_map=ServerMap())
    fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase)
    fake_target = mocker.Mock(spec=Target, prefs="fake")
    with Session(fake_adapter, fake_iomgr, None, None, fake_target) as session:
        assert fake_adapter.generate.call_count == 0
        testcase = session.generate_testcase()
        assert fake_iomgr.create_testcase.call_count == 1
        fake_iomgr.create_testcase.assert_called_with("fake_adapter")
        assert fake_adapter.generate.call_count == 1
        fake_adapter.generate.assert_called_with(testcase,
                                                 fake_iomgr.server_map)
        assert testcase.add_meta.call_count == 1
Example 11
def test_runner_05(mocker, served, attempted, target_result, status):
    """test reporting failures"""
    server = mocker.Mock(spec_set=Sapphire)
    server.serve_path.return_value = (Served.REQUEST, served)
    target = mocker.Mock(spec_set=Target, launch_timeout=10)
    target.check_result.return_value = target_result
    target.monitor.is_healthy.return_value = False
    testcase = mocker.Mock(spec_set=TestCase,
                           landing_page="a.bin",
                           optional=[])
    runner = Runner(server, target)
    runner.launch("http://a/")
    result = runner.run([], ServerMap(), testcase)
    assert result.attempted == attempted
    assert result.status == status
    assert not result.timeout
    assert target.handle_hang.call_count == 0
    assert target.close.call_count == 1
Example 12
def test_runner_04(mocker, ignore, status, idle, detect_failure):
    """test reporting timeout"""
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    testcase = mocker.Mock(spec=TestCase, landing_page="a.bin", optional=[])
    serv_files = ["a.bin", "/another/file.bin"]
    server.serve_path.return_value = (SERVED_TIMEOUT, serv_files)
    target.detect_failure.return_value = target.RESULT_FAILURE
    target.handle_hang.return_value = idle
    target.monitor.is_healthy.return_value = False
    runner = Runner(server, target, relaunch=1)
    serv_map = ServerMap()
    result = runner.run(ignore, serv_map, testcase)
    assert result.status == status
    assert result.served == serv_files
    assert result.timeout
    assert "grz_empty" not in serv_map.dynamic
    assert target.detect_failure.call_count == detect_failure
    assert target.handle_hang.call_count == 1
Example 13
def test_session_09(tmp_path, mocker):
    """test Session.run() handle TargetLaunchError"""
    Status.PATH = str(tmp_path)
    fake_report = mocker.Mock(spec=Report, major="major123", minor="minor456")
    fake_report.crash_info.createShortSignature.return_value = "[@ sig]"
    fake_runner = mocker.patch("grizzly.session.Runner", autospec=True)
    fake_runner.return_value.launch.side_effect = TargetLaunchError(
        "test", fake_report)
    mocker.patch("grizzly.session.TestFile", autospec=True)
    fake_adapter = mocker.Mock(spec=Adapter, remaining=None)
    fake_iomgr = mocker.Mock(spec=IOManager,
                             harness=None,
                             server_map=ServerMap())
    fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
    fake_target = mocker.Mock(spec=Target)
    fake_target.monitor.launches = 1
    with Session(fake_adapter, fake_iomgr, mocker.Mock(), fake_serv,
                 fake_target) as session:
        with raises(TargetLaunchError, match="test"):
            session.run([], iteration_limit=1)
        assert session.status.iteration == 1
        assert session.status.results == 1
        assert session.status.ignored == 0
Example 14
def test_session_09(tmp_path, mocker):
    """test Session.run() handle TargetLaunchError"""
    Status.PATH = str(tmp_path)
    mocker.patch("grizzly.session.Report", autospec=True)
    fake_runner = mocker.patch("grizzly.session.Runner", autospec=True)
    fake_runner.return_value.launch.side_effect = TargetLaunchError(
        "test", mocker.Mock(spec=Report))
    mocker.patch("grizzly.session.TestFile", autospec=True)
    fake_adapter = mocker.Mock(spec=Adapter)
    fake_iomgr = mocker.Mock(spec=IOManager,
                             harness=None,
                             input_files=[],
                             server_map=ServerMap(),
                             tests=deque())
    fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
    fake_target = mocker.Mock(spec=Target)
    fake_target.monitor.launches = 1
    with Session(fake_adapter, fake_iomgr, mocker.Mock(), fake_serv,
                 fake_target) as session:
        with raises(TargetLaunchError, match="test"):
            session.run([], iteration_limit=1)
        assert session.status.iteration == 1
        assert session.status.results == 0
        assert session.status.ignored == 0
Example 15
def test_session_05(tmp_path, mocker):
    """test basic Session functions"""
    Status.PATH = str(tmp_path)
    fake_adapter = mocker.Mock(spec=Adapter, remaining=None)
    fake_adapter.IGNORE_UNSERVED = True
    fake_adapter.TEST_DURATION = 10
    fake_testcase = mocker.Mock(spec=TestCase,
                                landing_page="page.htm",
                                optional=[])
    fake_iomgr = mocker.Mock(spec=IOManager,
                             harness=None,
                             server_map=ServerMap())
    fake_iomgr.create_testcase.return_value = fake_testcase
    fake_iomgr.tests = mocker.Mock(spec=deque)
    fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
    fake_serv.serve_path.return_value = (SERVED_TIMEOUT,
                                         [fake_testcase.landing_page])
    fake_target = mocker.Mock(spec=Target, launch_timeout=30, prefs=None)
    fake_target.monitor.launches = 1
    fake_target.monitor.is_healthy.return_value = False
    with Session(fake_adapter, fake_iomgr, None, fake_serv,
                 fake_target) as session:
        session.run([], iteration_limit=1)
    assert fake_adapter.setup.call_count == 0
    assert fake_adapter.pre_launch.call_count == 1
    assert fake_adapter.generate.call_count == 1
    assert fake_adapter.on_served.call_count == 0
    assert fake_adapter.on_timeout.call_count == 1
    assert fake_iomgr.create_testcase.call_count == 1
    assert fake_iomgr.tests.pop.call_count == 0
    assert fake_testcase.purge_optional.call_count == 1
    assert fake_serv.serve_path.call_count == 1
    assert fake_target.launch.call_count == 1
    assert fake_target.detect_failure.call_count == 1
    assert fake_target.monitor.is_healthy.call_count == 1
Example 16
def test_runner_02(mocker):
    """test Runner.run() relaunch"""
    mocker.patch("grizzly.common.runner.time", autospec=True, return_value=1)
    mocker.patch("grizzly.common.runner.sleep", autospec=True)
    server = mocker.Mock(spec=Sapphire)
    target = mocker.Mock(spec=Target)
    target.detect_failure.return_value = target.RESULT_NONE
    serv_files = ["a.bin"]
    server.serve_path.return_value = (SERVED_ALL, serv_files)
    testcase = mocker.Mock(spec=TestCase,
                           landing_page=serv_files[0],
                           optional=[])
    # single run/iteration relaunch (not idle exit)
    target.is_idle.return_value = False
    runner = Runner(server, target, relaunch=1)
    assert runner._relaunch == 1
    smap = ServerMap()
    result = runner.run([], smap, testcase)
    assert result.attempted
    assert result.initial
    assert target.close.call_count == 1
    assert target.is_idle.call_count > 0
    assert target.monitor.is_healthy.call_count > 0
    assert result.status is None
    assert result.served == serv_files
    assert not smap.dynamic
    assert smap.redirect.get("grz_next_test").target == "grz_empty"
    assert not result.timeout
    target.reset_mock()
    testcase.reset_mock()
    # single run/iteration relaunch (idle exit)
    target.is_idle.return_value = True
    runner = Runner(server, target, relaunch=1)
    assert runner._relaunch == 1
    result = runner.run([], ServerMap(), testcase)
    assert result.attempted
    assert target.close.call_count == 1
    assert target.monitor.is_healthy.call_count > 0
    target.reset_mock()
    testcase.reset_mock()
    # multiple runs/iterations relaunch (is_healthy exit)
    runner = Runner(server, target, relaunch=3)
    target.monitor.is_healthy.return_value = False
    for _ in range(2):
        smap = ServerMap()
        result = runner.run([], smap, testcase)
        assert result.attempted
        assert target.close.call_count == 0
        assert target.monitor.is_healthy.call_count == 0
        assert result.status is None
        assert result.served == serv_files
        assert not result.timeout
        assert not smap.dynamic
        assert "grz_next_test" not in smap.redirect
    smap = ServerMap()
    result = runner.run([], smap, testcase)
    assert runner._tests_run == 3
    assert result.attempted
    assert target.close.call_count == 1
    assert target.is_idle.call_count == 0
    assert target.monitor.is_healthy.call_count == 1
    assert result.status is None
    assert result.served == serv_files
    assert not smap.dynamic
    assert smap.redirect.get("grz_next_test").target == "grz_empty"
Example 17
def test_session_07(tmp_path, mocker):
    """test Session.run() - test case was not served"""
    Status.PATH = str(tmp_path)
    fake_runner = mocker.patch("grizzly.session.Runner", autospec=True)
    mocker.patch("grizzly.session.TestFile", autospec=True)
    fake_adapter = mocker.Mock(spec=Adapter, remaining=None)
    fake_adapter.TEST_DURATION = 10
    fake_iomgr = mocker.Mock(spec=IOManager,
                             harness=None,
                             server_map=ServerMap())
    fake_iomgr.create_testcase.return_value = mocker.Mock(spec=TestCase)
    fake_iomgr.tests = mocker.Mock(spec=deque)
    fake_report = mocker.Mock(spec=Report, major="major123", minor="minor456")
    fake_report.crash_info.createShortSignature.return_value = "[@ sig]"
    fake_serv = mocker.Mock(spec=Sapphire, port=0x1337)
    fake_target = mocker.Mock(spec=Target, prefs="prefs.js")
    fake_target.monitor.launches = 1
    fake_target.create_report.return_value = fake_report
    fake_reporter = mocker.Mock(spec=Reporter)
    # between iterations
    run_result = RunResult([], 1, status=RunResult.FAILED)
    run_result.attempted = False
    run_result.initial = False
    fake_runner.return_value.run.return_value = run_result
    with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv,
                 fake_target) as session:
        session.run([], iteration_limit=1)
        assert fake_runner.return_value.run.call_count == 1
        assert fake_adapter.on_served.call_count == 1
        assert fake_adapter.on_timeout.call_count == 0
        assert fake_iomgr.purge_tests.call_count == 1
        assert fake_runner.return_value.launch.call_count == 1
        assert session.status.iteration == 1
        assert session.status.results == 1
        assert session.status.ignored == 0
        assert fake_reporter.submit.call_count == 1
        assert fake_iomgr.tests.pop.call_count == 1
    fake_reporter.reset_mock()
    fake_runner.reset_mock()
    # delayed startup crash
    run_result = RunResult([], 1, status=RunResult.FAILED)
    run_result.attempted = False
    run_result.initial = True
    fake_runner.return_value.run.return_value = run_result
    with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv,
                 fake_target) as session:
        with raises(SessionError, match="Please check Adapter and Target"):
            session.run([], iteration_limit=1)
        assert fake_runner.return_value.run.call_count == 1
        assert fake_runner.return_value.launch.call_count == 1
        assert session.status.iteration == 1
        assert session.status.results == 1
        assert session.status.ignored == 0
        assert fake_reporter.submit.call_count == 1
    fake_reporter.reset_mock()
    fake_runner.reset_mock()
    # startup hang/unresponsive
    run_result = RunResult([], 1)
    run_result.attempted = False
    run_result.initial = True
    fake_runner.return_value.run.return_value = run_result
    with Session(fake_adapter, fake_iomgr, fake_reporter, fake_serv,
                 fake_target) as session:
        with raises(SessionError, match="Please check Adapter and Target"):
            session.run([], iteration_limit=1)
        assert fake_runner.return_value.run.call_count == 1
        assert fake_runner.return_value.launch.call_count == 1
        assert session.status.iteration == 1
        assert session.status.results == 0
        assert session.status.ignored == 0
        assert fake_reporter.submit.call_count == 0
Example 18
    def run(
        self,
        testcases,
        time_limit,
        repeat=1,
        min_results=1,
        exit_early=True,
        expect_hang=False,
        idle_delay=0,
        idle_threshold=0,
        on_iteration_cb=None,
    ):
        """Run testcase replay.

        Args:
            testcases (list): One or more TestCases to run.
            time_limit (int): Maximum time in seconds a test should take to complete.
            repeat (int): Maximum number of times to run the TestCase.
            min_results (int): Minimum number of results needed before run can
                               be considered successful.
            exit_early (bool): If True the minimum required number of iterations
                               are performed to either meet `min_results` or
                               determine that it is not possible to do so.
                               If False `repeat` number of iterations are
                               performed.
            expect_hang (bool): Running testcases is expected to result in a hang.
            idle_delay (int): Number of seconds to wait before polling for idle.
            idle_threshold (int): CPU usage threshold to mark the process as idle.
            on_iteration_cb (callable): Called at the start of each iteration.

        Returns:
            list: ReplayResults that were found running testcases.
        """
        assert idle_delay >= 0
        assert idle_threshold >= 0
        assert min_results > 0
        assert repeat > 0
        assert repeat >= min_results
        assert testcases
        assert time_limit > 0
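        # replaying multiple testcases back to back requires the harness for
        # navigation, and expecting a hang requires a signature to match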
        assert self._harness is not None or len(testcases) == 1
        assert not expect_hang or self._signature is not None

        self.status = Status.start(db_file=self.STATUS_DB)

        server_map = ServerMap()
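        # wire the entry point: without a harness /grz_start redirects straight
        # to the current test, otherwise it serves the harness page that drives
        # navigation between tests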
        if self._harness is None:
            server_map.set_redirect("grz_start", "grz_current_test", required=False)
        else:
            server_map.set_dynamic_response(
                "grz_harness", lambda _: self._harness, mime_type="text/html"
            )
            server_map.set_redirect("grz_start", "grz_harness", required=False)

        # track unprocessed results
        reports = dict()
        try:
            sig_hash = Report.calc_hash(self._signature) if self._signature else None
            test_count = len(testcases)
            relaunch = min(self._relaunch, repeat)
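            # each iteration serves test_count tests, so the Runner's relaunch
            # budget is scaled to cover whole iterations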
            runner = Runner(
                self.server,
                self.target,
                idle_threshold=idle_threshold,
                idle_delay=idle_delay,
                relaunch=relaunch * test_count,
            )
            # perform iterations
            for _ in range(repeat):
                self.status.iteration += 1
                if on_iteration_cb is not None:
                    on_iteration_cb()
                if self.target.closed:
                    if self._harness is None:
                        location = runner.location("/grz_start", self.server.port)
                    else:
                        location = runner.location(
                            "/grz_start",
                            self.server.port,
                            close_after=relaunch * test_count,
                            time_limit=time_limit,
                        )
                    runner.launch(location)
                # run tests
                durations = list()
                served = list()
                for test_idx in range(test_count):
                    if test_count > 1:
                        LOG.info(
                            "Running test, part %d/%d (%d/%d)...",
                            test_idx + 1,
                            test_count,
                            self.status.iteration,
                            repeat,
                        )
                    else:
                        LOG.info(
                            "Running test (%d/%d)...", self.status.iteration, repeat
                        )
                    # update redirects
                    if self._harness is not None:
                        next_idx = (test_idx + 1) % test_count
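                        # modulo wraps the last test's redirect back to the
                        # first so the harness can cycle through the set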
                        server_map.set_redirect(
                            "grz_next_test",
                            testcases[next_idx].landing_page,
                            required=True,
                        )
                    server_map.set_redirect(
                        "grz_current_test",
                        testcases[test_idx].landing_page,
                        required=False,
                    )
                    # run testcase
                    run_result = runner.run(
                        self.ignore,
                        server_map,
                        testcases[test_idx],
                        wait_for_callback=self._harness is None,
                    )
                    durations.append(run_result.duration)
                    served.append(run_result.served)
                    if run_result.status != Result.NONE or not run_result.attempted:
                        break
                if not run_result.attempted:
                    LOG.warning("Test case was not served")
                    if runner.initial:
                        if run_result.status == Result.FOUND:
                            # TODO: what is the best action to take in this case?
                            LOG.warning("Delayed startup failure detected")
                        else:
                            LOG.warning("Timeout too short? System too busy?")
                # process run results
                if run_result.status == Result.FOUND:
                    # processing the result may take a few minutes (rr)
                    # update console to show progress
                    LOG.info("Processing result...")
                    # TODO: use self.target.create_report here
                    log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs"))
                    self.target.save_logs(log_path)
                    report = Report(
                        log_path, self.target.binary, is_hang=run_result.timeout
                    )
                    # check signatures
                    if run_result.timeout:
                        short_sig = "Potential hang detected"
                    elif report.crash_signature is not None:
                        short_sig = report.crash_info.createShortSignature()
                    else:
                        # FM crash signature creation failed
                        short_sig = "Signature creation failed"

                    # set active signature
                    if (
                        not runner.startup_failure
                        and not self._any_crash
                        and not run_result.timeout
                        and self._signature is None
                    ):
                        assert not expect_hang
                        LOG.debug("no signature given, using %r", short_sig)
                        self._signature = report.crash_signature

                    # bucket result
                    if not runner.startup_failure and (
                        self._any_crash
                        or self.check_match(self._signature, report, expect_hang)
                    ):
                        if sig_hash is not None:
                            LOG.debug("using provided signature (hash) to bucket")
                            bucket_hash = sig_hash
                        else:
                            bucket_hash = report.crash_hash
                        self.status.results.count(bucket_hash, short_sig)
                        LOG.info(
                            "Result: %s (%s:%s)",
                            short_sig,
                            report.major[:8],
                            report.minor[:8],
                        )
                        if bucket_hash not in reports:
                            reports[bucket_hash] = ReplayResult(
                                report, served, durations, True
                            )
                            LOG.debug("now tracking %s", bucket_hash)
                            report = None  # don't remove report
                        else:
                            reports[bucket_hash].count += 1
                            LOG.debug("already tracking %s", bucket_hash)
                    else:
                        LOG.info(
                            "Result: Different signature: %s (%s:%s)",
                            short_sig,
                            report.major[:8],
                            report.minor[:8],
                        )
                        self.status.ignored += 1
                        if report.crash_hash not in reports:
                            reports[report.crash_hash] = ReplayResult(
                                report, served, durations, False
                            )
                            LOG.debug("now tracking %s", report.crash_hash)
                            report = None  # don't remove report
                        else:
                            reports[report.crash_hash].count += 1
                            LOG.debug("already tracking %s", report.crash_hash)
                    # purge untracked report
                    if report is not None:
                        report.cleanup()
                        report = None
                elif run_result.status == Result.IGNORED:
                    self.status.ignored += 1
                    LOG.info("Result: Ignored (%d)", self.status.ignored)

                if exit_early and self.status.iteration < repeat:
                    # check if failed to meet minimum number of results
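                    # even if every remaining iteration produced a result, the
                    # total could no longer reach min_results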
                    if (
                        repeat - self.status.iteration + self.status.results.total
                        < min_results
                    ):
                        # failed to reproduce issue
                        LOG.debug("skipping remaining attempts")
                        LOG.debug(
                            "results (%d) < minimum (%d), after %d attempts",
                            self.status.results.total,
                            min_results,
                            self.status.iteration,
                        )
                        # NOTE: this can be tricky if the harness is used because it can
                        # skip the shutdown performed in the harness and runner; if this
                        # is an issue, use relaunch=1 for now
                        break
                    # check if complete (minimum number of results found)
                    if self.status.results.total >= min_results:
                        assert self.status.results.total == min_results
                        assert (
                            sum(x.count for x in reports.values() if x.expected)
                            >= min_results
                        )
                        LOG.debug(
                            "results == expected (%d), after %d attempts",
                            min_results,
                            self.status.iteration,
                        )
                        break

                # TODO: should we warn about large browser logs?

            # process results
            if self._any_crash:
                assert (
                    all(x.expected for x in reports.values()) or runner.startup_failure
                )
                # add all results if min_results was reached
                if sum(x.count for x in reports.values() if x.expected) >= min_results:
                    results = list(reports.values())
                else:
                    # add only unexpected results since min_results was not reached
                    results = list()
                    for report in reports.values():
                        if report.expected:
                            report.report.cleanup()
                        else:
                            results.append(report)
                    LOG.debug(
                        "%d (any_crash) less than minimum %d",
                        self.status.results.total,
                        min_results,
                    )

            else:
                # there should be at most one expected bucket
                assert sum(x.expected for x in reports.values()) <= 1
                # filter out unreliable expected results
                results = list()
                for crash_hash, report in reports.items():
                    if report.expected and report.count < min_results:
                        LOG.debug(
                            "%r less than minimum (%d/%d)",
                            crash_hash,
                            report.count,
                            min_results,
                        )
                        report.report.cleanup()
                        continue
                    results.append(report)
            # this should only be displayed when both conditions are met:
            # 1) runner does not close target (no delay was given before shutdown)
            # 2) result has not been successfully reproduced
            if (
                self._relaunch > 1
                and not self.target.closed
                and not any(x.expected for x in results)
            ):
                LOG.info("Perhaps try with --relaunch=1")
            # active reports have been moved to results
            # clear reports to avoid cleanup of active reports
            reports.clear()
            return results

        finally:
            # we don't want to clean up but we are not checking results
            self.target.close(force_close=True)
            # remove unprocessed reports
            for report in reports.values():
                report.report.cleanup()
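A minimal usage sketch for the run() method above, assuming a ReplayManager-style
owner object: the constructor arguments, the TestCase loading step, and the cleanup
call are hypothetical illustrations; only the run() parameters come from the
docstring.

    # hypothetical sketch; names outside run()'s own signature are assumptions
    replay = ReplayManager([], server, target)  # assumed constructor
    testcase = TestCase.load("testcase.zip")    # assumed loader
    try:
        results = replay.run(
            [testcase],
            30,             # time_limit: max seconds per test
            repeat=10,      # up to 10 iterations
            min_results=2,  # need 2 matching results to consider it reproduced
        )
        for result in results:
            print(result.report.crash_hash, result.count)
    finally:
        testcase.cleanup()                      # assumed teardown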
Example 19
    def run(self,
            testcases,
            repeat=1,
            min_results=1,
            exit_early=True,
            idle_delay=0,
            idle_threshold=0):
        """Run testcase replay.

        Args:
            testcases (list): One or more TestCases to run.
            repeat (int): Maximum number of times to run the TestCase.
            min_results (int): Minimum number of results needed before run can
                               be considered successful.
            exit_early (bool): If True the minimum required number of iterations
                               are performed to either meet `min_results` or
                               determine that it is not possible to do so.
                               If False `repeat` number of iterations are
                               performed.
            idle_delay (int): Number of seconds to wait before polling for idle.
            idle_threshold (int): CPU usage threshold to mark the process as idle.

        Returns:
            list: ReplayResults that were found running testcases.
        """
        assert idle_delay >= 0
        assert idle_threshold >= 0
        assert min_results > 0
        assert repeat > 0
        assert repeat >= min_results
        assert testcases
        assert len(testcases) == 1 or self._harness is not None

        if self.status is not None:
            LOG.debug("clearing previous status data")
            self.status.cleanup()
        self.status = Status.start()

        server_map = ServerMap()
        if self._harness is not None:
            server_map.set_dynamic_response("grz_harness",
                                            lambda: self._harness,
                                            mime_type="text/html")

        # track unprocessed results
        reports = dict()
        # track unpacked testcases
        unpacked = list()
        try:
            sig_hash = Report.calc_hash(
                self._signature) if self._signature else None
            test_count = len(testcases)
            LOG.debug("unpacking testcases (%d)...", test_count)
            for test in testcases:
                dst_path = mkdtemp(prefix="tc_", dir=grz_tmp("serve"))
                unpacked.append(dst_path)
                test.dump(dst_path)
            relaunch = min(self._relaunch, repeat)
            runner = Runner(self.server,
                            self.target,
                            idle_threshold=idle_threshold,
                            idle_delay=idle_delay,
                            relaunch=relaunch * test_count)
            # perform iterations
            for _ in range(repeat):
                self.status.iteration += 1
                if self.target.closed:
                    if self._harness is None:
                        location = runner.location("/grz_current_test",
                                                   self.server.port)
                    else:
                        location = runner.location("/grz_harness",
                                                   self.server.port,
                                                   close_after=relaunch *
                                                   test_count,
                                                   timeout=self.server.timeout)
                    startup_error = False
                    # The environment from the initial testcase is used because
                    # a sequence of testcases is expected to be run without
                    # relaunching the Target to match the functionality of
                    # Grizzly. If this is not the case each TestCase should
                    # be run individually.
                    runner.launch(location, env_mod=testcases[0].env_vars)
                # run tests
                durations = list()
                served = list()
                for test_idx in range(test_count):
                    if test_count > 1:
                        LOG.info("Running test, part %d/%d (%d/%d)...",
                                 test_idx + 1, test_count,
                                 self.status.iteration, repeat)
                    else:
                        LOG.info("Running test (%d/%d)...",
                                 self.status.iteration, repeat)
                    # update redirects
                    if self._harness is not None:
                        next_idx = (test_idx + 1) % test_count
                        server_map.set_redirect(
                            "grz_next_test",
                            testcases[next_idx].landing_page,
                            required=True)
                    server_map.set_redirect("grz_current_test",
                                            testcases[test_idx].landing_page,
                                            required=False)
                    # run testcase
                    run_result = runner.run(
                        self.ignore,
                        server_map,
                        testcases[test_idx],
                        test_path=unpacked[test_idx],
                        wait_for_callback=self._harness is None)
                    durations.append(run_result.duration)
                    served.append(run_result.served)
                    if run_result.status is not None or not run_result.attempted:
                        break
                if not run_result.attempted:
                    LOG.warning("Test case was not served")
                    if run_result.initial:
                        startup_error = True
                        if run_result.status == RunResult.FAILED:
                            # TODO: what is the best action to take in this case?
                            LOG.warning("Delayed startup failure detected")
                        else:
                            LOG.warning("Timeout too short? System too busy?")
                # process run results
                if run_result.status == RunResult.FAILED:
                    # TODO: use self.target.create_report here
                    log_path = mkdtemp(prefix="logs_", dir=grz_tmp("logs"))
                    self.target.save_logs(log_path)
                    report = Report(log_path, self.target.binary)
                    # check signatures
                    short_sig = report.crash_info.createShortSignature()
                    if (not startup_error and not self._any_crash
                            and self._signature is None
                            and short_sig != "No crash detected"):
                        LOG.debug("no signature given, using %r", short_sig)
                        self._signature = report.crash_signature
                    if short_sig == "No crash detected":
                        # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs
                        # TODO: change this to support hangs/timeouts, etc
                        LOG.info("Result: No crash detected")
                    elif (not startup_error
                          and (self._any_crash
                               or self._signature.matches(report.crash_info))):
                        self.status.count_result(short_sig)
                        LOG.info("Result: %s (%s:%s)", short_sig,
                                 report.major[:8], report.minor[:8])
                        if sig_hash:
                            LOG.debug(
                                "using provided signature (hash) to bucket")
                            bucket_hash = sig_hash
                        else:
                            bucket_hash = report.crash_hash
                        if bucket_hash not in reports:
                            reports[bucket_hash] = ReplayResult(
                                report, served, durations, True)
                            LOG.debug("now tracking %s", bucket_hash)
                            report = None  # don't remove report
                        else:
                            reports[bucket_hash].count += 1
                            LOG.debug("already tracking %s", bucket_hash)
                    else:
                        LOG.info("Result: Different signature: %s (%s:%s)",
                                 short_sig, report.major[:8], report.minor[:8])
                        self.status.ignored += 1
                        if report.crash_hash not in reports:
                            reports[report.crash_hash] = ReplayResult(
                                report, served, durations, False)
                            LOG.debug("now tracking %s", report.crash_hash)
                            report = None  # don't remove report
                        else:
                            reports[report.crash_hash].count += 1
                            LOG.debug("already tracking %s", report.crash_hash)
                    # purge untracked report
                    if report is not None:
                        report.cleanup()
                        report = None
                elif run_result.status == RunResult.IGNORED:
                    self.status.ignored += 1
                    LOG.info("Result: Ignored (%d)", self.status.ignored)

                if exit_early:
                    # failed to meet minimum number of results
                    if repeat - self.status.iteration + self.status.results < min_results:
                        if self.status.iteration < repeat:
                            LOG.debug("skipping remaining attempts")
                        # failed to reproduce issue
                        LOG.debug(
                            "results (%d) < minimum (%d), after %d attempts",
                            self.status.results, min_results,
                            self.status.iteration)
                        # NOTE: this can be tricky if the harness is used because it can
                        # skip the shutdown performed in the harness and runner; if this
                        # is an issue, use relaunch=1 for now
                        break
                    # check if complete (minimum number of results found)
                    if self.status.results >= min_results:
                        assert self.status.results == min_results
                        assert sum(x.count for x in reports.values()
                                   if x.expected) >= min_results
                        LOG.debug(
                            "results == expected (%d), after %d attempts",
                            min_results, self.status.iteration)
                        break

                # warn about large browser logs
                #self.status.log_size = self.target.log_size()
                #if self.status.log_size > self.TARGET_LOG_SIZE_WARN:
                #    LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000))

            # process results
            results = list()
            if self._any_crash:
                assert all(x.expected for x in reports.values())
                if sum(x.count for x in reports.values()) >= min_results:
                    results = list(reports.values())
                else:
                    LOG.debug("%d (any_crash) less than minimum %d",
                              self.status.results, min_results)
                    for report in reports.values():
                        report.report.cleanup()
            else:
                assert sum(x.expected for x in reports.values()) <= 1
                # filter out unreliable expected results
                for crash_hash, report in reports.items():
                    if report.expected and report.count < min_results:
                        LOG.debug("%r less than minimum (%d/%d)", crash_hash,
                                  report.count, min_results)
                        report.report.cleanup()
                        continue
                    results.append(report)
            # this should only be displayed when both conditions are met:
            # 1) runner does not close target (no delay was given before shutdown)
            # 2) result has not been successfully reproduced
            if self._relaunch > 1 and not self.target.closed and not any(
                    x.expected for x in results):
                LOG.info("Perhaps try with --relaunch=1")
            # active reports have been moved to results
            # clear reports to avoid cleanup of active reports
            reports.clear()
            return results

        finally:
            # remove unpacked testcase data
            for tc_path in unpacked:
                rmtree(tc_path)
            self.target.close()
            # remove unprocessed reports
            for report in reports.values():
                report.report.cleanup()
Example 20
    def run(self, repeat=1, min_results=1):
        """Run testcase replay.

        Args:
            repeat (int): Maximum number of times to run the test case.
            min_results (int): Minimum number of results needed before run can
                               be considered successful.

        Returns:
            bool: True if results were reproduced otherwise False.
        """
        assert repeat > 0
        assert min_results > 0
        assert min_results <= repeat
        self.status = Status.start()

        server_map = ServerMap()
        if self._harness is not None:

            def _dyn_close():  # pragma: no cover
                if self.target.monitor.is_healthy():
                    # delay to help catch window close/shutdown related crashes
                    sleep(0.1)
                    self.target.close()
                return b"<h1>Close Browser</h1>"

            server_map.set_dynamic_response("grz_close_browser",
                                            _dyn_close,
                                            mime_type="text/html")
            server_map.set_dynamic_response("grz_harness",
                                            lambda: self._harness,
                                            mime_type="text/html")
            server_map.set_redirect("grz_next_test",
                                    self.testcase.landing_page,
                                    required=True)
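            # only a single testcase is replayed, so "grz_next_test" loops back
            # to the same landing page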
        server_map.set_redirect("grz_current_test",
                                self.testcase.landing_page,
                                required=False)

        success = False
        for _ in range(repeat):
            self.status.iteration += 1
            if self.target.closed:
                LOG.info("Launching target...")
                if self._harness is None:
                    location = self._runner.location("/grz_current_test",
                                                     self.server.port)
                else:
                    location = self._runner.location(
                        "/grz_harness",
                        self.server.port,
                        close_after=self.target.rl_reset,
                        forced_close=self.target.forced_close)
                try:
                    self._runner.launch(location,
                                        env_mod=self.testcase.env_vars)
                except TargetLaunchError:
                    LOG.error(
                        "Target launch error. Check browser logs for details.")
                    log_path = mkdtemp(prefix="grzreplay_logs_")
                    self.target.save_logs(log_path)
                    self._reports_other["STARTUP"] = Report.from_path(log_path)
                    raise
            self.target.step()
            LOG.info("Performing replay (%d/%d)...", self.status.iteration,
                     repeat)
            # run test case
            self._runner.run(self.ignore,
                             server_map,
                             self.testcase,
                             wait_for_callback=self._harness is None)
            # process results
            if self._runner.result == self._runner.FAILED:
                log_path = mkdtemp(prefix="grzreplay_logs_")
                self.target.save_logs(log_path)
                report = Report.from_path(log_path)
                # check signatures
                crash_info = report.crash_info(self.target.binary)
                short_sig = crash_info.createShortSignature()
                if (not self._any_crash and self._signature is None
                        and short_sig != "No crash detected"):
                    # signature has not been specified, use the first one created
                    self._signature = report.crash_signature(crash_info)
                if short_sig == "No crash detected":
                    # TODO: verify report.major == "NO_STACK" otherwise FM failed to parse the logs
                    # TODO: change this to support hangs/timeouts, etc
                    LOG.info("Result: No crash detected")
                    crash_hash = None
                elif self._any_crash or self._signature.matches(crash_info):
                    self.status.results += 1
                    LOG.info("Result: %s (%s:%s)", short_sig, report.major[:8],
                             report.minor[:8])
                    crash_hash = report.crash_hash(crash_info)
                    if crash_hash not in self._reports_expected:
                        LOG.debug("now tracking %s", crash_hash)
                        self._reports_expected[crash_hash] = report
                        report = None  # don't remove report
                    assert self._any_crash or len(self._reports_expected) == 1
                else:
                    LOG.info("Result: Different signature: %s (%s:%s)",
                             short_sig, report.major[:8], report.minor[:8])
                    self.status.ignored += 1
                    crash_hash = report.crash_hash(crash_info)
                    if crash_hash not in self._reports_other:
                        LOG.debug("now tracking %s", crash_hash)
                        self._reports_other[crash_hash] = report
                        report = None  # don't remove report
                # purge untracked report
                if report is not None:
                    if crash_hash is not None:
                        LOG.debug("already tracking %s", crash_hash)
                    report.cleanup()
                    report = None
            elif self._runner.result == self._runner.IGNORED:
                self.status.ignored += 1
                LOG.info("Result: Ignored (%d)", self.status.ignored)
            elif self._runner.result == self._runner.ERROR:
                LOG.error(
                    "ERROR: Replay malfunction, test case was not served")
                break

            # check status and exit early if possible
            if repeat - self.status.iteration + self.status.results < min_results:
                if self.status.iteration < repeat:
                    LOG.debug("skipping remaining attempts")
                # failed to reproduce issue
                LOG.debug("results (%d) < expected (%s) after %d attempts",
                          self.status.results, min_results,
                          self.status.iteration)
                break
            if self.status.results >= min_results:
                assert self.status.results == min_results
                success = True
                LOG.debug("results == expected (%s) after %d attempts",
                          min_results, self.status.iteration)
                break

            # warn about large browser logs
            #self.status.log_size = self.target.log_size()
            #if self.status.log_size > self.TARGET_LOG_SIZE_WARN:
            #    LOG.warning("Large browser logs: %dMBs", (self.status.log_size / 0x100000))

            # trigger relaunch by closing the browser if needed
            self.target.check_relaunch()

        if success:
            LOG.info("Result successfully reproduced")
        else:
            LOG.info("Failed to reproduce results")
        self.target.close()
        return success