Example #1
    def test_run_schedule0005(self, mock_platform, monkeypatch,
                              no_results_instantiation_check):
        def dummy_execjob_run(self, mailer, dryrun=False):
            return self.empty_measurements, {}, "E"  # pretend jobs fail

        monkeypatch.setattr(ExecutionJob, 'run', dummy_execjob_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config,
                          mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2

            for _exec in execs:
                assert len(_exec) == 0  # due to error

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
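
The dummy above is installed with pytest's monkeypatch fixture, which swaps ExecutionJob.run out for the duration of the test and restores it at teardown. A minimal, self-contained sketch of the same pattern (the Job class here is made up for illustration):

    class Job(object):
        def run(self):
            return "real result"

    def test_monkeypatched_run(monkeypatch):
        def dummy_run(self):
            return "fake result"  # stands in for the real method

        # patch on the class, so every instance picks up the dummy
        monkeypatch.setattr(Job, "run", dummy_run)
        assert Job().run() == "fake result"
        # monkeypatch restores the original automatically at test teardown
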
Example #2
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text()

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
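
Note that this snippet calls caplog.text() as a method, which matches older versions of pytest's log-capture plugin; in modern pytest, caplog.text is a plain string property, as Examples #6 and #7 below use. A minimal sketch against the property form:

    import logging

    def test_caplog_property(caplog):
        logging.getLogger("demo").error("audit mismatch detected")
        # modern pytest: caplog.text is a property holding the captured output
        assert "audit mismatch detected" in caplog.text
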
Example #3
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config, monkeypatch)

        # Simulate a boot loop
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots()  # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text()

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
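
The pytest.raises context manager used above passes only if the named exception is raised inside the with-block. A minimal sketch of the idiom with a stand-in exception and message:

    import pytest

    class FatalDemoError(Exception):
        pass

    def halt():
        raise FatalDemoError("HALTING now to prevent a loop")

    def test_halt_raises():
        with pytest.raises(FatalDemoError) as excinfo:
            halt()
        # the raised exception is inspectable after the with-block
        assert "HALTING" in str(excinfo.value)
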
Example #4
    def test_pre_and_post_exec_cmds0002(self, monkeypatch, mock_platform,
                                        no_results_instantiation_check):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE}:${KRUN_MANIFEST_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]

        krun.util.assign_platform(config, mock_platform)
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        elems = got.split(":")
        assert os.path.basename(elems[0]) == "one_exec_results.json.bz2"
        assert os.path.basename(elems[1]) == "one_exec.log"
        assert os.path.basename(elems[2]) == "one_exec.manifest"

        # all paths should be in the same dir
        dirnames = [os.path.dirname(x) for x in elems]
        assert dirnames[0] == dirnames[1] == dirnames[2]
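
The ${KRUN_RESULTS_FILE}-style references expand only because the post-execution command is run by a shell with those variables present in its environment. A minimal sketch of that mechanism with a made-up value (how krun itself injects the variables may differ):

    import os
    import subprocess

    env = dict(os.environ)
    env["KRUN_RESULTS_FILE"] = "/tmp/demo_results.json.bz2"  # made-up value
    # shell=True: /bin/sh performs the ${...} expansion
    out = subprocess.check_output("echo ${KRUN_RESULTS_FILE}", shell=True, env=env)
    assert out.strip() == b"/tmp/demo_results.json.bz2"
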
Example #5
    def test_pre_and_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE}:${KRUN_MANIFEST_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]

        krun.util.assign_platform(config, mock_platform)
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        elems = got.split(":")
        assert os.path.basename(elems[0]) == "one_exec_results.json.bz2"
        assert os.path.basename(elems[1]) == "one_exec.log"
        assert os.path.basename(elems[2]) == "one_exec.manifest"

        # all paths should be in the same dir
        dirnames = [os.path.dirname(x) for x in elems]
        assert dirnames[0] == dirnames[1] == dirnames[2]
Example #6
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog,
                           no_results_instantiation_check):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config)

        # Simulate a boot loop
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots()  # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #7
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog,
                               no_results_instantiation_check):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #8
def test_results_filename():
    example = os.path.join(TEST_DIR, "example.krun")
    touch(example)
    example_config = Config(example)
    # not exact match due to absolute path
    assert example_config.results_filename().endswith(
        "example_results.json.bz2")
Example #9
    def test_run_schedule0003(self, mock_platform, monkeypatch):
        config = Config(os.path.join(TEST_DIR, "example_all_skip.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 0  # all skipped!
        os.unlink(sched.manifest.path)
        os.unlink(config.results_filename())
Example #10
    def test_run_schedule0002(self, mock_platform, monkeypatch):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2
            for _exec in execs:
                assert len(_exec) == 0  # due to dry run

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #11
File: test_util.py Project: bennn/krun
def to_strip():
    from krun.platform import detect_platform
    from krun.results import Results

    path = os.path.join(TEST_DIR, "quick.krun")
    config = Config(path)

    platform = detect_platform(None)
    results = Results(config, platform, results_file=config.results_filename())
    return results
Example #12
File: test_util.py Project: bennn/krun
def to_strip():
    from krun.platform import detect_platform
    from krun.results import Results

    path = os.path.join(TEST_DIR, "quick.krun")
    config = Config(path)

    platform = detect_platform(None)
    results = Results(config, platform,
                      results_file=config.results_filename())
    return results
Example #13
    def test_run_schedule0001(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert sched.manifest.total_num_execs == 1
        assert sched.manifest.num_execs_left == 0
        assert n_reboots == 1

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 1  # 1 benchmark, 1 vm
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 1
            for _exec in execs:
                assert len(_exec) == 0  # due to dry run

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #14
    def test_run_schedule0005(self, mock_platform, monkeypatch):

        def dummy_execjob_run(self, mailer, dryrun=False):
            return self.empty_measurements, {}, "E"  # pretend jobs fail
        monkeypatch.setattr(ExecutionJob, 'run', dummy_execjob_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2

            for _exec in execs:
                assert len(_exec) == 0  # due to error

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #15
    def test_run_schedule0004(self, mock_platform, monkeypatch,
                              no_results_instantiation_check):
        config = Config(os.path.join(TEST_DIR, "example_skip_1vm.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 4  # 2 benchmarks, 2 vms, 2 execs, one VM skipped

        results = Results(config,
                          mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            if "CPython" in key:
                assert len(execs) == 0
            else:
                assert len(execs) == 2

            for _exec in execs:
                assert len(_exec) == 0  # due to dry run

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #16
    def test_pre_and_post_exec_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []
        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        config.PRE_EXECUTION_CMDS = ["cmd1", "cmd2"]
        config.POST_EXECUTION_CMDS = ["cmd3", "cmd4"]

        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        expect = ["cmd1", "cmd2", "cmd3", "cmd4"]
        assert cap_cmds == expect
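
The dummy above both stubs krun.util.run_shell_cmd, returning empty stdout/stderr and a zero exit status, and spies on it by appending each command to cap_cmds. The same capture-list pattern in isolation, against a hypothetical module spy:

    # spy.py -- hypothetical stand-in for krun.util
    def run_cmd(cmd):
        raise RuntimeError("must not run for real under test")

    # test_spy.py
    import spy

    def test_commands_are_recorded(monkeypatch):
        captured = []

        def fake_run_cmd(cmd):
            captured.append(cmd)  # spy: record the call
            return "", "", 0      # stub: stdout, stderr, exit status

        monkeypatch.setattr(spy, "run_cmd", fake_run_cmd)
        spy.run_cmd("cmd1")
        spy.run_cmd("cmd2")
        assert captured == ["cmd1", "cmd2"]
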
Example #17
    def test_error_flag_persists0002(self, monkeypatch, mock_platform,
                                     no_results_instantiation_check):
        """Check a changed dmesg will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend dmesg changes
        def dummy_check_for_dmesg_changes(self):
            return True

        monkeypatch.setattr(mock_platform, 'check_dmesg_for_changes',
                            dummy_check_for_dmesg_changes)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run an execution where the dmesg changes
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #18
    def test_error_flag_persists0001(self, monkeypatch, mock_platform,
                                     no_results_instantiation_check):
        """Check a failing exec will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend exec fails
        def dummy_job_run(self, mailer, dry):
            measurements = self.make_empty_measurement()
            return measurements, None, 'E'  # measurements, instr_data, flag

        monkeypatch.setattr(ExecutionJob, 'run', dummy_job_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run a (failing) execution, which will dump the results file
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
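
The try/except/else block asserts that sched.run() raises _TestReboot (this suite's stand-in exception for a reboot); the bare else makes the test fail if nothing is raised. An equivalent, more compact form using pytest.raises would be:

    with pytest.raises(_TestReboot):
        sched.run()
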
Example #19
    def test_pre_and_post_cmds0003(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 1
        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
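
The > redirect in the commands is shell syntax, so this test passes only if krun hands the command string to a shell instead of exec(3)-ing it directly. A minimal illustration of the difference with subprocess:

    import subprocess

    # shell=True: /bin/sh interprets the redirect and the file gets written
    subprocess.check_call("echo pre > /tmp/demo-prepost.txt", shell=True)

    # exec-style: ">" is just another literal argument to echo
    out = subprocess.check_output(["echo", "pre", ">", "/tmp/demo-prepost.txt"])
    assert out == b"pre > /tmp/demo-prepost.txt\n"
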
Example #20
    def test_error_flag_persists0001(self, monkeypatch, mock_platform):
        """Check a failing exec will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend exec fails
        def dummy_job_run(self, mailer, dry):
            measurements = self.make_empty_measurement()
            return measurements, None, 'E'  # measurements, instr_data, flag
        monkeypatch.setattr(ExecutionJob, 'run', dummy_job_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run a (failing) execution, which will dump the results file
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #21
    def test_error_flag_persists0002(self, monkeypatch, mock_platform):
        """Check a changed dmesg will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend dmesg changes
        def dummy_check_for_dmesg_changes(self):
            return True
        monkeypatch.setattr(mock_platform, 'check_dmesg_for_changes',
                            dummy_check_for_dmesg_changes)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run an execution where the dmesg changes
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #22
def test_results_filename():
    example = os.path.join(TEST_DIR, "example.krun")
    touch(example)
    example_config = Config(example)
    # not exact match due to absolute path
    assert example_config.results_filename().endswith("example_results.json.bz2")