Example #1
    def test_pre_and_post_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []
        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "example.krun"))

        config.PRE_EXECUTION_CMDS = ["pre1", "pre2"]
        config.POST_EXECUTION_CMDS = ["post1", "post2"]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        expect = ["pre1", "pre2", "post1", "post2"] * 8
        assert cap_cmds == expect
Example #2
    def test_num_emails_sent_persists0001(self, monkeypatch, mock_platform):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        sched.mailer.recipients = ["noone@localhost"]

        assert sched.manifest.num_mails_sent == 0
        sched.mailer.send("subject", "body", manifest=sched.manifest)
        assert sched.manifest.num_mails_sent == 1
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # suppose a reboot happened now
        del sched
        del config
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        assert sched.manifest.num_mails_sent == 1
        os.unlink(sched.manifest.path)
Example #3
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog,
                               no_results_instantiation_check):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #4
    def test_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]
        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)

        elems = got.split(":")

        assert elems[0].endswith(".json.bz2")
        assert elems[1].endswith(".log")
Example #5
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog,
                           no_results_instantiation_check):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config)

        # Simulate a boot loop
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots()  # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #6
    def test_pre_and_post_cmds0002(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        sched.run()

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
Example #7
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config, monkeypatch)

        # Simulate a boot loop
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots() # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Example #8
def run_with_captured_reboots(config, platform, monkeypatch):
    """Runs a session to completion using exceptions to capture reboots

    Returns the number of reboots and the last scheduler"""

    no_envlogs(monkeypatch)
    make_reboot_raise(monkeypatch)
    krun.util.assign_platform(config, platform)
    reboots = 0

    manifest = emulate_first_reboot(platform, config, monkeypatch)
    if manifest.num_execs_left == 0:
        sched = ExecutionScheduler(config, platform.mailer, platform,
                                   dry_run=True)
        return reboots, sched
    reboots += 1

    # Run the main benchmarking loop
    while True:
        sched = ExecutionScheduler(config, platform.mailer, platform, dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            reboots += 1
        else:
            # normal exit() from run -- schedule finished
            break

    return reboots, sched
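
A minimal usage sketch (not part of the original suite, illustration only): a hypothetical test that drives a whole session through run_with_captured_reboots and then inspects the manifest, reusing the mock_platform and monkeypatch fixtures from the surrounding tests.

    def test_complete_session_sketch(self, monkeypatch, mock_platform):
        # Hypothetical test, assuming the same fixtures as the tests above.
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                   monkeypatch)
        # The schedule ran to completion: nothing is left in the manifest and
        # at least the initial (emulated) reboot was captured.
        assert sched.manifest.num_execs_left == 0
        assert reboots >= 1
        os.unlink(sched.manifest.path)
        os.unlink(config.results_filename())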
Example #9
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #10
def get_session_info(config):
    """Gets information about the session (for --info)

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ScheduleEmpty, ExecutionScheduler
    from krun.platform import detect_platform

    platform = detect_platform(None)
    sched = ExecutionScheduler(config, None, platform)
    non_skipped_keys, skipped_keys = sched.build_schedule()

    n_proc_execs = 0
    n_in_proc_iters = 0

    while True:
        try:
            job = sched.next_job()
        except ScheduleEmpty:
            break

        n_proc_execs += 1
        n_in_proc_iters += job.vm_info["n_iterations"]

    return {
        "n_proc_execs": n_proc_execs,
        "n_in_proc_iters": n_in_proc_iters,
        "skipped_keys": skipped_keys,
        "non_skipped_keys": non_skipped_keys,
    }
Example #11
def get_session_info(config):
    """Gets information about the session (for --info)

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ScheduleEmpty, ExecutionScheduler
    from krun.platform import detect_platform

    platform = detect_platform(None)
    sched = ExecutionScheduler(config, None, platform)
    non_skipped_keys, skipped_keys = sched.build_schedule()

    n_proc_execs = 0
    n_in_proc_iters = 0

    while True:
        try:
            job = sched.next_job()
        except ScheduleEmpty:
            break

        n_proc_execs += 1
        n_in_proc_iters += job.vm_info["n_iterations"]

    return {
        "n_proc_execs": n_proc_execs,
        "n_in_proc_iters": n_in_proc_iters,
        "skipped_keys": skipped_keys,
        "non_skipped_keys": non_skipped_keys,
    }
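
For orientation, a sketch of how the returned dict might be consumed; this is a hypothetical, simplified stand-in for print_session_info, and the output format is illustrative only.

def print_session_info_sketch(config):
    # Hypothetical consumer of get_session_info() (illustration only).
    info = get_session_info(config)
    print("Process executions:    %d" % info["n_proc_execs"])
    print("In-process iterations: %d" % info["n_in_proc_iters"])
    print("Non-skipped keys:      %d" % len(info["non_skipped_keys"]))
    print("Skipped keys:          %d" % len(info["skipped_keys"]))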
Example #12
 def test_part_complete_schedule(self, mock_platform):
     sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "quick.krun")),
                                mock_platform.mailer,
                                mock_platform, resume=True,
                                reboot=True, dry_run=True,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 0
Example #13
 def test_queue_len0001(self, mock_platform):
     config_path = os.path.join(TEST_DIR, "more_complicated.krun")
     sched = ExecutionScheduler(Config(config_path),
                                mock_platform.mailer,
                                mock_platform, resume=False,
                                reboot=True, dry_run=False,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 90  # taking into account skips
Example #14
 def test_queue_len0001(self, mock_platform):
     config_path = os.path.join(TEST_DIR, "more_complicated.krun")
     sched = ExecutionScheduler(Config(config_path),
                                mock_platform.mailer,
                                mock_platform,
                                resume=False,
                                reboot=True,
                                dry_run=False,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 90  # taking into account skips
Example #15
 def test_part_complete_schedule(self, mock_platform):
     sched = ExecutionScheduler(Config(os.path.join(TEST_DIR,
                                                    "quick.krun")),
                                mock_platform.mailer,
                                mock_platform,
                                resume=True,
                                reboot=True,
                                dry_run=True,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 0
Example #16
    def test_etas_dont_agree_with_schedule(self, mock_platform):
        """ETAs don't exist for all jobs for which there is iterations data"""

        sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "broken_etas.krun")),
                                   mock_platform.mailer, mock_platform,
                                   resume=True, reboot=False, dry_run=True,
                                   started_by_init=False)
        try:
            sched.build_schedule()
        except krun.util.FatalKrunError:
            pass
        else:
            assert False, "Krun did not exit when ETAs failed to tally with results!"
Example #17
    def test_etas_dont_agree_with_schedule(self, mock_platform):
        """ETAs don't exist for all jobs for which there is iterations data"""

        sched = ExecutionScheduler(Config(
            os.path.join(TEST_DIR, "broken_etas.krun")),
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=True,
                                   reboot=False,
                                   dry_run=True,
                                   started_by_init=False)
        try:
            sched.build_schedule()
        except krun.util.FatalKrunError:
            pass
        else:
            assert False, "Krun did not exit when ETAs failed to tally with results!"
Example #18
 def test_build_schedule(self, mock_platform):
     sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "example.krun")),
                                mock_platform.mailer,
                                mock_platform, resume=False,
                                reboot=True, dry_run=True,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 8
     dummy_py = ExecutionJob(sched, "CPython", "", "dummy",
                             "default-python", 1000)
     dummy_java = ExecutionJob(sched, "Java", "", "dummy", "default-java", 1000)
     nbody_py = ExecutionJob(sched, "CPython", "", "nbody",
                             "default-python", 1000)
     nbody_java = ExecutionJob(sched, "Java", "", "nbody", "default-java", 1000)
     assert sched.work_deque.count(dummy_py) == 2
     assert sched.work_deque.count(dummy_java) == 2
     assert sched.work_deque.count(nbody_py) == 2
     assert sched.work_deque.count(nbody_java) == 2
Example #19
    def test_error_flag_persists0001(self, monkeypatch, mock_platform,
                                     no_results_instantiation_check):
        """Check a failing exec will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend exec fails
        def dummy_job_run(self, mailer, dry):
            measurements = self.make_empty_measurement()
            return measurements, None, 'E'  # measurements, instr_data, flag

        monkeypatch.setattr(ExecutionJob, 'run', dummy_job_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run a (failing) execution, which will dump the results file
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #20
    def test_error_flag_persists0002(self, monkeypatch, mock_platform,
                                     no_results_instantiation_check):
        """Check a changed dmesg will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend dmesg changes
        def dummy_check_for_dmesg_changes(self):
            return True

        monkeypatch.setattr(mock_platform, 'check_dmesg_for_changes',
                            dummy_check_for_dmesg_changes)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run an execution where the dmesg changes
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #21
 def test_run_schedule_reboot(self, monkeypatch, mock_platform):
     def dummy_shell_cmd(text):
         pass
     def dummy_execv(text, lst):
         pass
     monkeypatch.setattr(os, "execv", dummy_execv)
     monkeypatch.setattr(subprocess, "call", dummy_shell_cmd)
     monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_shell_cmd)
     config = Config(os.path.join(TEST_DIR, "example.krun"))
     krun.util.assign_platform(config, mock_platform)
     sched = ExecutionScheduler(config,
                                mock_platform.mailer,
                                mock_platform, resume=False,
                                reboot=True, dry_run=True,
                                started_by_init=True)
     sched.build_schedule()
     assert len(sched) == 8
     with pytest.raises(AssertionError):
         sched.run()
     assert len(sched) == 7
     os.unlink(os.path.join(TEST_DIR, "example_results.json.bz2"))
Example #22
    def test_num_emails_sent_persists0001(self, monkeypatch, mock_platform,
                                          no_results_instantiation_check):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        sched.mailer.recipients = ["noone@localhost"]

        assert sched.manifest.num_mails_sent == 0
        sched.mailer.send("subject", "body", manifest=sched.manifest)
        assert sched.manifest.num_mails_sent == 1
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # suppose a reboot happened now
        del sched
        del config
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        assert sched.manifest.num_mails_sent == 1
        os.unlink(sched.manifest.path)
Example #23
    def test_pre_and_post_cmds0002(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        sched.run()

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
Example #24
def run_with_captured_reboots(config, platform, monkeypatch):
    """Runs a session to completion using exceptions to capture reboots

    Returns the number of reboots and the last scheduler"""

    no_envlogs(monkeypatch)
    make_reboot_raise(monkeypatch)
    krun.util.assign_platform(config, platform)
    reboots = 0

    manifest = emulate_first_reboot(platform, config)
    if manifest.num_execs_left == 0:
        sched = ExecutionScheduler(config,
                                   platform.mailer,
                                   platform,
                                   dry_run=True)
        return reboots, sched
    reboots += 1

    # Run the main benchmarking loop
    while True:
        sched = ExecutionScheduler(config,
                                   platform.mailer,
                                   platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            reboots += 1
        else:
            # normal exit() from run -- schedule finished
            break

    return reboots, sched
Example #25
    def test_pre_and_post_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []

        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "example.krun"))

        config.PRE_EXECUTION_CMDS = ["pre1", "pre2"]
        config.POST_EXECUTION_CMDS = ["post1", "post2"]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        expect = ["pre1", "pre2", "post1", "post2"] * 8
        assert cap_cmds == expect
Example #26
    def test_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]
        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)

        elems = got.split(":")

        assert elems[0].endswith(".json.bz2")
        assert elems[1].endswith(".log")
Example #27
    def test_run_schedule_reboot(self, monkeypatch, mock_platform):
        def dummy_shell_cmd(text):
            pass

        def dummy_execv(text, lst):
            pass

        monkeypatch.setattr(os, "execv", dummy_execv)
        monkeypatch.setattr(subprocess, "call", dummy_shell_cmd)
        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=True,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        with pytest.raises(AssertionError):
            sched.run()
        assert len(sched) == 7
        os.unlink(os.path.join(TEST_DIR, "example_results.json.bz2"))
Example #28
    def test_run_schedule(self, monkeypatch, mock_platform):
        json_file = os.path.join(TEST_DIR, "example_results.json.bz2")
        def dummy_shell_cmd(text):
            pass
        monkeypatch.setattr(subprocess, 'call', dummy_shell_cmd)
        monkeypatch.setattr(krun.util, 'run_shell_cmd', dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   reboot=False, dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()
        assert len(sched) == 0

        results = Results(Config(os.path.join(TEST_DIR, "example.krun")),
                          mock_platform, results_file=json_file)

        for k, execs in results.data.iteritems():
            assert type(execs) is list
            for one_exec in execs:
                assert type(one_exec) is list
                assert all([type(x) is float for x in one_exec])

        for k, execs in results.eta_estimates.iteritems():
            assert type(execs) is list
            assert all([type(x) is float for x in execs])

        assert type(results.starting_temperatures) is dict
        assert type(results.reboots) is int
        assert type(results.audit) is type(Audit(dict()))
        assert type(results.config) is type(Config())
        assert type(results.error_flag) is bool

        os.unlink(json_file)
Example #29
 def test_build_schedule(self, mock_platform):
     sched = ExecutionScheduler(Config(
         os.path.join(TEST_DIR, "example.krun")),
                                mock_platform.mailer,
                                mock_platform,
                                resume=False,
                                reboot=True,
                                dry_run=True,
                                started_by_init=False)
     sched.build_schedule()
     assert len(sched) == 8
     dummy_py = ExecutionJob(sched, "CPython", "", "dummy",
                             "default-python", 1000)
     dummy_java = ExecutionJob(sched, "Java", "", "dummy", "default-java",
                               1000)
     nbody_py = ExecutionJob(sched, "CPython", "", "nbody",
                             "default-python", 1000)
     nbody_java = ExecutionJob(sched, "Java", "", "nbody", "default-java",
                               1000)
     assert sched.work_deque.count(dummy_py) == 2
     assert sched.work_deque.count(dummy_java) == 2
     assert sched.work_deque.count(nbody_py) == 2
     assert sched.work_deque.count(nbody_java) == 2
Example #30
    def test_error_flag_persists0002(self, monkeypatch, mock_platform):
        """Check a changed dmesg will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend dmesg changes
        def dummy_check_for_dmesg_changes(self):
            return True
        monkeypatch.setattr(mock_platform, 'check_dmesg_for_changes',
                            dummy_check_for_dmesg_changes)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run an execution where the dmesg changes
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #31
    def test_error_flag_persists0001(self, monkeypatch, mock_platform):
        """Check a failing exec will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend exec fails
        def dummy_job_run(self, mailer, dry):
            measurements = self.make_empty_measurement()
            return measurements, None, 'E'  # measurements, instr_data, flag
        monkeypatch.setattr(ExecutionJob, 'run', dummy_job_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run a (failing) execution, which will dump the results file
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Example #32
 def test_add_del_job(self, mock_platform):
     sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "example.krun")),
                                mock_platform.mailer,
                                mock_platform, resume=False,
                                reboot=True, dry_run=False,
                                started_by_init=False)
     assert len(sched) == 0
     sched.add_job(ExecutionJob(sched, "CPython", "", "mybench",
                                "default-python", 1000))
     assert len(sched) == 1
     sched.remove_job_by_key("mybench:CPython:default-python")
     assert len(sched) == 0
     with pytest.raises(JobMissingError):
         sched.remove_job_by_key("mybench:HHVM:default-php")
Example #33
 def test_add_del_job(self, mock_platform):
     sched = ExecutionScheduler(Config(
         os.path.join(TEST_DIR, "example.krun")),
                                mock_platform.mailer,
                                mock_platform,
                                resume=False,
                                reboot=True,
                                dry_run=False,
                                started_by_init=False)
     assert len(sched) == 0
     sched.add_job(
         ExecutionJob(sched, "CPython", "", "mybench", "default-python",
                      1000))
     assert len(sched) == 1
     sched.remove_job_by_key("mybench:CPython:default-python")
     assert len(sched) == 0
     with pytest.raises(JobMissingError):
         sched.remove_job_by_key("mybench:HHVM:default-php")
Example #34
    def test_run_schedule(self, monkeypatch, mock_platform):
        json_file = os.path.join(TEST_DIR, "example_results.json.bz2")

        def dummy_shell_cmd(text):
            pass

        monkeypatch.setattr(subprocess, 'call', dummy_shell_cmd)
        monkeypatch.setattr(krun.util, 'run_shell_cmd', dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=False,
                                   dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()
        assert len(sched) == 0

        results = Results(Config(os.path.join(TEST_DIR, "example.krun")),
                          mock_platform,
                          results_file=json_file)

        for k, execs in results.data.iteritems():
            assert type(execs) is list
            for one_exec in execs:
                assert type(one_exec) is list
                assert all([type(x) is float for x in one_exec])

        for k, execs in results.eta_estimates.iteritems():
            assert type(execs) is list
            assert all([type(x) is float for x in execs])

        assert type(results.starting_temperatures) is dict
        assert type(results.reboots) is int
        assert type(results.audit) is type(Audit(dict()))
        assert type(results.config) is type(Config())
        assert type(results.error_flag) is bool

        os.unlink(json_file)
Example #35
def inner_main(mailer, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and not args.resume:
        util.fatal("Output file '%s' already exists. "
                   "Either resume the session (--resume) or "
                   "move the file away" % out_file)

    if not out_file_exists and args.resume:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    if args.started_by_init and not args.reboot:
        util.fatal("--started-by-init makes no sense without --reboot")

    if args.started_by_init and not args.resume:
        util.fatal("--started-by-init makes no sense without --resume")

    if args.develop:
        warn("Developer mode enabled. Results will not be reliable.")

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer)

    if not args.develop:
        debug("Checking platform preliminaries")
        platform.check_preliminaries()
    else:
        # Needed to skip the use of certain tools and techniques.
        # E.g. switching user.
        warn("Not checking platform prerequisites due to developer mode")
        platform.developer_mode = True

    platform.collect_audit()

    # If the user has asked for resume-mode, the current machine must be
    # identical to the one on which the previous results were gathered.
    error_msg = ("You have asked Krun to resume an interrupted benchmark. " +
                 "This is only valid if the machine you are using is " +
                 "identical to the one on which the last results were " +
                 "gathered, which is not the case.")
    current = None
    if args.resume:
        # output file must exist, due to check above
        assert(out_file_exists)
        current = Results(config, platform, results_file=out_file)
        from krun.audit import Audit
        if not Audit(platform.audit) == current.audit:
            util.fatal(error_msg)

        debug("Using pre-recorded initial temperature readings")
        platform.starting_temperatures = current.starting_temperatures
    else:
        # Touch the config file to update its mtime. This is required
        # by resume-mode which uses the mtime to determine the name of
        # the log file, should this benchmark be resumed.
        _, _, rc = util.run_shell_cmd("touch " + args.filename)
        if rc != 0:
            util.fatal("Could not touch config file: " + args.filename)

        info(("Wait %s secs to allow system to cool prior to "
             "collecting initial temperature readings") %
             config.TEMP_READ_PAUSE)

        if args.develop or args.dry_run:
            info("SIMULATED: time.sleep(%s)" % config.TEMP_READ_PAUSE)
        else:
            time.sleep(config.TEMP_READ_PAUSE)

        debug("Taking fresh initial temperature readings")
        platform.starting_temperatures = platform.take_temperature_readings()

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               resume=args.resume,
                               reboot=args.reboot,
                               dry_run=args.dry_run,
                               started_by_init=args.started_by_init)
    sched.build_schedule()
    sched.run()
Example #36
def inner_main(mailer, on_first_invocation, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    instr_dir = util.get_instr_json_dir(config)
    instr_dir_exists = os.path.exists(instr_dir)

    envlog_dir = util.get_envlog_dir(config)
    envlog_dir_exists = os.path.exists(envlog_dir)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and on_first_invocation:
        util.fatal("Output results file '%s' already exists. "
                   "Move the file away before running Krun." % out_file)

    if instr_dir_exists and on_first_invocation:
        util.fatal("Instrumentation dir '%s' exists." % instr_dir)

    if envlog_dir_exists and on_first_invocation:
        util.fatal("Env log dir '%s' exists." % envlog_dir)

    if not out_file_exists and not on_first_invocation:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer, config)

    platform.quick_mode = args.quick
    platform.no_user_change = args.no_user_change
    platform.no_tickless_check = args.no_tickless_check
    platform.no_pstate_check = args.no_pstate_check
    platform.hardware_reboots = args.hardware_reboots

    # Create the instrumentation directory if required
    if on_first_invocation:
        # We only want to make a dir if >=1 VM is in instrumentation mode.
        for vm in config.VMS.itervalues():
            if vm['vm_def'].instrument:
                util.make_instr_dir(config)
                break

    debug("Checking platform preliminaries")
    platform.check_preliminaries()

    # Make a bit of noise if this is a virtualised environment
    if platform.is_virtual():
        warn("This appears to be a virtualised host. The results will be flawed. "
             "Use bare-metal for reliable results!")

    platform.collect_audit()

    # At this point the config file is OK, and on-disk state is consistent,
    # so let's daemonise (if requested).
    if args.daemonise:
        util.daemonise()

    if not on_first_invocation:
        # output file must exist, due to check above
        assert(out_file_exists)

        debug("Using pre-recorded initial temperature readings")
        manifest = ManifestManager(config, platform)

        platform_temps = {}
        for sensor, tup in manifest.starting_temperatures.iteritems():
            platform_temps[sensor] = tup[1]
        platform.starting_temperatures = platform_temps
    else:
        manifest = ManifestManager(config, platform, new_file=True)
        if manifest.num_execs_left == 0:
            # No executions, or all skipped
            fatal("Empty schedule!")

        try:
            info(("Wait %s secs to allow system to cool prior to "
                 "collecting initial temperature readings") %
                 config.TEMP_READ_PAUSE)

            # This part is wrapped in hooks, so that if daemons or networking are
            # taken down for process executions, then the initial temperature
            # reading gets the same treatment.
            util.run_shell_cmd_list(config.PRE_EXECUTION_CMDS,)
            platform.sleep(config.TEMP_READ_PAUSE)

            debug("Taking fresh initial temperature readings")
            platform.starting_temperatures = platform.take_temperature_readings()
            manifest.set_starting_temperatures(platform.starting_temperatures)

            # Write out an empty results file. After the initial reboot Krun
            # will expect this to exist.
            Results.ok_to_instantiate = True
            results = Results(config, platform)
            results.write_to_file()
        except:
            raise
        finally:
            util.run_shell_cmd_list(config.POST_EXECUTION_CMDS,)

        log_path = config.log_filename(resume=False)
        util.log_and_mail(mailer, debug,
                          "Benchmarking started",
                          "Benchmarking started.\nLogging to %s" %
                          log_path, bypass_limiter=True)

        util.reboot(manifest, platform)

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               dry_run=args.dry_run)
    sched.run()
Example #37
def inner_main(mailer, on_first_invocation, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    instr_dir = util.get_instr_json_dir(config)
    instr_dir_exists = os.path.exists(instr_dir)

    envlog_dir = util.get_envlog_dir(config)
    envlog_dir_exists = os.path.exists(envlog_dir)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal("Output file '%s' exists but is not a regular file" %
                   out_file)

    if out_file_exists and on_first_invocation:
        util.fatal("Output results file '%s' already exists. "
                   "Move the file away before running Krun." % out_file)

    if instr_dir_exists and on_first_invocation:
        util.fatal("Instrumentation dir '%s' exists." % instr_dir)

    if envlog_dir_exists and on_first_invocation:
        util.fatal("Env log dir '%s' exists." % envlog_dir)

    if not out_file_exists and not on_first_invocation:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer, config)

    platform.quick_mode = args.quick
    platform.no_user_change = args.no_user_change
    platform.no_tickless_check = args.no_tickless_check
    platform.no_pstate_check = args.no_pstate_check
    platform.hardware_reboots = args.hardware_reboots

    # Create the instrumentation directory if required
    if on_first_invocation:
        # We only want to make a dir if >=1 VM is in instrumentation mode.
        for vm in config.VMS.itervalues():
            if vm['vm_def'].instrument:
                util.make_instr_dir(config)
                break

    debug("Checking platform preliminaries")
    platform.check_preliminaries()

    # Make a bit of noise if this is a virtualised environment
    if platform.is_virtual():
        warn(
            "This appears to be a virtualised host. The results will be flawed. "
            "Use bare-metal for reliable results!")

    platform.collect_audit()

    # At this point the config file is OK, and on-disk state is consistent,
    # so let's daemonise (if requested).
    if args.daemonise:
        util.daemonise()

    if not on_first_invocation:
        # output file must exist, due to check above
        assert (out_file_exists)

        debug("Using pre-recorded initial temperature readings")
        manifest = ManifestManager(config, platform)

        platform_temps = {}
        for sensor, tup in manifest.starting_temperatures.iteritems():
            platform_temps[sensor] = tup[1]
        platform.starting_temperatures = platform_temps
    else:
        manifest = ManifestManager(config, platform, new_file=True)
        if manifest.num_execs_left == 0:
            # No executions, or all skipped
            fatal("Empty schedule!")

        try:
            info(("Wait %s secs to allow system to cool prior to "
                  "collecting initial temperature readings") %
                 config.TEMP_READ_PAUSE)

            # This part is wrapped in hooks, so that if daemons or networking are
            # taken down for process executions, then the initial temperature
            # reading gets the same treatment.
            util.run_shell_cmd_list(config.PRE_EXECUTION_CMDS, )
            platform.sleep(config.TEMP_READ_PAUSE)

            debug("Taking fresh initial temperature readings")
            platform.starting_temperatures = platform.take_temperature_readings()
            manifest.set_starting_temperatures(platform.starting_temperatures)

            # Write out an empty results file. After the initial reboot Krun
            # will expect this to exist.
            Results.ok_to_instantiate = True
            results = Results(config, platform)
            results.write_to_file()
        except:
            raise
        finally:
            util.run_shell_cmd_list(config.POST_EXECUTION_CMDS, )

        log_path = config.log_filename(resume=False)
        util.log_and_mail(mailer,
                          debug,
                          "Benchmarking started",
                          "Benchmarking started.\nLogging to %s" % log_path,
                          bypass_limiter=True)

        util.reboot(manifest, platform)

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config, mailer, platform, dry_run=args.dry_run)
    sched.run()