Example #1
    def test_run_schedule_reboot(self, monkeypatch, mock_platform):
        def dummy_shell_cmd(text):
            pass

        def dummy_execv(text, lst):
            pass

        monkeypatch.setattr(os, "execv", dummy_execv)
        monkeypatch.setattr(subprocess, "call", dummy_shell_cmd)
        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=True,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        with pytest.raises(AssertionError):
            sched.run()
        assert len(sched) == 7
        os.unlink(os.path.join(TEST_DIR, "example_results.json.bz2"))
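These test excerpts assume some shared scaffolding that is not shown: a TEST_DIR pointing at the directory holding the example .krun config files, a mock_platform pytest fixture, and the usual imports. A minimal sketch of that scaffolding follows; the import paths and the fixture body are illustrative assumptions, not krun's actual conftest.py.

import os
import subprocess

import pytest

import krun.util
from krun.config import Config
from krun.scheduler import ExecutionScheduler, ExecutionJob
from krun.results import Results
from krun.audit import Audit

# Directory containing the example *.krun config files used by the tests.
TEST_DIR = os.path.abspath(os.path.dirname(__file__))


@pytest.fixture
def mock_platform():
    # Illustrative stand-in only: the real fixture builds a platform object
    # exposing (at least) a .mailer attribute plus the hooks the scheduler
    # touches during a dry run.
    from mock import MagicMock  # `mock` package; krun targets Python 2
    return MagicMock(name="platform")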
Example #2
    def test_pre_and_post_cmds0002(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        sched.run()

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
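This test assigns PRE_EXECUTION_CMDS and POST_EXECUTION_CMDS on an already-loaded Config; in a real session the same keys would normally live in the .krun config file itself (a .krun file is plain Python). A sketch of how that might look, with every other setting omitted:

# Hypothetical excerpt from a .krun config file; VMs, benchmarks and
# iteration counts are omitted.
PRE_EXECUTION_CMDS = [
    "echo 'about to start a process execution' >> /tmp/krun-hooks.log",
]
POST_EXECUTION_CMDS = [
    # Commands are run through a shell, so redirection and environment
    # variables such as ${KRUN_RESULTS_FILE} work (see the post-exec
    # example below).
    "echo \"results: ${KRUN_RESULTS_FILE}\" >> /tmp/krun-hooks.log",
]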
Example #3
    def test_pre_and_post_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []

        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "example.krun"))

        config.PRE_EXECUTION_CMDS = ["pre1", "pre2"]
        config.POST_EXECUTION_CMDS = ["post1", "post2"]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        expect = ["pre1", "pre2", "post1", "post2"] * 8
        assert cap_cmds == expect
Example #4
    def test_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]
        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)

        elems = got.split(":")

        assert elems[0].endswith(".json.bz2")
        assert elems[1].endswith(".log")
Example #5
    def test_part_complete_schedule(self, mock_platform):
        sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "quick.krun")),
                                   mock_platform.mailer,
                                   mock_platform, resume=True,
                                   reboot=True, dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 0
Example #6
    def test_queue_len0001(self, mock_platform):
        config_path = os.path.join(TEST_DIR, "more_complicated.krun")
        sched = ExecutionScheduler(Config(config_path),
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   reboot=True, dry_run=False,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 90  # taking into account skips
Example #7
    def test_etas_dont_agree_with_schedule(self, mock_platform):
        """ETAs don't exist for all jobs for which there is iterations data"""

        sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "broken_etas.krun")),
                                   mock_platform.mailer, mock_platform,
                                   resume=True, reboot=False, dry_run=True,
                                   started_by_init=False)
        try:
            sched.build_schedule()
        except krun.util.FatalKrunError:
            pass
        else:
            assert False, "Krun did not exit when ETAs failed to tally with results!"
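The try/except/else above works, but the same expectation reads more compactly with pytest.raises; a drop-in alternative for the last block, assuming the same imports as the surrounding tests:

        with pytest.raises(krun.util.FatalKrunError):
            sched.build_schedule()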
Example #8
File: util.py Project: bennn/krun
def get_session_info(config):
    """Gets information about the session (for --info)

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ScheduleEmpty, ExecutionScheduler
    from krun.platform import detect_platform

    platform = detect_platform(None)
    sched = ExecutionScheduler(config, None, platform)
    non_skipped_keys, skipped_keys = sched.build_schedule()

    n_proc_execs = 0
    n_in_proc_iters = 0

    while True:
        try:
            job = sched.next_job()
        except ScheduleEmpty:
            break

        n_proc_execs += 1
        n_in_proc_iters += job.vm_info["n_iterations"]

    return {
        "n_proc_execs": n_proc_execs,
        "n_in_proc_iters": n_in_proc_iters,
        "skipped_keys": skipped_keys,
        "non_skipped_keys": non_skipped_keys,
    }
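The docstring notes that get_session_info is split out from print_session_info for testability. A minimal sketch of how the returned dictionary might be consumed follows; the report format is illustrative, not krun's actual print_session_info:

def print_session_info(config):
    # Illustrative consumer of get_session_info(); the keys match the
    # dictionary built above.
    info = get_session_info(config)
    print("Process executions remaining: %d" % info["n_proc_execs"])
    print("In-process iterations remaining: %d" % info["n_in_proc_iters"])
    print("Non-skipped keys: %d" % len(info["non_skipped_keys"]))
    print("Skipped keys: %d" % len(info["skipped_keys"]))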
Example #9
    def test_build_schedule(self, mock_platform):
        sched = ExecutionScheduler(Config(os.path.join(TEST_DIR, "example.krun")),
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   reboot=True, dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 8
        dummy_py = ExecutionJob(sched, "CPython", "", "dummy",
                                "default-python", 1000)
        dummy_java = ExecutionJob(sched, "Java", "", "dummy", "default-java",
                                  1000)
        nbody_py = ExecutionJob(sched, "CPython", "", "nbody",
                                "default-python", 1000)
        nbody_java = ExecutionJob(sched, "Java", "", "nbody", "default-java",
                                  1000)
        assert sched.work_deque.count(dummy_py) == 2
        assert sched.work_deque.count(dummy_java) == 2
        assert sched.work_deque.count(nbody_py) == 2
        assert sched.work_deque.count(nbody_java) == 2
Example #10
    def test_run_schedule(self, monkeypatch, mock_platform):
        json_file = os.path.join(TEST_DIR, "example_results.json.bz2")

        def dummy_shell_cmd(text):
            pass

        monkeypatch.setattr(subprocess, 'call', dummy_shell_cmd)
        monkeypatch.setattr(krun.util, 'run_shell_cmd', dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=False,
                                   dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()
        assert len(sched) == 0

        results = Results(Config(os.path.join(TEST_DIR, "example.krun")),
                          mock_platform,
                          results_file=json_file)

        for k, execs in results.data.iteritems():
            assert type(execs) is list
            for one_exec in execs:
                assert type(one_exec) is list
                assert all([type(x) is float for x in one_exec])

        for k, execs in results.eta_estimates.iteritems():
            assert type(execs) is list
            assert all([type(x) is float for x in execs])

        assert type(results.starting_temperatures) is dict
        assert type(results.reboots) is int
        assert type(results.audit) is type(Audit(dict()))
        assert type(results.config) is type(Config())
        assert type(results.error_flag) is bool

        os.unlink(json_file)
Example #11
File: krun.py Project: bennn/krun
def inner_main(mailer, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and not args.resume:
        util.fatal("Output file '%s' already exists. "
                   "Either resume the session (--resume) or "
                   "move the file away" % out_file)

    if not out_file_exists and args.resume:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    if args.started_by_init and not args.reboot:
        util.fatal("--started-by-init makes no sense without --reboot")

    if args.started_by_init and not args.resume:
        util.fatal("--started-by-init makes no sense without --resume")

    if args.develop:
        warn("Developer mode enabled. Results will not be reliable.")

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer)

    if not args.develop:
        debug("Checking platform preliminaries")
        platform.check_preliminaries()
    else:
        # Needed to skip the use of certain tools and techniques.
        # E.g. switching user.
        warn("Not checking platform prerequisites due to developer mode")
        platform.developer_mode = True

    platform.collect_audit()

    # If the user has asked for resume-mode, the machine being used must be
    # identical to the one on which the original results were gathered.
    error_msg = ("You have asked Krun to resume an interrupted benchmark. " +
                 "This is only valid if the machine you are using is " +
                 "identical to the one on which the last results were " +
                 "gathered, which is not the case.")
    current = None
    if args.resume:
        # output file must exist, due to check above
        assert(out_file_exists)
        current = Results(config, platform, results_file=out_file)
        from krun.audit import Audit
        if not Audit(platform.audit) == current.audit:
            util.fatal(error_msg)

        debug("Using pre-recorded initial temperature readings")
        platform.starting_temperatures = current.starting_temperatures
    else:
        # Touch the config file to update its mtime. This is required
        # by resume-mode which uses the mtime to determine the name of
        # the log file, should this benchmark be resumed.
        _, _, rc = util.run_shell_cmd("touch " + args.filename)
        if rc != 0:
            util.fatal("Could not touch config file: " + args.filename)

        info(("Wait %s secs to allow system to cool prior to "
             "collecting initial temperature readings") %
             config.TEMP_READ_PAUSE)

        if args.develop or args.dry_run:
            info("SIMULATED: time.sleep(%s)" % config.TEMP_READ_PAUSE)
        else:
            time.sleep(config.TEMP_READ_PAUSE)

        debug("Taking fresh initial temperature readings")
        platform.starting_temperatures = platform.take_temperature_readings()

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               resume=args.resume,
                               reboot=args.reboot,
                               dry_run=args.dry_run,
                               started_by_init=args.started_by_init)
    sched.build_schedule()
    sched.run()
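inner_main expects an args namespace carrying resume, reboot, dry_run, started_by_init, develop and filename. A hedged sketch of argparse wiring that could supply it; only --resume, --reboot and --started-by-init are spelled out in the error messages above, so the remaining flag names and the mailer handling are assumptions rather than krun's actual entry point:

import argparse

def main():
    parser = argparse.ArgumentParser(description="Run a Krun benchmarking session")
    parser.add_argument("filename", help="path to the .krun config file")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument("--reboot", action="store_true")
    parser.add_argument("--dry-run", dest="dry_run", action="store_true")
    parser.add_argument("--started-by-init", dest="started_by_init",
                        action="store_true")
    parser.add_argument("--develop", action="store_true")
    args = parser.parse_args()

    config = Config(args.filename)
    # Mailer construction omitted: krun builds a mailer from the config.
    # Passing None here is only suitable for a rough local dry run.
    inner_main(None, config, args)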