Code Example #1
File: test_scheduler.py Project: softdevteam/krun
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config, monkeypatch)

        # Simulate a boot loop
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots() # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text()

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #2
File: test_config.py Project: softdevteam/krun
def test_skip0007():
    config = Config()

    with pytest.raises(ValueError) as e:
        config.should_skip("wobble")

    assert e.value.message == "bad benchmark key: wobble"
Code Example #3
File: test_config.py Project: bennn/krun
def test_read_corrupt_config_from_string():
    path = os.path.join(TEST_DIR, "corrupt.krun")
    config = Config(None)
    with pytest.raises(Exception):
        with open(path) as fp:
            config_string = fp.read()
            config.read_from_string(config_string)
Code Example #4
File: test_scheduler.py Project: softdevteam/krun
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text()

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Code Example #5
File: test_scheduler.py Project: bennn/krun
    def test_pre_and_post_cmds0002(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform, resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        sched.run()

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
Code Example #6
File: test_scheduler.py Project: bennn/krun
    def test_pre_and_post_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []

        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "example.krun"))

        config.PRE_EXECUTION_CMDS = ["pre1", "pre2"]
        config.POST_EXECUTION_CMDS = ["post1", "post2"]

        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        expect = ["pre1", "pre2", "post1", "post2"] * 8
        assert cap_cmds == expect
Code Example #7
File: test_scheduler.py Project: warsier/krun
    def test_run_schedule0003(self, mock_platform, monkeypatch):
        config = Config(os.path.join(TEST_DIR, "example_all_skip.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 0  # all skipped!
        os.unlink(sched.manifest.path)
        os.unlink(config.results_filename())
Code Example #8
File: test_scheduler.py Project: bennn/krun
    def test_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]
        krun.util.assign_platform(config, mock_platform)

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)

        elems = got.split(":")

        assert elems[0].endswith(".json.bz2")
        assert elems[1].endswith(".log")
Code Example #9
    def test_num_emails_sent_persists0001(self, monkeypatch, mock_platform,
                                          no_results_instantiation_check):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        sched.mailer.recipients = ["noone@localhost"]

        assert sched.manifest.num_mails_sent == 0
        sched.mailer.send("subject", "body", manifest=sched.manifest)
        assert sched.manifest.num_mails_sent == 1
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # suppose a reboot happened now
        del sched
        del config
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        assert sched.manifest.num_mails_sent == 1
        os.unlink(sched.manifest.path)
Code Example #10
def test_skip0007():
    config = Config()

    with pytest.raises(ValueError) as e:
        config.should_skip("wobble")

    assert e.value.args[0] == "bad benchmark key: wobble"
Code Example #11
    def test_boot_loop0001(self, monkeypatch, mock_platform, caplog,
                           no_results_instantiation_check):
        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)

        emulate_first_reboot(mock_platform, config)

        # Simulate a boot loop
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        sched.manifest.num_reboots = 9998  # way too many
        sched.manifest.update_num_reboots()  # increments and writes out file

        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = ("HALTING now to prevent an infinite reboot loop: "
                  "INVARIANT num_reboots <= num_jobs violated. Krun was about "
                  "to execute reboot number: 10000. 1 jobs have been "
                  "completed, 7 are left to go.")
        assert expect in caplog.text

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #12
    def test_audit_differs0001(self, monkeypatch, mock_platform, caplog,
                               no_results_instantiation_check):
        """Check that if the audit differs, a crash occurs"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config)
        results_path = config.results_filename()

        # mutate the audit, so it won't match later
        results = Results(config, mock_platform, results_file=results_path)
        results.audit._audit["wibble"] = "wut"
        results.write_to_file()

        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   dry_run=True)
        with pytest.raises(krun.util.FatalKrunError):
            sched.run()

        expect = "This is only valid if the machine you are using is identical"
        assert expect in caplog.text

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Code Example #13
    def test_pre_and_post_exec_cmds0002(self, monkeypatch, mock_platform,
                                        no_results_instantiation_check):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE}:${KRUN_MANIFEST_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]

        krun.util.assign_platform(config, mock_platform)
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        elems = got.split(":")
        assert os.path.basename(elems[0]) == "one_exec_results.json.bz2"
        assert os.path.basename(elems[1]) == "one_exec.log"
        assert os.path.basename(elems[2]) == "one_exec.manifest"

        # all paths should be in the same dir
        dirnames = [os.path.dirname(x) for x in elems]
        assert dirnames[0] == dirnames[1] == dirnames[2]
Code Example #14
    def test_run_schedule0005(self, mock_platform, monkeypatch,
                              no_results_instantiation_check):
        def dummy_execjob_run(self, mailer, dryrun=False):
            return self.empty_measurements, {}, "E"  # pretend jobs fail

        monkeypatch.setattr(ExecutionJob, 'run', dummy_execjob_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config,
                          mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2

            for _exec in execs:
                assert len(_exec) == 0  # due to error

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #15
def test_results_filename():
    example = os.path.join(TEST_DIR, "example.krun")
    touch(example)
    example_config = Config(example)
    # not exact match due to absolute path
    assert example_config.results_filename().endswith(
        "example_results.json.bz2")
Code Example #16
File: test_scheduler.py Project: softdevteam/krun
    def test_pre_and_post_exec_cmds0002(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        path = os.path.join(TEST_DIR, "shell-out")
        cmd = "echo ${KRUN_RESULTS_FILE}:${KRUN_LOG_FILE}:${KRUN_MANIFEST_FILE} > %s" % path
        config.POST_EXECUTION_CMDS = [cmd]

        krun.util.assign_platform(config, mock_platform)
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())

        with open(path) as fh:
            got = fh.read().strip()

        os.unlink(path)
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        elems = got.split(":")
        assert os.path.basename(elems[0]) == "one_exec_results.json.bz2"
        assert os.path.basename(elems[1]) == "one_exec.log"
        assert os.path.basename(elems[2]) == "one_exec.manifest"

        # all paths should be in the same dir
        dirnames = [os.path.dirname(x) for x in elems]
        assert dirnames[0] == dirnames[1] == dirnames[2]
Code Example #17
File: test_config.py Project: bennn/krun
def test_read_config_from_string():
    path = os.path.join(TEST_DIR, "example.krun")
    config0 = Config(path)
    config1 = Config(None)
    with open(path) as fp:
        config_string = fp.read()
        config1.read_from_string(config_string)
        assert config0 == config1
Code Example #18
def test_check_config_consistency_fails():
    path = os.path.join(TEST_DIR, "example.krun")
    config = Config(path)
    with open(path) as fp:
        config_string = fp.read()
    with pytest.raises(Exception) as excinfo:
        config.check_config_consistency(
            config_string + "\n# different config!", "fakefilename")
    assert "+# different config!" in excinfo.value.args[0]
Code Example #19
File: test_config.py Project: softdevteam/krun
def test_check_config_consistency_fails():
    path = os.path.join(TEST_DIR, "example.krun")
    config = Config(path)
    with open(path) as fp:
        config_string = fp.read()
    with pytest.raises(Exception) as excinfo:
        config.check_config_consistency(config_string + "\n# different config!",
                                        "fakefilename")
    print excinfo.value.message
    assert "+# different config!" in excinfo.value.message
Code Example #20
File: test_util.py Project: bennn/krun
def to_strip():
    from krun.platform import detect_platform
    from krun.results import Results

    path = os.path.join(TEST_DIR, "quick.krun")
    config = Config(path)

    platform = detect_platform(None)
    results = Results(config, platform, results_file=config.results_filename())
    return results
Code Example #21
File: results.py Project: bennn/krun
    def read_from_file(self, results_file):
        """Initialise object from serialised file on disk.
        """
        with bz2.BZ2File(results_file, "rb") as f:
            results = json.loads(f.read())
            self.__dict__.update(results)
            # Ensure that self.audit and self.config have correct types.
            self.config = Config(None)
            self.config.read_from_string(results["config"])
            self.audit = results["audit"]
Code Example #22
File: test_config.py Project: bennn/krun
def test_log_filename(monkeypatch):
    path = os.path.join(TEST_DIR, "example.krun")
    example_config = Config(path)
    tstamp = time.strftime(LOGFILE_FILENAME_TIME_FORMAT)
    expect_path = os.path.join(TEST_DIR, "example_%s.log" % tstamp)
    assert example_config.log_filename(False) == expect_path
    def mock_mtime(path):
        return 1445964109.9363003
    monkeypatch.setattr(os.path, 'getmtime', mock_mtime)
    tstamp = '20151027_164149'
    expect_path = os.path.join(TEST_DIR, "example_%s.log" % tstamp)
    assert example_config.log_filename(True) == expect_path
Code Example #23
File: test_config.py Project: bennn/krun
def test_skip0001():
    path = os.path.join(TEST_DIR, "skips.krun")
    config = Config(path)
    expected = ["*:PyPy:*",
                "*:CPython:*",
                "*:Hotspot:*",
                "*:Graal:*",
                "*:LuaJIT:*",
                "*:HHVM:*",
                "*:JRubyTruffle:*",
                "*:V8:*",
    ]
    for triplet in expected:
        assert config.should_skip(triplet)
    assert config.should_skip("nbody:HHVM:default-php")
    assert not config.should_skip("nbody:MYVM:default-php")
Code Example #24
File: test_results.py Project: warsier/krun
    def test_write_results_to_disk(self, mock_platform,
                                   no_results_instantiation_check):
        config = Config("krun/tests/example.krun")
        mock_platform.num_cpus = 4
        mock_platform.num_per_core_measurements = mock_platform.num_cpus
        out_file = "krun/tests/example_results.json.bz2"
        results0 = Results(config, mock_platform)
        results0.audit = dict()
        results0.starting_temperatures = [4355, 9879]
        results0.wallclock_times = {u"dummy:Java:default-java": [[1.000726]]}
        results0.eta_estimates = {u"dummy:Java:default-java": [1.1]}
        results0.core_cycle_counts = {
            u"dummy:Java:default-java": [[[2], [3], [4], [5]]]
        }
        results0.aperf_counts = {
            u"dummy:Java:default-java": [[[3], [4], [5], [6]]]
        }
        results0.mperf_counts = {
            u"dummy:Java:default-java": [[[4], [5], [6], [7]]]
        }
        results0.reboots = 5
        results0.error_flag = False
        results0.write_to_file()
        results1 = Results(config, None, results_file=out_file)
        assert results0 == results1
        # Clean-up generated file.
        os.unlink(out_file)
Code Example #25
    def test_run_exec_popen0002(self, monkeypatch):
        """Check that writing stderr to a file works. Used for instrumentation"""

        config = Config()
        platform = MockPlatform(None, config)
        vm_def = PythonVMDef('/dummy/bin/python')
        vm_def.set_platform(platform)

        args = [
            sys.executable, "-c",
            "import sys; sys.stdout.write('STDOUT'); sys.stderr.write('STDERR')"
        ]

        with NamedTemporaryFile(delete=False, prefix="kruntest") as fh:
            filename = fh.name
            out, err, rv, timed_out = vm_def._run_exec_popen(args, fh)

        assert err == ""  # not here due to redirection
        assert out == "STDOUT"  # behaviour should be unchanged
        assert rv == 0
        assert timed_out == False

        # stderr should be in this file
        with open(filename) as fh:
            assert fh.read() == "STDERR"

        fh.close()
        os.unlink(filename)
Code Example #26
File: test_config.py Project: softdevteam/krun
def test_skip0001():
    path = os.path.join(TEST_DIR, "skips.krun")
    config = Config(path)
    expected = ["*:PyPy:*",
                "*:CPython:*",
                "*:Hotspot:*",
                "*:Graal:*",
                "*:LuaJIT:*",
                "*:HHVM:*",
                "*:TruffleRuby:*",
                "*:V8:*",
                ]
    for triplet in expected:
        assert config.should_skip(triplet)
    assert config.should_skip("nbody:HHVM:default-php")
    assert not config.should_skip("nbody:MYVM:default-php")
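
The SKIP entries above are glob-like "benchmark:vm:variant" triplets, where "*" stands in for a whole component. The following is a hypothetical, self-contained sketch of those matching semantics using the standard library's fnmatchcase; it is only an illustration of what the tests exercise, not krun's actual Config.should_skip implementation.

from fnmatch import fnmatchcase

# Hypothetical skip list in the same "benchmark:vm:variant" format as above.
SKIP = ["*:PyPy:*", "*:CPython:*", "*:HHVM:*"]

def should_skip(key, skip_patterns=SKIP):
    # A key is skipped if any pattern matches it; "*" matches a whole
    # benchmark, VM, or variant component.
    return any(fnmatchcase(key, pattern) for pattern in skip_patterns)

assert should_skip("nbody:HHVM:default-php")
assert not should_skip("nbody:MYVM:default-php")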
Code Example #27
    def test_sync_disks0002(self, monkeypatch):
        """We throw away the results from sanity checks, so there's no need to
        sync disks (and wait)."""

        stdout = json.dumps({
            "wallclock_times": [123.4],
            "core_cycle_counts": [[1], [2], [3], [4]],
            "aperf_counts": [[5], [6], [7], [8]],
            "mperf_counts": [[9], [10], [11], [12]],
        })

        config = Config()
        platform = MockPlatform(None, config)
        ep = EntryPoint("test")
        vm_def = PythonVMDef('/dummy/bin/python')

        sync_called = [False]

        def fake_sync_disks():
            sync_called[0] = True

        monkeypatch.setattr(platform, "sync_disks", fake_sync_disks)

        def fake_run_exec_popen(args, stderr_file=None):
            return stdout, "", 0, False  # stdout, stderr, exit_code, timed_out

        monkeypatch.setattr(vm_def, "_run_exec_popen", fake_run_exec_popen)

        util.spawn_sanity_check(platform, ep, vm_def, "test")
        assert sync_called == [False]
Code Example #28
File: test_manifest_manager.py Project: warsier/krun
def test_write_new_manifest0002(mock_platform):
    manifest_path = "example_000.manifest"
    config_path = os.path.join(TEST_DIR, "more_complicated.krun")
    config = Config(config_path)
    manifest = ManifestManager(config, mock_platform, new_file=True)
    assert manifest.total_num_execs == 90  # taking into account skips
    _tear_down(manifest.path)
Code Example #29
File: test_config.py Project: bennn/krun
def test_skip0009():
    config = Config()
    config.SKIP = ["*:SomeVM:*",
                   "fasta:JRubyTruffle:default-ruby",
                   "bench:*:*",
                   "bench:vm:skipvariant",
                   "*:*:skipvariant",
                   ]

    assert config.should_skip("fasta:JRubyTruffle:default-ruby")
    assert not config.should_skip("fasta:JRubyTruffle:default-ruby2")
    assert config.should_skip("bench:lala:hihi")
    assert config.should_skip("bench:lala:hihi2")
    assert not config.should_skip("bench1:lala:hihi")
    assert config.should_skip("bench1:lala:skipvariant")
    assert config.should_skip("bench1:lala2:skipvariant")
Code Example #30
File: test_scheduler.py Project: bennn/krun
    def test_run_schedule_reboot(self, monkeypatch, mock_platform):
        def dummy_shell_cmd(text):
            pass

        def dummy_execv(text, lst):
            pass

        monkeypatch.setattr(os, "execv", dummy_execv)
        monkeypatch.setattr(subprocess, "call", dummy_shell_cmd)
        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=True,
                                   dry_run=True,
                                   started_by_init=True)
        sched.build_schedule()
        assert len(sched) == 8
        with pytest.raises(AssertionError):
            sched.run()
        assert len(sched) == 7
        os.unlink(os.path.join(TEST_DIR, "example_results.json.bz2"))
Code Example #31
File: krun.py Project: bennn/krun
def main(parser):
    args = parser.parse_args()

    if args.dump is not None:
        if not args.filename.endswith(".json.bz2"):
            usage(parser)
        else:
            results = Results(None, None, results_file=args.filename)
            if args.dump == "config" or "audit":
                text = unicode(results.__getattribute__(args.dump))
            else:
                text = json.dumps(results.__getattribute__(args.dump),
                                  sort_keys=True, indent=2)
            # String data read in from JSON are unicode objects. This matters
            # for us as some data in the audit includes unicode characters.
            # If it does, a simple print no longer suffices if the system
            # locale is (e.g.) ASCII. In this case print will raise an
            # exception. The correct thing to do is to encode() the unicode to
            # the system locale.
            print(text.encode(locale.getpreferredencoding()))
            sys.exit(0)

    if not args.filename.endswith(".krun"):
        usage(parser)

    try:
        if os.stat(args.filename).st_size <= 0:
            util.fatal('Krun configuration file %s is empty.' % args.filename)
    except OSError:
        util.fatal('Krun configuration file %s does not exist.' % args.filename)

    config = Config(args.filename)

    if args.info:
        # Info mode doesn't run the experiment.
        # Just prints some metrics and exits.
        util.print_session_info(config)
        return

    if args.strip_results:
        util.strip_results(config, args.strip_results)
        return

    attach_log_file(config, args.resume)
    debug("Krun invoked with arguments: %s" % sys.argv)

    mail_recipients = config.MAIL_TO
    if type(mail_recipients) is not list:
        util.fatal("MAIL_TO config should be a list")

    mailer = Mailer(mail_recipients, max_mails=config.MAX_MAILS)

    try:
        inner_main(mailer, config, args)
    except util.FatalKrunError as e:
        subject = "Fatal Krun Exception"
        mailer.send(subject, e.args[0], bypass_limiter=True)
        util.run_shell_cmd_list(config.POST_EXECUTION_CMDS)
        raise e
Code Example #32
File: test_config.py Project: bennn/krun
def test_skip0010():
    config = Config()
    config.SKIP = ["*:SomeVM:*",
                   "fasta:JRubyTruffle:default-ruby",
                   "bench:*:*",
                   "bench:vm:skipvariant",
                   "*:*:skipvariant",
                   "*:*:*",  # everything should be skipped due to this
                   ]

    import uuid
    def rand_str():
        return uuid.uuid4().hex

    for i in xrange(25):
        key = "%s:%s:%s" % tuple([rand_str() for x in xrange(3)])
        assert config.should_skip(key)
Code Example #33
def test_custom_dmesg_whitelist0003(monkeypatch):
    """Test a config file that uses no custom whitelist"""

    path = os.path.join(TEST_DIR, "example.krun")
    config = Config(path)
    platform = krun.platform.detect_platform(None, config)
    patterns = [p.pattern for p in platform.get_dmesg_whitelist()]
    assert patterns == platform.default_dmesg_whitelist()
Code Example #34
def test_custom_dmesg_whitelist0002(monkeypatch):
    """Test a config file that replaces entirely the dmesg whitelist"""

    path = os.path.join(TEST_DIR, "custom_dmesg_whitelist0002.krun")
    config = Config(path)
    platform = krun.platform.detect_platform(None, config)
    patterns = [p.pattern for p in platform.get_dmesg_whitelist()]
    assert patterns == ["^.no+", "^defaults", "^here+"]
Code Example #35
File: test_results.py Project: bennn/krun
    def test_eq(self, mock_platform):
        results = Results(None,
                          None,
                          results_file="krun/tests/quick_results.json.bz2")
        assert results == results
        assert not results == None
        assert not results == \
            Results(Config("krun/tests/example.krun"), mock_platform)
Code Example #36
File: test_manifest_manager.py Project: warsier/krun
def test_write_new_manifest0001(mock_platform):
    _setup(BLANK_EXAMPLE_MANIFEST)
    config = Config(os.path.join(TEST_DIR, "example.krun"))
    manifest1 = ManifestManager(config, mock_platform, new_file=True)
    manifest2 = ManifestManager(
        config, mock_platform)  # reads the file in from the last line
    assert manifest1 == manifest2
    _tear_down(manifest2.path)
Code Example #37
File: test_config.py Project: softdevteam/krun
def test_skip0010():
    config = Config()
    config.SKIP = ["*:SomeVM:*",
                   "fasta:TruffleRuby:default-ruby",
                   "bench:*:*",
                   "bench:vm:skipvariant",
                   "*:*:skipvariant",
                   "*:*:*",  # everything should be skipped due to this
                   ]

    import uuid
    def rand_str():
        return uuid.uuid4().hex

    for i in xrange(25):
        key = "%s:%s:%s" % tuple([rand_str() for x in xrange(3)])
        assert config.should_skip(key)
Code Example #38
File: test_scheduler.py Project: softdevteam/krun
    def test_run_schedule0002(self, mock_platform, monkeypatch):
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2
            for _exec in execs:
                assert len(_exec) == 0  # due to dry run

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #39
File: test_config.py Project: softdevteam/krun
def test_skip0009():
    config = Config()
    config.SKIP = ["*:SomeVM:*",
                   "fasta:TruffleRuby:default-ruby",
                   "bench:*:*",
                   "bench:vm:skipvariant",
                   "*:*:skipvariant",
                   ]

    assert config.should_skip("fasta:TruffleRuby:default-ruby")
    assert not config.should_skip("fasta:TruffleRuby:default-ruby2")
    assert config.should_skip("bench:lala:hihi")
    assert config.should_skip("bench:lala:hihi2")
    assert not config.should_skip("bench1:lala:hihi")
    assert config.should_skip("bench1:lala:skipvariant")
    assert config.should_skip("bench1:lala2:skipvariant")
Code Example #40
File: test_scheduler.py Project: softdevteam/krun
    def test_pre_and_post_exec_cmds0001(self, monkeypatch, mock_platform):
        cap_cmds = []
        def dummy_run_shell_cmd(cmd, failure_fatal=False, extra_env=None):
            cap_cmds.append(cmd)
            return "", "", 0

        monkeypatch.setattr(krun.util, "run_shell_cmd", dummy_run_shell_cmd)

        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        config.PRE_EXECUTION_CMDS = ["cmd1", "cmd2"]
        config.POST_EXECUTION_CMDS = ["cmd3", "cmd4"]

        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)

        assert n_reboots == 1
        expect = ["cmd1", "cmd2", "cmd3", "cmd4"]
        assert cap_cmds == expect
Code Example #41
File: test_scheduler.py Project: softdevteam/krun
    def test_run_schedule0001(self, monkeypatch, mock_platform):
        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert sched.manifest.total_num_execs == 1
        assert sched.manifest.num_execs_left == 0
        assert n_reboots == 1

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 1  # 1 benchmark, 1 vm
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 1
            for _exec in execs:
                assert len(_exec) == 0  # due to dry run

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #42
File: test_scheduler.py Project: softdevteam/krun
    def test_pre_and_post_cmds0003(self, monkeypatch, mock_platform):
        """Check that the pre/post commands use a shell and don't just exec(3)"""

        config = Config(os.path.join(TEST_DIR, "one_exec.krun"))
        tmp_file = os.path.join(TEST_DIR, "prepost.txt")

        # commands use shell syntax
        config.PRE_EXECUTION_CMDS = ["echo 'pre' > %s" % tmp_file]
        config.POST_EXECUTION_CMDS = ["echo 'post' >> %s" % tmp_file]

        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 1
        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)

        with open(tmp_file) as fh:
            got = fh.read()

        os.unlink(tmp_file)
        assert got == "pre\npost\n"
Code Example #43
File: test_config.py Project: softdevteam/krun
def test_skip0006():
    config = Config()
    config.SKIP = ["*:*:*"]

    assert config.should_skip("mybench:CPython:default-python")
    assert config.should_skip("myotherbench:CPython:default-python")
    assert config.should_skip("mybench:PyPy:default-python")
    assert config.should_skip("mybench:CPython:special-python")
Code Example #44
File: test_scheduler.py Project: softdevteam/krun
    def test_error_flag_persists0001(self, monkeypatch, mock_platform):
        """Check a failing exec will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend exec fails
        def dummy_job_run(self, mailer, dry):
            measurements = self.make_empty_measurement()
            return measurements, None, 'E'  # measurements, instr_data, flag
        monkeypatch.setattr(ExecutionJob, 'run', dummy_job_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run a (failing) execution, which will dump the results file
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Code Example #45
File: test_scheduler.py Project: softdevteam/krun
    def test_error_flag_persists0002(self, monkeypatch, mock_platform):
        """Check a changed dmesg will correctly set the error flag"""

        make_reboot_raise(monkeypatch)
        no_envlogs(monkeypatch)

        # pretend dmesg changes
        def dummy_check_for_dmesg_changes(self):
            return True
        monkeypatch.setattr(mock_platform, 'check_dmesg_for_changes',
                            dummy_check_for_dmesg_changes)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        emulate_first_reboot(mock_platform, config, monkeypatch)
        results_path = config.results_filename()

        # To start, the error flag is not set
        results = Results(config, mock_platform, results_file=results_path)
        assert not results.error_flag

        # run an execution where the dmesg changes
        sched = ExecutionScheduler(config, mock_platform.mailer, mock_platform,
                                   dry_run=True)
        try:
            sched.run()
        except _TestReboot:
            pass
        else:
            assert False

        # reload results and check the error flag is now set
        results = Results(config, mock_platform, results_file=results_path)
        assert results.error_flag

        os.unlink(sched.manifest.path)
        os.unlink(results_path)
Code Example #46
File: test_scheduler.py Project: softdevteam/krun
    def test_run_schedule0005(self, mock_platform, monkeypatch):

        def dummy_execjob_run(self, mailer, dryrun=False):
            return self.empty_measurements, {}, "E"  # pretend jobs fail
        monkeypatch.setattr(ExecutionJob, 'run', dummy_execjob_run)

        config = Config(os.path.join(TEST_DIR, "example.krun"))
        n_reboots, sched = run_with_captured_reboots(config, mock_platform,
                                                     monkeypatch)
        assert n_reboots == 8  # 2 benchmarks, 2 vms, 2 execs

        results = Results(config, mock_platform,
                          results_file=config.results_filename())
        type_check_results(results)

        assert len(results.wallclock_times) == 4  # 2 benchmarks, 2 vms
        for key, execs in results.wallclock_times.iteritems():
            assert len(execs) == 2

            for _exec in execs:
                assert len(_exec) == 0  # due to error

        os.unlink(config.results_filename())
        os.unlink(sched.manifest.path)
Code Example #47
File: test_config.py Project: softdevteam/krun
def test_log_filename0001():
    path = os.path.join(TEST_DIR, "example.krun")
    example_config = Config(path)
    expect_path = os.path.join(TEST_DIR, "example.log")
    assert example_config.log_filename(False) == expect_path
Code Example #48
File: test_config.py Project: softdevteam/krun
def test_read_config_from_file():
    path = os.path.join(TEST_DIR, "example.krun")
    config0 = Config(path)
    config1 = Config(None)
    config1.read_from_file(path)
    assert config0 == config1
Code Example #49
File: test_config.py Project: softdevteam/krun
def test_check_config_consistency():
    path = os.path.join(TEST_DIR, "example.krun")
    config = Config(path)
    with open(path) as fp:
        config_string = fp.read()
    config.check_config_consistency(config_string, "fakefilename")
Code Example #50
File: test_config.py Project: softdevteam/krun
def test_skip0008():
    config = Config()
    config.SKIP = ["*:SomeVM:*", "fasta:TruffleRuby:default-ruby"]

    assert config.should_skip("fasta:TruffleRuby:default-ruby")
Code Example #51
File: results.py Project: bennn/krun
class Results(object):
    """Results of a Krun benchmarking session.
    Can be serialised to disk.
    """

    def __init__(self, config, platform, results_file=None):
        self.config = config

        # Maps key to results:
        # "bmark:vm:variant" -> [[e0i0, e0i1, ...], [e1i0, e1i1, ...], ...]
        self.data = dict()
        self.reboots = 0

        # Record how long execs are taking so we can give the user a rough ETA.
        # Maps "bmark:vm:variant" -> [t_0, t_1, ...]
        self.eta_estimates = dict()

        # error_flag is flipped when a (non-fatal) error or warning occurs.
        # When Krun finishes and this flag is true, a message is printed,
        # thus prompting the user to investigate.
        self.error_flag = False

        # Fill in attributes from the config, platform and prior results.
        if self.config is not None:
            self.filename = self.config.results_filename()
            self.init_from_config()
        if platform is not None:
            self.starting_temperatures = platform.starting_temperatures
            self._audit = Audit(platform.audit)
        else:
            self.starting_temperatures = list()
            self.audit = dict()

        # Import data from a Results object serialised on disk.
        if results_file is not None:
            self.read_from_file(results_file)

    @property
    def audit(self):
        return self._audit

    @audit.setter
    def audit(self, audit_dict):
        self._audit = Audit(audit_dict)

    def init_from_config(self):
        """Scaffold dictionaries based on a given configuration.
        """
        # Initialise dictionaries based on config information.
        for vm_name, vm_info in self.config.VMS.items():
            for bmark, _ in self.config.BENCHMARKS.items():
                for variant in vm_info["variants"]:
                    key = ":".join((bmark, vm_name, variant))
                    self.data[key] = []
                    self.eta_estimates[key] = []

    def read_from_file(self, results_file):
        """Initialise object from serialised file on disk.
        """
        with bz2.BZ2File(results_file, "rb") as f:
            results = json.loads(f.read())
            self.__dict__.update(results)
            # Ensure that self.audit and self.config have correct types.
            self.config = Config(None)
            self.config.read_from_string(results["config"])
            self.audit = results["audit"]

    def write_to_file(self):
        """Serialise object on disk."""

        debug("Writing results out to: %s" % self.filename)

        to_write = {
            "config": self.config.text,
            "data": self.data,
            "audit": self.audit.audit,
            "reboots": self.reboots,
            "starting_temperatures": self.starting_temperatures,
            "eta_estimates": self.eta_estimates,
            "error_flag": self.error_flag,
        }
        with bz2.BZ2File(self.filename, "w") as f:
            f.write(json.dumps(to_write,
                               indent=1, sort_keys=True, encoding='utf-8'))

    def jobs_completed(self, key):
        """Return number of executions for which we have data for a given
        benchmark / vm / variant triplet.
        """
        return len(self.data[key])

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.config == other.config and
                self.data == other.data and
                self.audit == other.audit and
                self.reboots == other.reboots and
                self.starting_temperatures == other.starting_temperatures and
                self.eta_estimates == other.eta_estimates and
                self.error_flag == other.error_flag)

    def strip_results(self, key_spec):
        debug("Strip results: %s" % key_spec)

        spec_elems = key_spec.split(":")
        if len(spec_elems) != 3:
            fatal("malformed key spec: %s" % key_spec)

        new_data = self.data.copy()
        removed_keys = 0
        removed_execs = 0

        # We have to keep track of how many executions have run successfully so
        # that we can set self.reboots accordingly. It's not correct to simply
        # deduct one for each execution we remove, as the reboots value is one
        # higher due to the initial reboot. Bear in mind the user may strip
        # several result keys in succession, so counting the completed
        # executions is the only safe way.
        completed_execs = 0

        for key in self.data.iterkeys():
            key_elems = key.split(":")
            # deal with wildcards
            for i in xrange(3):
                if spec_elems[i] == "*":
                    key_elems[i] = "*"

            # decide whether to remove
            if key_elems == spec_elems:
                removed_keys += 1
                removed_execs += len(new_data[key])
                new_data[key] = []
                self.eta_estimates[key] = []
                info("Removed results for: %s" % key)
            else:
                completed_execs += len(new_data[key])

        self.data = new_data

        # If the results were collected with reboot mode, update reboots count
        if self.reboots != 0:
            self.reboots = completed_execs

        return removed_keys
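
To make the serialisation format concrete, here is a minimal, self-contained sketch of the bz2 + JSON round trip that write_to_file() and read_from_file() above are built on. It uses only the standard library; the payload fields are placeholders, not real krun results.

import bz2
import json

# Placeholder payload shaped loosely like the dict Results serialises.
to_write = {
    "config": "# config text would be embedded here",
    "data": {"bench:VM:variant": [[0.1, 0.2]]},
    "reboots": 0,
    "error_flag": False,
}

# Write bz2-compressed JSON, as write_to_file() does.
with bz2.BZ2File("example_results.json.bz2", "w") as f:
    f.write(json.dumps(to_write, indent=1, sort_keys=True).encode("utf-8"))

# Read it back and decode, as read_from_file() does.
with bz2.BZ2File("example_results.json.bz2", "rb") as f:
    results = json.loads(f.read())

assert results["data"]["bench:VM:variant"] == [[0.1, 0.2]]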