Example #1
def test_get_set_item():
    audit = Audit({"a": 100, "b": 200})
    empty = Audit(dict())
    assert audit["a"] == 100
    assert audit["b"] == 200
    empty["a"] = 100
    empty["b"] = 200
    assert audit == empty
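The test above relies on dictionary-style access on the Audit wrapper. Below is a minimal sketch of what such a wrapper could look like, assuming the wrapped dict lives in a private _audit attribute (the attribute name appears in Examples #3, #8 and #11); it is illustrative only, not the project's actual implementation.

class Audit(object):
    def __init__(self, audit_dict):
        # Wrap a plain dict of audit sections, e.g. {"uname": "Linux"}.
        self._audit = audit_dict

    def __getitem__(self, key):
        # audit["a"] reads straight from the wrapped dict.
        return self._audit[key]

    def __setitem__(self, key, value):
        # audit["a"] = 100 writes straight into the wrapped dict.
        self._audit[key] = value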
Example #2
def test_eq():
    audit = Audit({"a": 100, "b": 200})
    empty = Audit(dict())
    assert audit == audit
    assert empty == empty
    assert not empty == audit
    assert not list() == audit
    a0 = {"uname": "Linux"}
    a1 = {"uname": "BSD"}
    assert not a0 == a1
    assert a0 == a0
    assert a1 == a1
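For these assertions to hold, the wrapper also needs equality. A hedged continuation of the sketch from Example #1 (on Python 2, __ne__ must be defined explicitly; the project's real method may instead compare only selected keys such as "uname"):

    def __eq__(self, other):
        # Only another Audit with an equal underlying dict compares equal,
        # so "list() == audit" and "empty == audit" are both False.
        return isinstance(other, Audit) and self._audit == other._audit

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__.
        return not self == other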
Example #3
    def __init__(self, config, platform, results_file=None):
        self.config = config

        # Maps key to results:
        # "bmark:vm:variant" -> [[e0i0, e0i1, ...], [e1i0, e1i1, ...], ...]
        self.data = dict()
        self.reboots = 0

        # Record how long execs are taking so we can give the user a rough ETA.
        # Maps "bmark:vm:variant" -> [t_0, t_1, ...]
        self.eta_estimates = dict()

        # error_flag is flipped when a (non-fatal) error or warning occurs.
        # When Krun finishes and this flag is true, a message is printed,
        # thus prompting the user to investigate.
        self.error_flag = False

        # Fill in attributes from the config, platform and prior results.
        if self.config is not None:
            self.filename = self.config.results_filename()
            self.init_from_config()
        if platform is not None:
            self.starting_temperatures = platform.starting_temperatures
            self._audit = Audit(platform.audit)
        else:
            self.starting_temperatures = list()
            self.audit = dict()

        # Import data from a Results object serialised on disk.
        if results_file is not None:
            self.read_from_file(results_file)
Example #4
def test_unicode():
    audit = Audit({"a": 100, "b": 200})
    spacer = "#" * 78
    expected = "Audit Section: a\n"
    expected += spacer + u"\n\n"
    expected += "100\n\n"
    expected += "Audit Section: b\n"
    expected += spacer + "\n\n"
    expected += "200\n\n"
    assert unicode(expected) == unicode(audit)
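The expected string gives away the rendering format: an "Audit Section:" header per key, a 78-character rule, then the value. A possible __unicode__ for the sketch from Example #1 that reproduces it (sorting the keys keeps the output deterministic); the project's real method may order or format sections differently:

    def __unicode__(self):
        # Render each audit section as a header, a 78-character rule and
        # the section's value, exactly as test_unicode expects.
        spacer = u"#" * 78
        s = u""
        for key in sorted(self._audit):
            s += u"Audit Section: %s\n" % key
            s += spacer + u"\n\n"
            s += u"%s\n\n" % self._audit[key]
        return s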
Example #5
def check_audit_unchanged(results, platform):
    """Crash out if the audit in the result doesn't match the one in the
    platform"""

    from krun.audit import Audit
    if Audit(platform.audit) != results.audit:
        error_msg = ("You have asked Krun to resume an interrupted benchmark. "
                     "This is only valid if the machine you are using is "
                     "identical to the one on which the last results were "
                     "gathered, which is not the case.")
        fatal(error_msg)
Example #6
def type_check_results(results):
    for k, execs in results.wallclock_times.iteritems():
        assert type(execs) is list
        for one_exec in execs:
            assert type(one_exec) is list
            assert all([type(x) is float for x in one_exec])

    for k, execs in results.eta_estimates.iteritems():
        assert type(execs) is list
        assert all([type(x) is float for x in execs])

    assert type(results.audit) is type(Audit(dict()))
    assert type(results.config) is type(Config())
    assert type(results.error_flag) is bool
Example #7
    def test_run_schedule(self, monkeypatch, mock_platform):
        json_file = os.path.join(TEST_DIR, "example_results.json.bz2")

        def dummy_shell_cmd(text):
            pass

        monkeypatch.setattr(subprocess, 'call', dummy_shell_cmd)
        monkeypatch.setattr(krun.util, 'run_shell_cmd', dummy_shell_cmd)
        config = Config(os.path.join(TEST_DIR, "example.krun"))
        krun.util.assign_platform(config, mock_platform)
        sched = ExecutionScheduler(config,
                                   mock_platform.mailer,
                                   mock_platform,
                                   resume=False,
                                   reboot=False,
                                   dry_run=True,
                                   started_by_init=False)
        sched.build_schedule()
        assert len(sched) == 8
        sched.run()
        assert len(sched) == 0

        results = Results(Config(os.path.join(TEST_DIR, "example.krun")),
                          mock_platform,
                          results_file=json_file)

        for k, execs in results.data.iteritems():
            assert type(execs) is list
            for one_exec in execs:
                assert type(one_exec) is list
                assert all([type(x) is float for x in one_exec])

        for k, execs in results.eta_estimates.iteritems():
            assert type(execs) is list
            assert all([type(x) is float for x in execs])

        assert type(results.starting_temperatures) is dict
        assert type(results.reboots) is int
        assert type(results.audit) is type(Audit(dict()))
        assert type(results.config) is type(Config())
        assert type(results.error_flag) is bool

        os.unlink(json_file)
Example #8
    def __init__(self, config, platform, results_file=None):
        self.instantiation_check()

        self.config = config
        self.platform = platform

        # "bmark:vm:variant" -> [[e0i0, e0i1, ...], [e1i0, e1i1, ...], ...]
        self.wallclock_times = dict()  # wall-clock times

        # Secondary, per-core measurements
        # Structure as above, but lifted for N processor cores.
        # i.e. aperf_counts[core#][proc_exec#][in_proc_iter#]
        self.core_cycle_counts = dict()
        self.aperf_counts = dict()
        self.mperf_counts = dict()

        # Record the flag for each process execution.
        self.pexec_flags = dict()

        # Record how long execs are taking so we can give the user a rough ETA.
        # Maps "bmark:vm:variant" -> [t_0, t_1, ...]
        self.eta_estimates = dict()

        # error_flag is flipped when a (non-fatal) error or warning occurs.
        # When Krun finishes and this flag is true, a message is printed,
        # thus prompting the user to investigate.
        self.error_flag = False

        # Fill in attributes from the config, platform and prior results.
        if self.config is not None:
            self.filename = self.config.results_filename()
            self.init_from_config()
            self.config_text = self.config.text
        if platform is not None:
            self._audit = Audit(platform.audit)
        else:
            self.audit = dict()

        # Import data from a Results object serialised on disk.
        if results_file is not None:
            self.read_from_file(results_file)
Example #9
def test_property():
    audit = Audit({"a": 100, "b": 200})
    assert audit.audit == {"a": 100, "b": 200}
    empty = Audit(dict())
    empty.audit = {"a": 100, "b": 200}
    assert empty == audit
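Audit itself also exposes the wrapped dict through an audit property. A sketch of the getter/setter pair that would satisfy this test, continuing Example #1's sketch (the setter stores the plain dict, otherwise the equality check at the end could not hold under the __eq__ sketched in Example #2):

    @property
    def audit(self):
        # Hand back the wrapped dict itself.
        return self._audit

    @audit.setter
    def audit(self, audit_dict):
        # Store the plain dict, so "empty" compares equal to an Audit
        # constructed from the same dict.
        self._audit = audit_dict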
Example #10
def test_contains():
    audit = Audit({"a": 100, "b": 200})
    assert "a" in audit
    assert not "c" in audit
Example #11
    def audit(self, audit_dict):
        self._audit = Audit(audit_dict)
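This setter wraps a plain dict in an Audit, which suggests it belongs to the Results class rather than to Audit itself: it would explain why Results.__init__ in Examples #3 and #8 can assign either self._audit = Audit(platform.audit) or self.audit = dict(). A hedged sketch of the full property pair under that assumption:

from krun.audit import Audit

class Results(object):
    # Illustrative only -- the real constructor is shown in Examples #3 and #8.
    def __init__(self):
        self._audit = Audit(dict())

    @property
    def audit(self):
        # Reads hand back the Audit wrapper...
        return self._audit

    @audit.setter
    def audit(self, audit_dict):
        # ...and assigning a plain dict re-wraps it, so "self.audit = dict()"
        # in __init__ still leaves an Audit behind.
        self._audit = Audit(audit_dict)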
Example #12
def inner_main(mailer, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and not args.resume:
        util.fatal("Output file '%s' already exists. "
                   "Either resume the session (--resume) or "
                   "move the file away" % out_file)

    if not out_file_exists and args.resume:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    if args.started_by_init and not args.reboot:
        util.fatal("--started-by-init makes no sense without --reboot")

    if args.started_by_init and not args.resume:
        util.fatal("--started-by-init makes no sense without --resume")

    if args.develop:
        warn("Developer mode enabled. Results will not be reliable.")

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer)

    if not args.develop:
        debug("Checking platform preliminaries")
        platform.check_preliminaries()
    else:
        # Needed to skip the use of certain tools and techniques.
        # E.g. switching user.
        warn("Not checking platform prerequisites due to developer mode")
        platform.developer_mode = True

    platform.collect_audit()

    # If the user has asked for resume-mode, the machine we are running on
    # must be identical to the one that produced the existing results.
    error_msg = ("You have asked Krun to resume an interrupted benchmark. " +
                 "This is only valid if the machine you are using is " +
                 "identical to the one on which the last results were " +
                 "gathered, which is not the case.")
    current = None
    if args.resume:
        # output file must exist, due to check above
        assert(out_file_exists)
        current = Results(config, platform, results_file=out_file)
        from krun.audit import Audit
        if not Audit(platform.audit) == current.audit:
            util.fatal(error_msg)

        debug("Using pre-recorded initial temperature readings")
        platform.starting_temperatures = current.starting_temperatures
    else:
        # Touch the config file to update its mtime. This is required
        # by resume-mode which uses the mtime to determine the name of
        # the log file, should this benchmark be resumed.
        _, _, rc = util.run_shell_cmd("touch " + args.filename)
        if rc != 0:
            util.fatal("Could not touch config file: " + args.filename)

        info(("Wait %s secs to allow system to cool prior to "
             "collecting initial temperature readings") %
             config.TEMP_READ_PAUSE)

        if args.develop or args.dry_run:
            info("SIMULATED: time.sleep(%s)" % config.TEMP_READ_PAUSE)
        else:
            time.sleep(config.TEMP_READ_PAUSE)

        debug("Taking fresh initial temperature readings")
        platform.starting_temperatures = platform.take_temperature_readings()

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               resume=args.resume,
                               reboot=args.reboot,
                               dry_run=args.dry_run,
                               started_by_init=args.started_by_init)
    sched.build_schedule()
    sched.run()