Example #1
def get_session_info(config):
    """Gets information about the session (for --info)

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ScheduleEmpty, ExecutionScheduler
    from krun.platform import detect_platform

    platform = detect_platform(None)
    sched = ExecutionScheduler(config, None, platform)
    non_skipped_keys, skipped_keys = sched.build_schedule()

    n_proc_execs = 0
    n_in_proc_iters = 0

    while True:
        try:
            job = sched.next_job()
        except ScheduleEmpty:
            break

        n_proc_execs += 1
        n_in_proc_iters += job.vm_info["n_iterations"]

    return {
        "n_proc_execs": n_proc_execs,
        "n_in_proc_iters": n_in_proc_iters,
        "skipped_keys": skipped_keys,
        "non_skipped_keys": non_skipped_keys,
    }
Example #2
File: util.py Project: bennn/krun
def get_session_info(config):
    """Gets information about the session (for --info)

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ScheduleEmpty, ExecutionScheduler
    from krun.platform import detect_platform

    platform = detect_platform(None)
    sched = ExecutionScheduler(config, None, platform)
    non_skipped_keys, skipped_keys = sched.build_schedule()

    n_proc_execs = 0
    n_in_proc_iters = 0

    while True:
        try:
            job = sched.next_job()
        except ScheduleEmpty:
            break

        n_proc_execs += 1
        n_in_proc_iters += job.vm_info["n_iterations"]

    return {
        "n_proc_execs": n_proc_execs,
        "n_in_proc_iters": n_in_proc_iters,
        "skipped_keys": skipped_keys,
        "non_skipped_keys": non_skipped_keys,
    }
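The two listings above call detect_platform(None) with a single argument, whereas later examples pass two arguments (a mailer and a Config, or None placeholders for both); the signature evidently differs between krun versions. A minimal sketch of driving the two-argument form, assuming krun is importable and that Config lives in krun.config (both assumptions, not shown in the listings above):

from krun.config import Config
from krun.platform import detect_platform

config = Config()                          # default, empty configuration
platform = detect_platform(None, config)   # None stands in for a mailer
platform.check_preliminaries()             # as in the main() examples further down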
Example #3
def test_run_shell_cmd_bench_fatal():
    from krun.platform import detect_platform
    cmd = "nonsensecommand"
    platform = detect_platform(None, None)
    out, err, rc = run_shell_cmd_bench(cmd, platform, False)
    assert rc != 0
    assert cmd in err
    assert out == ""
Example #4
def test_run_shell_cmd_bench_fatal():
    from krun.platform import detect_platform
    cmd = "nonsensecommand"
    platform = detect_platform(None, None)
    out, err, rc = run_shell_cmd_bench(cmd, platform, False)
    assert rc != 0
    assert cmd in err
    assert out == ""
Example #5
def main():
    platform = detect_platform(None, Config())
    platform.check_preliminaries()
    platform.sanity_checks()
    for jvm_name, jvm_cmd in JAVA_VMS.items():
        csvp = "dacapo.%s.results" % jvm_name
        with open(csvp, 'wb') as csvf:
            sys.stdout.write("%s:\n" % jvm_name)
            writer = csv.writer(csvf)
            writer.writerow(['processnum', 'benchmark'] + range(ITERATIONS))
            for benchmark in WORKING_BENCHS:
                if jvm_name == "graal" and benchmark in DISABLE_ON_GRAAL:
                    continue
                if sys.platform.startswith("openbsd") and benchmark in DISABLE_ON_OPENBSD:
                    continue
                sys.stdout.write("  %s:" % benchmark)
                for process in range(PROCESSES):
                    sys.stdout.write(" %s" % str(process))
                    sys.stdout.flush()
                    # Flush the CSV writing, and then give the OS some time
                    # to write stuff out to disk before running the next process
                    # execution.
                    csvf.flush()
                    os.fsync(csvf.fileno())
                    if SSH_DO_COPY:
                        os.system("cat %s | ssh -o 'BatchMode yes' -i %s %s@%s 'cat > %s/%s.dacapo.%s.results'" \
                                  % (csvp, SSH_KEY, SSH_USER, SSH_HOST, \
                                     SSH_COPY_DIR, socket.gethostname(), jvm_name))
                    time.sleep(3)

                    stdout, stderr, rc = run_shell_cmd_bench(
                        "%s -jar %s %s -n %s" % (jvm_cmd, JAR, benchmark,
                                                 ITERATIONS + 1), platform, failure_fatal=False)
                    if rc != 0:
                        sys.stderr.write("\nWARNING: process exec crashed\n")
                        sys.stderr.write("stdout:\n")
                        sys.stderr.write(stdout + "\n")
                        sys.stderr.write("\nstderr:\n")
                        sys.stderr.write(stderr + "\n")
                        sys.stderr.flush()
                        writer.writerow([process, benchmark, "crash"])
                        continue
                    output = []
                    for line in stderr.splitlines():
                        if not line.startswith("====="):
                            continue
                        if "completed warmup" not in line:
                            continue
                        assert benchmark in line
                        line = line.split()
                        index = line.index("in")
                        assert line[index + 2] == "nsec"
                        output.append(str(Decimal(line[index + 1]) / 1000000000))
                    assert len(output) == ITERATIONS
                    writer.writerow([process, benchmark] + output)
                sys.stdout.write("\n")
    platform.save_power()
Example #6
def strip_results(config, key_spec):
    from krun.platform import detect_platform
    from krun.results import Results

    platform = detect_platform(None)
    results = Results(config, platform, results_file=config.results_filename())
    n_removed = results.strip_results(key_spec)
    if n_removed > 0:
        results.write_to_file()
    info("Removed %d result keys" % n_removed)
Example #7
def to_strip():
    from krun.platform import detect_platform
    from krun.results import Results

    path = os.path.join(TEST_DIR, "quick.krun")
    config = Config(path)

    platform = detect_platform(None)
    results = Results(config, platform, results_file=config.results_filename())
    return results
Example #8
def to_strip():
    from krun.platform import detect_platform
    from krun.results import Results

    path = os.path.join(TEST_DIR, "quick.krun")
    config = Config(path)

    platform = detect_platform(None)
    results = Results(config, platform,
                      results_file=config.results_filename())
    return results
Example #9
File: util.py Project: bennn/krun
def strip_results(config, key_spec):
    from krun.platform import detect_platform
    from krun.results import Results

    platform = detect_platform(None)
    results = Results(config, platform,
                      results_file=config.results_filename())
    n_removed = results.strip_results(key_spec)
    if n_removed > 0:
        results.write_to_file()
    info("Removed %d result keys" % n_removed)
Example #10
def test_run_shell_cmd_bench():
    from krun.platform import detect_platform
    platform = detect_platform(None, None)
    msg = "example text\n"
    out, err, rc = run_shell_cmd_bench("echo " + msg, platform)
    assert out == msg
    assert err == ""
    assert rc == 0

    msg2 = "another example\n"
    out, err, rc = run_shell_cmd_bench(
        "(>&2 echo %s)  && (echo %s)" % (msg2, msg), platform)
    assert out == msg
    assert err == msg2
    assert rc == 0
Example #11
def main():
    platform = detect_platform(None, Config())
    platform.check_preliminaries()
    platform.sanity_checks()
    for jsvm_name, jsvm_cmd in JAVASCRIPT_VMS.items():
        csvp = "octane.%s.results" % jsvm_name
        with open(csvp, 'wb') as csvf:
            sys.stdout.write("%s:" % jsvm_name)
            writer = csv.writer(csvf)
            writer.writerow(['processnum', 'benchmark'] + range(ITERATIONS))
            for process in range(PROCESSES):
                sys.stdout.write(" %s" % str(process))
                sys.stdout.flush()
                # Flush the CSV writing, and then give the OS some time
                # to write stuff out to disk before running the next process
                # execution.
                csvf.flush()
                os.fsync(csvf.fileno())
                if SSH_DO_COPY:
                    os.system("cat %s | ssh -o 'BatchMode yes' -i %s %s@%s 'cat > %s/%s.octane.%s.results'" \
                              % (csvp, SSH_KEY, SSH_USER, SSH_HOST, \
                                 SSH_COPY_DIR, socket.gethostname(), jsvm_name))
                time.sleep(3)

                stdout, stderr, rc = run_shell_cmd_bench(jsvm_cmd, platform)
                if rc != 0:
                    sys.stderr.write(stderr)
                    sys.exit(rc)
                times = None
                for line in stdout.splitlines():
                    assert len(line) > 0
                    # Lines beginning with something other than a space are the
                    # name of the next benchmark to run. Lines beginning with a
                    # space are the timings of an iteration
                    if line[0] == " ":
                        # Times are in ms, so convert to seconds (without any
                        # loss of precision).
                        times.append(str(Decimal(line.strip()) / 1000))
                    else:
                        assert times is None or len(times) == ITERATIONS
                        if times is not None:
                            writer.writerow([process, bench_name] + times)
                        bench_name = line.strip()
                        times = []
                assert len(times) == ITERATIONS
                writer.writerow([process, bench_name] + times)
            sys.stdout.write("\n")
    platform.save_power()
Example #12
def main():
    platform = detect_platform(None, Config())
    platform.check_preliminaries()
    platform.sanity_checks()
    for jsvm_name, jsvm_cmd in JAVASCRIPT_VMS.items():
        csvp = "octane.%s.results" % jsvm_name
        with open(csvp, 'wb') as csvf:
            sys.stdout.write("%s:" % jsvm_name)
            writer = csv.writer(csvf)
            writer.writerow(['processnum', 'benchmark'] + range(ITERATIONS))
            for process in range(PROCESSES):
                sys.stdout.write(" %s" % str(process))
                sys.stdout.flush()
                # Flush the CSV writing, and then give the OS some time
                # to write stuff out to disk before running the next process
                # execution.
                csvf.flush()
                os.fsync(csvf.fileno())
                if SSH_DO_COPY:
                    os.system("cat %s | ssh -o 'BatchMode yes' -i %s %s@%s 'cat > %s/%s.octane.%s.results'" \
                              % (csvp, SSH_KEY, SSH_USER, SSH_HOST, \
                                 SSH_COPY_DIR, socket.gethostname(), jsvm_name))
                time.sleep(3)

                stdout, stderr, rc = run_shell_cmd_bench(jsvm_cmd, platform)
                if rc != 0:
                    sys.stderr.write(stderr)
                    sys.exit(rc)
                times = None
                for line in stdout.splitlines():
                    assert len(line) > 0
                    # Lines beginning with something other than a space are the
                    # name of the next benchmark to run. Lines beginning with a
                    # space are the timings of an iteration
                    if line[0] == " ":
                        # Times are in ms, so convert to seconds (without any
                        # loss of precision).
                        times.append(str(Decimal(line.strip()) / 1000))
                    else:
                        assert times is None or len(times) == ITERATIONS
                        if times is not None:
                            writer.writerow([process, bench_name] + times)
                        bench_name = line.strip()
                        times = []
                assert len(times) == ITERATIONS
                writer.writerow([process, bench_name] + times)
            sys.stdout.write("\n")
    platform.save_power()
Example #13
def test_run_shell_cmd_bench():
    from krun.platform import detect_platform
    platform = detect_platform(None, None)
    msg = "example text\n"
    out, err, rc = run_shell_cmd_bench("echo " + msg, platform)
    assert out == msg
    assert err == ""
    assert rc == 0

    msg2 = "another example\n"
    out, err, rc = run_shell_cmd_bench(
        "(>&2 echo %s)  && (echo %s)" % (msg2, msg),
        platform)
    assert out == msg
    assert err == msg2
    assert rc == 0
Example #14
def get_session_info(config):
    """Gets information about the session (for --info)

    Overwrites any existing manifest file.

    Separated from print_session_info for ease of testing"""

    from krun.scheduler import ManifestManager
    from krun.platform import detect_platform
    platform = detect_platform(None, config)
    manifest = ManifestManager(config, platform, new_file=True)

    return {
        "n_proc_execs": manifest.total_num_execs,
        "n_in_proc_iters": manifest.get_total_in_proc_iters(config),
        "skipped_keys": manifest.skipped_keys,
        "non_skipped_keys": manifest.non_skipped_keys,
    }
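Example #14 is the later, manifest-based variant of the get_session_info shown in Examples #1 and #2: rather than building an ExecutionScheduler and draining its job queue, it writes a fresh manifest and reads the totals straight from it. A rough usage sketch, assuming a config file path (the file name and the import path for Config are illustrative assumptions):

from krun.config import Config

config = Config("experiment.krun")   # hypothetical config file
info = get_session_info(config)
print("%d process execs, %d in-process iterations"
      % (info["n_proc_execs"], info["n_in_proc_iters"]))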
Example #15
File: krun.py Project: bennn/krun
def inner_main(mailer, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and not args.resume:
        util.fatal("Output file '%s' already exists. "
                   "Either resume the session (--resume) or "
                   "move the file away" % out_file)

    if not out_file_exists and args.resume:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    if args.started_by_init and not args.reboot:
        util.fatal("--started-by-init makes no sense without --reboot")

    if args.started_by_init and not args.resume:
        util.fatal("--started-by-init makes no sense without --resume")

    if args.develop:
        warn("Developer mode enabled. Results will not be reliable.")

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer)

    if not args.develop:
        debug("Checking platform preliminaries")
        platform.check_preliminaries()
    else:
        # Needed to skip the use of certain tools and techniques.
        # E.g. switching user.
        warn("Not checking platform prerequisites due to developer mode")
        platform.developer_mode = True

    platform.collect_audit()

    # If the user has asked for resume-mode, the current machine must be
    # identical to the one on which the previous results were gathered.
    error_msg = ("You have asked Krun to resume an interrupted benchmark. " +
                 "This is only valid if the machine you are using is " +
                 "identical to the one on which the last results were " +
                 "gathered, which is not the case.")
    current = None
    if args.resume:
        # output file must exist, due to check above
        assert(out_file_exists)
        current = Results(config, platform, results_file=out_file)
        from krun.audit import Audit
        if not Audit(platform.audit) == current.audit:
            util.fatal(error_msg)

        debug("Using pre-recorded initial temperature readings")
        platform.starting_temperatures = current.starting_temperatures
    else:
        # Touch the config file to update its mtime. This is required
        # by resume-mode which uses the mtime to determine the name of
        # the log file, should this benchmark be resumed.
        _, _, rc = util.run_shell_cmd("touch " + args.filename)
        if rc != 0:
            util.fatal("Could not touch config file: " + args.filename)

        info(("Wait %s secs to allow system to cool prior to "
             "collecting initial temperature readings") %
             config.TEMP_READ_PAUSE)

        if args.develop or args.dry_run:
            info("SIMULATED: time.sleep(%s)" % config.TEMP_READ_PAUSE)
        else:
            time.sleep(config.TEMP_READ_PAUSE)

        debug("Taking fresh initial temperature readings")
        platform.starting_temperatures = platform.take_temperature_readings()

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               resume=args.resume,
                               reboot=args.reboot,
                               dry_run=args.dry_run,
                               started_by_init=args.started_by_init)
    sched.build_schedule()
    sched.run()
Example #16
def test_read_popen_output_carefully_0001():
    platform = detect_platform(None, None)
    process = subprocess32.Popen(["/bin/sleep", "5"], stdout=subprocess32.PIPE)
    _, _, _, timed_out = read_popen_output_carefully(process, platform, timeout=1)
    assert timed_out
Example #17
def inner_main(mailer, on_first_invocation, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    instr_dir = util.get_instr_json_dir(config)
    instr_dir_exists = os.path.exists(instr_dir)

    envlog_dir = util.get_envlog_dir(config)
    envlog_dir_exists = os.path.exists(envlog_dir)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal(
            "Output file '%s' exists but is not a regular file" % out_file)

    if out_file_exists and on_first_invocation:
        util.fatal("Output results file '%s' already exists. "
                   "Move the file away before running Krun." % out_file)

    if instr_dir_exists and on_first_invocation:
        util.fatal("Instrumentation dir '%s' exists." % instr_dir)

    if envlog_dir_exists and on_first_invocation:
        util.fatal("Env log dir '%s' exists." % envlog_dir)

    if not out_file_exists and not on_first_invocation:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer, config)

    platform.quick_mode = args.quick
    platform.no_user_change = args.no_user_change
    platform.no_tickless_check = args.no_tickless_check
    platform.no_pstate_check = args.no_pstate_check
    platform.hardware_reboots = args.hardware_reboots

    # Create the instrumentation directory if required
    if on_first_invocation:
        # We only want to make a dir if >=1 VM is in instrumentation mode.
        for vm in config.VMS.itervalues():
            if vm['vm_def'].instrument:
                util.make_instr_dir(config)
                break

    debug("Checking platform preliminaries")
    platform.check_preliminaries()

    # Make a bit of noise if this is a virtualised environment
    if platform.is_virtual():
        warn("This appears to be a virtualised host. The results will be flawed. "
             "Use bare-metal for reliable results!")

    platform.collect_audit()

    # At this point the config file is OK, and on-disk state is consistent,
    # so let's daemonise (if requested).
    if args.daemonise:
        util.daemonise()

    if not on_first_invocation:
        # output file must exist, due to check above
        assert(out_file_exists)

        debug("Using pre-recorded initial temperature readings")
        manifest = ManifestManager(config, platform)

        platform_temps = {}
        for sensor, tup in manifest.starting_temperatures.iteritems():
            platform_temps[sensor] = tup[1]
        platform.starting_temperatures = platform_temps
    else:
        manifest = ManifestManager(config, platform, new_file=True)
        if manifest.num_execs_left == 0:
            # No executions, or all skipped
            fatal("Empty schedule!")

        try:
            info(("Wait %s secs to allow system to cool prior to "
                 "collecting initial temperature readings") %
                 config.TEMP_READ_PAUSE)

            # This part is wrapped in hooks, so that if daemons or networking are
            # taken down for process executions, then the initial temperature
            # reading gets the same treatment.
            util.run_shell_cmd_list(config.PRE_EXECUTION_CMDS,)
            platform.sleep(config.TEMP_READ_PAUSE)

            debug("Taking fresh initial temperature readings")
            platform.starting_temperatures = platform.take_temperature_readings()
            manifest.set_starting_temperatures(platform.starting_temperatures)

            # Write out an empty results file. After the initial reboot Krun
            # will expect this to exist.
            Results.ok_to_instantiate = True
            results = Results(config, platform)
            results.write_to_file()
        except:
            raise
        finally:
            util.run_shell_cmd_list(config.POST_EXECUTION_CMDS,)

        log_path = config.log_filename(resume=False)
        util.log_and_mail(mailer, debug,
                          "Benchmarking started",
                          "Benchmarking started.\nLogging to %s" %
                          log_path, bypass_limiter=True)

        util.reboot(manifest, platform)

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config,
                               mailer,
                               platform,
                               dry_run=args.dry_run)
    sched.run()
Example #18
from subprocess import Popen, PIPE
import os
import sys
import pytest

# Some core cycle tests collect two readings as fast as possible, so the delta
# should be pretty small (but it ultimately depends upon the CPU).
NOT_MANY_CYCLES = 500000

DIR = os.path.abspath(os.path.dirname(__file__))
TEST_PROG_PATH = os.path.join(DIR, "test_prog")

sys.path.append(os.path.join(DIR, "..", ".."))
from krun.platform import detect_platform
PLATFORM = detect_platform(None, None)

MSR_SUPPORT = PLATFORM.num_per_core_measurements > 0

def invoke_c_prog(mode):
    assert os.path.exists(TEST_PROG_PATH)

    p = Popen(TEST_PROG_PATH + " " + mode,
              stderr=PIPE, stdout=PIPE, shell=True)
    out, err = p.communicate()
    return p.returncode, out.strip(), err.strip()


def parse_keyvals(out, doubles=False):
    dct = {}
    for line in out.splitlines():
Example #19
File: krun.py Project: warsier/krun
def inner_main(mailer, on_first_invocation, config, args):
    out_file = config.results_filename()
    out_file_exists = os.path.exists(out_file)

    instr_dir = util.get_instr_json_dir(config)
    instr_dir_exists = os.path.exists(instr_dir)

    envlog_dir = util.get_envlog_dir(config)
    envlog_dir_exists = os.path.exists(envlog_dir)

    if out_file_exists and not os.path.isfile(out_file):
        util.fatal("Output file '%s' exists but is not a regular file" %
                   out_file)

    if out_file_exists and on_first_invocation:
        util.fatal("Output results file '%s' already exists. "
                   "Move the file away before running Krun." % out_file)

    if instr_dir_exists and on_first_invocation:
        util.fatal("Instrumentation dir '%s' exists." % instr_dir)

    if envlog_dir_exists and on_first_invocation:
        util.fatal("Env log dir '%s' exists." % envlog_dir)

    if not out_file_exists and not on_first_invocation:
        util.fatal("No results file to resume. Expected '%s'" % out_file)

    # Initialise platform instance and assign to VM defs.
    # This needs to be done early, so VM sanity checks can run.
    platform = detect_platform(mailer, config)

    platform.quick_mode = args.quick
    platform.no_user_change = args.no_user_change
    platform.no_tickless_check = args.no_tickless_check
    platform.no_pstate_check = args.no_pstate_check
    platform.hardware_reboots = args.hardware_reboots

    # Create the instrumentation directory if required
    if on_first_invocation:
        # We only want to make a dir if >=1 VM is in instrumentation mode.
        for vm in config.VMS.itervalues():
            if vm['vm_def'].instrument:
                util.make_instr_dir(config)
                break

    debug("Checking platform preliminaries")
    platform.check_preliminaries()

    # Make a bit of noise if this is a virtualised environment
    if platform.is_virtual():
        warn(
            "This appears to be a virtualised host. The results will be flawed. "
            "Use bare-metal for reliable results!")

    platform.collect_audit()

    # At this point the config file is OK, and on-disk state is consistent,
    # so let's daemonise (if requested).
    if args.daemonise:
        util.daemonise()

    if not on_first_invocation:
        # output file must exist, due to check above
        assert (out_file_exists)

        debug("Using pre-recorded initial temperature readings")
        manifest = ManifestManager(config, platform)

        platform_temps = {}
        for sensor, tup in manifest.starting_temperatures.iteritems():
            platform_temps[sensor] = tup[1]
        platform.starting_temperatures = platform_temps
    else:
        manifest = ManifestManager(config, platform, new_file=True)
        if manifest.num_execs_left == 0:
            # No executions, or all skipped
            fatal("Empty schedule!")

        try:
            info(("Wait %s secs to allow system to cool prior to "
                  "collecting initial temperature readings") %
                 config.TEMP_READ_PAUSE)

            # This part is wrapped in hooks, so that if daemons or networking are
            # taken down for process executions, then the initial temperature
            # reading gets the same treatment.
            util.run_shell_cmd_list(config.PRE_EXECUTION_CMDS, )
            platform.sleep(config.TEMP_READ_PAUSE)

            debug("Taking fresh initial temperature readings")
            platform.starting_temperatures = platform.take_temperature_readings(
            )
            manifest.set_starting_temperatures(platform.starting_temperatures)

            # Write out an empty results file. After the initial reboot Krun
            # will expect this to exist.
            Results.ok_to_instantiate = True
            results = Results(config, platform)
            results.write_to_file()
        except:
            raise
        finally:
            util.run_shell_cmd_list(config.POST_EXECUTION_CMDS, )

        log_path = config.log_filename(resume=False)
        util.log_and_mail(mailer,
                          debug,
                          "Benchmarking started",
                          "Benchmarking started.\nLogging to %s" % log_path,
                          bypass_limiter=True)

        util.reboot(manifest, platform)

    # Assign platform to VM defs -- needs to happen early for sanity checks
    util.assign_platform(config, platform)

    sanity_checks(config, platform)

    # Build job queue -- each job is an execution
    sched = ExecutionScheduler(config, mailer, platform, dry_run=args.dry_run)
    sched.run()
Example #20
def main():
    platform = detect_platform(None, Config())
    platform.check_preliminaries()
    platform.sanity_checks()
    for jvm_name, jvm_cmd in JAVA_VMS.items():
        csvp = "dacapo.%s.results" % jvm_name
        with open(csvp, 'wb') as csvf:
            sys.stdout.write("%s:\n" % jvm_name)
            writer = csv.writer(csvf)
            writer.writerow(['processnum', 'benchmark'] + range(ITERATIONS))
            for benchmark in WORKING_BENCHS:
                if jvm_name == "graal" and benchmark in DISABLE_ON_GRAAL:
                    continue
                if sys.platform.startswith(
                        "openbsd") and benchmark in DISABLE_ON_OPENBSD:
                    continue
                sys.stdout.write("  %s:" % benchmark)
                for process in range(PROCESSES):
                    sys.stdout.write(" %s" % str(process))
                    sys.stdout.flush()
                    # Flush the CSV writing, and then give the OS some time
                    # to write stuff out to disk before running the next process
                    # execution.
                    csvf.flush()
                    os.fsync(csvf.fileno())
                    if SSH_DO_COPY:
                        os.system("cat %s | ssh -o 'BatchMode yes' -i %s %s@%s 'cat > %s/%s.dacapo.%s.results'" \
                                  % (csvp, SSH_KEY, SSH_USER, SSH_HOST, \
                                     SSH_COPY_DIR, socket.gethostname(), jvm_name))
                    time.sleep(3)

                    stdout, stderr, rc = run_shell_cmd_bench(
                        "%s -jar %s %s -n %s" %
                        (jvm_cmd, JAR, benchmark, ITERATIONS + 1),
                        platform,
                        failure_fatal=False)
                    if rc != 0:
                        sys.stderr.write("\nWARNING: process exec crashed\n")
                        sys.stderr.write("stdout:\n")
                        sys.stderr.write(stdout + "\n")
                        sys.stderr.write("\nstderr:\n")
                        sys.stderr.write(stderr + "\n")
                        sys.stderr.flush()
                        writer.writerow([process, benchmark, "crash"])
                        continue
                    output = []
                    for line in stderr.splitlines():
                        if not line.startswith("====="):
                            continue
                        if "completed warmup" not in line:
                            continue
                        assert benchmark in line
                        line = line.split()
                        index = line.index("in")
                        assert line[index + 2] == "nsec"
                        output.append(
                            str(Decimal(line[index + 1]) / 1000000000))
                    assert len(output) == ITERATIONS
                    writer.writerow([process, benchmark] + output)
                sys.stdout.write("\n")
    platform.save_power()
Example #21
def platform(self):
    return detect_platform(MockMailer(), Config())
Example #22
def platform(self):
    return detect_platform(MockMailer(), Config())
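The last two snippets are methods cut out of a test class, so the enclosing class and any fixture decorator are not shown. A hypothetical reconstruction of how such a fixture could be declared under pytest, assuming MockMailer lives in krun's test helpers (the class name and import path are guesses):

import pytest

from krun.config import Config
from krun.platform import detect_platform
from krun.tests.mocks import MockMailer  # assumed location of MockMailer


class TestPlatform(object):
    @pytest.fixture
    def platform(self):
        # A fresh platform object built against a mock mailer and a default config.
        return detect_platform(MockMailer(), Config())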