Example #1
    def read_from_file(self, config_file):
        assert config_file.endswith(".krun")
        config_dict = {}
        try:
            execfile(config_file, config_dict)
        except Exception:
            self._fatal_exception_execing_config(sys.exc_info())

        for key in CHECK_FIELDS:
            if key not in config_dict:
                fatal("Config file is missing a %s" % key)

        for vm_name in config_dict["VMS"]:
            if " " in vm_name:
                fatal("VM names must not contain spaces")

        for vm_name in config_dict["BENCHMARKS"]:
            if " " in vm_name:
                fatal("Benchmark names must not contain spaces")

        for variant_name in config_dict["VARIANTS"]:
            if " " in variant_name:
                fatal("Variant names must not contain spaces")

        self.__dict__.update(config_dict)
        self.filename = config_file
        with open(config_file, "r") as fp:
            self.text = fp.read()

        if self.AMPERF_RATIO_BOUNDS and not self.AMPERF_BUSY_THRESHOLD or \
                not self.AMPERF_RATIO_BOUNDS and self.AMPERF_BUSY_THRESHOLD:
                fatal("AMPERF_RATIO_BOUNDS and AMPERF_BUSY_THRESHOLD must either "
                      "both be defined in the config file, or neither")
Example #2
    def _check_apm_state(self):
        debug("Checking APM state is geared for high-performance")
        adjust = False

        out = self._get_apm_output()
        lines = out.split("\n")

        n_lines = len(lines)
        if n_lines != 3:
            fatal("Expected 3 lines of output from apm(8), got %d" % n_lines)

        perf_line = lines[2].strip()

        # First, the performance mode should be manual (static)
        if not perf_line.startswith("Performance adjustment mode: manual"):
            debug("performance mode is not manual.")
            adjust = True

        # Second, the CPU should be running as fast as possible
        out, _, _ = run_shell_cmd(self.GET_SETPERF_CMD)
        elems = out.split("=")
        if len(elems) != 2 or elems[1].strip() != "100":
            debug("hw.setperf is '%s' not '100'" % elems[1])
            adjust = True

        if adjust:
            debug("adjusting performance mode")
            out, _, _ = run_shell_cmd("apm -H")
            self._check_apm_state()  # should work this time
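
For reference, a rough sketch of the output shapes _check_apm_state() expects on OpenBSD; the exact wording of apm(8) output varies between releases, so the first two lines below are assumptions, not verbatim krun expectations:

    # Hypothetical apm(8) output: exactly three lines, the third starting with
    # "Performance adjustment mode: manual".
    apm_out = ("Battery state: high, 100% remaining\n"
               "A/C adapter state: connected\n"
               "Performance adjustment mode: manual (2400 MHz)")
    assert len(apm_out.split("\n")) == 3
    assert apm_out.split("\n")[2].startswith("Performance adjustment mode: manual")

    # Hypothetical `sysctl hw.setperf` output: a single key=value pair set to 100.
    setperf_out = "hw.setperf=100"
    assert setperf_out.split("=")[1].strip() == "100"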
Example #3
    def sanity_checks(self):
        JavaVMDef.sanity_checks(self)

        if not self.vm_path != "mx":
            fatal("Graal's vm_path should be a path to an 'mx' script")

        self._check_jvmci_server_enabled()
Example #4
    def _check_cpu_governor(self):
        """Checks the right CPU governor is in effect

        Since we do not know which CPU benchmarks will be scheduled on,
        we simply check them all"""

        # Check CPU cores are running with the 'performance' governor
        # And that the correct scaler is in use. We never want the pstate
        # scaler, as it tends to cause the clock speed to fluctuate, even
        # when in performance mode. Instead we use standard ACPI.
        changed = False
        for cpu_n in xrange(self.num_cpus):
            # Check CPU governors
            debug("Checking CPU governor for CPU%d" % cpu_n)
            with open(LinuxPlatform.CPU_GOV_FMT % cpu_n, "r") as fh:
                v = fh.read().strip()

            if v != "performance":
                debug("changing CPU governor for CPU %s" % cpu_n)
                cmd = "%s cpufreq-set -c %d -g performance" % \
                    (self.change_user_cmd, cpu_n)
                stdout, stderr, rc = run_shell_cmd(cmd, failure_fatal=False)
                changed = True

                if rc != 0:
                    fatal("Governor for CPU%d governor: is '%s' not "
                          "performance'.\nKrun attempted to adjust the "
                          "governor using:\n  '%s'\n"
                          "however this command failed. Is %s configured "
                          "and is cpufrequtils installed?"
                          % (cpu_n, v, cmd, self.change_user_cmd))
        if changed:
            self._check_cpu_governor()  # just to be sure
Example #5
    def read_from_file(self, config_file):
        assert config_file.endswith(".krun")
        config_dict = {}
        try:
            execfile(config_file, config_dict)
        except Exception:
            self._fatal_exception_execing_config(sys.exc_info())

        for key in CHECK_FIELDS:
            if key not in config_dict:
                fatal("Config file is missing a %s" % key)

        for vm_name in config_dict["VMS"]:
            if " " in vm_name:
                fatal("VM names must not contain spaces")

        for vm_name in config_dict["BENCHMARKS"]:
            if " " in vm_name:
                fatal("Benchmark names must not contain spaces")

        for variant_name in config_dict["VARIANTS"]:
            if " " in variant_name:
                fatal("Variant names must not contain spaces")

        self.__dict__.update(config_dict)
        self.filename = config_file
        with open(config_file, "r") as fp:
            self.text = fp.read()

        if self.AMPERF_RATIO_BOUNDS and not self.AMPERF_BUSY_THRESHOLD or \
                not self.AMPERF_RATIO_BOUNDS and self.AMPERF_BUSY_THRESHOLD:
            fatal("AMPERF_RATIO_BOUNDS and AMPERF_BUSY_THRESHOLD must either "
                  "both be defined in the config file, or neither")
Example #6
    def _check_util_linux_installed(self):
        debug("Check util-linux is installed")

        from distutils.spawn import find_executable
        if not find_executable("taskset"):
            fatal("util-linix is not installed "
                  "(needed for pinning and scheduler tweaking).")
Example #7
    def sanity_checks(self):
        JavaVMDef.sanity_checks(self)

        if not self.vm_path.endswith("java"):
            fatal("Graal vm_path should be a path to a jvmci enabled java binary")

        self._check_jvmci_server_enabled()
Example #8
    def __init__(self, iterations_runner, env=None):
        self.iterations_runner = iterations_runner

        # List of EnvChange instances to apply prior to each experiment.
        # These should be benchmark agnostic. Look elsewhere for
        # environment changes specific to a benchmark.
        self.common_env_changes = []

        # The user can pass in a dict to influence the environment.
        #
        # These variables are *prepended* to any coinciding environment that
        # Krun has set to run benchmarks. E.g. If Krun wants to set
        # LD_LIBRARY_PATH=/opt/pypy/pypy/goal, and the user passes down
        # {"LD_LIBRARY_PATH": "/wibble/lib"}, the resulting environment is
        # LD_LIBRARY_PATH=/wibble/lib:/opt/pypy/pypy/goal
        #
        # This is useful, for example, if the user built their own GCC
        # and needs to force the LD_LIBRARY_PATH.
        if env is not None:
            if not isinstance(env, dict):
                fatal("'env' argument for VM defs should be a dict")
            for k, v in env.iteritems():
                self.add_env_change(EnvChangeAppend(k, v))

        # tempting as it is to add a self.vm_path, we don't. If we were to add
        # natively compiled languages, then there is no "VM" to speak of.

        self.platform = None  # Set later

        # Do not execute the benchmark program
        # (useful for testing configurations).
        self.dry_run = False
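
A standalone sketch of the environment merge described in the comment above (this is not krun's EnvChange code, just an illustration of the described prepend behaviour using the same LD_LIBRARY_PATH example):

    def prepend_env(env, key, user_value, sep=":"):
        """Put the user's value in front of whatever value Krun needs to set."""
        if env.get(key):
            env[key] = user_value + sep + env[key]
        else:
            env[key] = user_value

    env = {"LD_LIBRARY_PATH": "/opt/pypy/pypy/goal"}
    prepend_env(env, "LD_LIBRARY_PATH", "/wibble/lib")
    assert env["LD_LIBRARY_PATH"] == "/wibble/lib:/opt/pypy/pypy/goal"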
Example #9
def find_internal_jvmci_java_bin(base_dir):
    """
    The jvmci internal jdk8 seems to move around depending upon
    the JVM with which it was built.

    E.g. the java binary could be:
    jvmci/jdk1.8.0-internal/product/bin/java

    or it could be:
    jvmci/jdk1.8.0_66-internal/product/bin/java

    This is a helper function to try and find the 'java' binary
    inside this "moving" directory.

    arguments:
    base_dir -- base jvmci directory"""

    try:
        matches = fnmatch.filter(os.listdir(base_dir), 'jdk1.8.0*internal*')
    except OSError as e:
        # we didn't find an internal JDK
        fatal("couldn't find the JVMCI internal JDK")

    if len(matches) == 1:
        return os.path.join(base_dir, matches[0], "product", "bin", "java")
    elif len(matches) > 1:
        raise Exception("Found more than one jvmci internal jdk in %s" % base_dir)
    else:
        raise Exception("couldn't locate jvmci internal jdk in %s" % base_dir)
Example #10
def test_fatal(capsys, caplog):
    msg = "example text"
    with pytest.raises(FatalKrunError):
        fatal(msg)
    out, err = capsys.readouterr()
    assert out == ""
    assert msg in caplog.text
Example #11
    def wait_for_temperature_sensors(self, testing=False):
        """A polling loop waiting for temperature sensors to return (close) to
        their starting values.

        When 'testing' is True, only one iteration of the wait loop will
        run (used only in unit tests)."""

        if self.developer_mode:
            warn("Not waiting for temperature sensors due to developer mode")
            return

        if not testing:
            bail_out_time = time.clock() + self.TEMP_WAIT_SECS_BEFORE_GIVEUP
        else:
            bail_out_time = 0  # force only one iteration

        while True:
            flag, reason = self.temp_sensors_within_interval()

            if flag == self.TEMP_OK:
                break
            elif flag == self.TEMP_TOO_HOT:
                time.sleep(1)
            elif flag == self.TEMP_TOO_COLD:
                # This takes a variable amount of time, but on a modern
                # machine it takes only a fraction of a second.
                util.make_heat()

            if time.clock() >= bail_out_time:
                break

        if flag != self.TEMP_OK:
            fatal("Temperature timeout: %s" % reason)
Example #12
    def sanity_checks(self):
        JavaVMDef.sanity_checks(self)

        if not self.vm_path != "mx":
            fatal("Graal's vm_path should be a path to an 'mx' script")

        self._check_jvmci_server_enabled()
Example #13
def test_fatal(capsys, caplog):
    caplog.setLevel(logging.ERROR)
    msg = "example text"
    with pytest.raises(FatalKrunError):
        fatal(msg)
    out, err = capsys.readouterr()
    assert out == ""
    assert msg in caplog.text()
Example #14
    def check_config_consistency(self, config_str, filename):
        import difflib
        if self.text != config_str:
            diff = "".join(difflib.unified_diff(
                self.text.splitlines(True), config_str.splitlines(True),
                self.filename, "<cached in %s>" % filename))
            fatal("The experiment is in an inconsistent state as the config "
                  "file %s has changed since it was initially cached in %s"
                  "\n%s" % (self.filename, filename, diff))
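
A self-contained illustration of the unified diff this check emits when the on-disk config no longer matches the cached copy (the file contents, the ITERATIONS key and the file names below are made up):

    import difflib

    on_disk = "VMS = ['CPython']\nITERATIONS = 10\n"
    cached = "VMS = ['CPython']\nITERATIONS = 5\n"
    diff = "".join(difflib.unified_diff(
        on_disk.splitlines(True), cached.splitlines(True),
        "example.krun", "<cached in example_results.json.bz2>"))
    print(diff)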
Example #15
    def pin_process_args(self):
        """Pin to a set of adaptive tick CPUs.
        We are working on the assumption that the kernel is NO_HZ_FULL_ALL, meaning
        that all but the first CPU are in adaptive tick mode."""

        if self.num_cpus == 1:
            fatal("not enough CPUs to pin")

        cpus = ",".join([str(x) for x in xrange(1, self.num_cpus)])
        return ["taskset", "-c", cpus]
Example #16
def detect_platform(mailer):
    plat_name = sys.platform
    if plat_name.startswith("linux"):
        if os.path.exists("/etc/debian_version"):
            return DebianLinuxPlatform(mailer)
        else:
            fatal("Unknown Linux platform")
    elif plat_name.startswith("openbsd"):
        return OpenBSDPlatform(mailer)
    else:
        fatal("I don't have support for your platform")
Example #17
    def find_temperature_sensors(self):
        lines = self._get_sysctl_sensor_lines()
        sensors = []
        for line in lines.split("\n"):
            elems = line.split("=")

            if len(elems) != 2:
                fatal("Malformed sysctl line: '%s'" % line)

            sensors.append(elems[0].strip())
        self.temp_sensors = sensors
Example #18
    def _read_temperature_sensor(self, sid):
        try:
            sysfs_file = self.temp_sensor_map[sid]
        except KeyError:
            fatal("Failed to read sensor: %s (missing key)" % sid)

        try:
            with open(sysfs_file) as fh:
                return int(fh.read())
        except IOError:
            fatal("Failed to read sensor: %s at %s" % (sid, sysfs_file))
Example #19
    def starting_temperatures(self, readings_dct):
        """Sets the starting temperatures and automatically updates the
        temperature thresholds."""

        # Check consistency of sensors
        keys1 = list(sorted(readings_dct.keys()))
        keys2 = list(sorted(self.temp_sensors))
        if keys1 != keys2:
            fatal("Inconsistent sensors. %s vs %s" % \
                  (keys1, keys2))  # sensors moved between reboot?

        self._starting_temperatures = readings_dct
        debug("Set start temperatures: %s" % readings_dct)
Example #20
    def _get_num_cpus(self):
        err = False

        # most reliable method generic to all Linux
        out, _, rv = run_shell_cmd("grep -c ^processor  /proc/cpuinfo")
        if rv == 0:
            out = out.strip()
            try:
                return int(out)
            except ValueError:
                pass

        fatal("could not detect number of logical CPUs")
Example #21
    def sync_disks(self):
        """Force pending I/O to physical disks"""

        debug("sync disks...")
        rc = subprocess.call("/bin/sync")
        if rc != 0:
            fatal("sync failed")

        # The OpenBSD manual says: "sync() [the system call] may return before
        # the buffers are completely flushed.", and the sync command is merely
        # a thin wrapper around the syscall. We wait a while. We have reports
        # that the sync command itself can take up to 10 seconds.
        time.sleep(SYNC_SLEEP_SECS)
Example #22
    def _run_exec_capture(self, child_pipe):
        """Allows the subprocess (whose pipes we have handles on) to run
        to completion. We print stderr as it arrives.

        Returns a triple: stdout, stderr and the returncode."""

        # Get raw OS-level file descriptors
        stderr_fd, stdout_fd = \
            child_pipe.stderr.fileno(), child_pipe.stdout.fileno()

        # Ensure both fds are unbuffered.
        # stderr should already be, but it doesn't hurt to force it.
        for f in [stderr_fd, stdout_fd]:
            self.platform.unbuffer_fd(f)

        stderr_data, stdout_data = [], []
        stderr_consumer = print_stderr_linewise(info)
        stderr_consumer.next() # start the generator

        open_fds = [stderr_fd, stdout_fd]
        while open_fds:
            ready = select.select(open_fds, [], [], SELECT_TIMEOUT)

            if stdout_fd in ready[0]:
                d = os.read(stdout_fd, PIPE_BUF_SZ)
                if d == "":  # EOF
                    open_fds.remove(stdout_fd)
                else:
                    stdout_data.append(d)

            if stderr_fd in ready[0]:
                d = os.read(stderr_fd, PIPE_BUF_SZ)
                if d == "":  # EOF
                    open_fds.remove(stderr_fd)
                else:
                    stderr_data.append(d)
                    stderr_consumer.send(d)
        # We know stderr and stdout are closed.
        # Now we are just waiting for the process to exit, which may have
        # already happened of course.
        try:
            child_pipe.wait()
        except Exception as e:
            fatal("wait() failed on child pipe: %s" % str(e))

        assert child_pipe.returncode is not None

        stderr = "".join(stderr_data)
        stdout = "".join(stdout_data)

        return stdout, stderr, child_pipe.returncode
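
print_stderr_linewise() is not shown in this example; a minimal sketch of how such a coroutine could buffer partial reads and emit whole lines (an assumption, not necessarily krun's implementation):

    def print_stderr_linewise(emit):
        buf = ""
        while True:
            chunk = yield  # receives each os.read() chunk via send()
            buf += chunk
            while "\n" in buf:
                line, buf = buf.split("\n", 1)
                emit("stderr: " + line)

    def emit(line):
        print(line)

    consumer = print_stderr_linewise(emit)
    next(consumer)  # prime the generator, as stderr_consumer.next() does above
    consumer.send("partial")
    consumer.send(" line\nanother line\n")  # emits two complete lines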
Example #23
    def _fatal_kernel_arg(self, arg, prefix="", suffix=""):
        """Debian specific advice on adding/changing a kernel arg"""

        if prefix != "":
            prefix += "\n"

        if suffix != "":
            suffix += "\n"

        fatal("%s"
              "Set `%s` in the kernel arguments.\n"
              "To do this on Debian:\n"
              "  * Edit /etc/default/grub\n"
              "  * Amend GRUB_CMDLINE_LINUX_DEFAULT\n"
              "  * Run `sudo update-grub`\n"
              "%s" % (prefix, arg, suffix))
Example #24
    def _fatal_kernel_arg(self, arg, prefix, suffix):
        """Bail out and inform user how to add a kernel argument"""

        # This is generic Linux advice.
        # If you can offer better distribution-specific advice, override this
        # in a more specific Linux subclass.

        if prefix != "":
            prefix += "\n"

        if suffix != "":
            suffix += "\n"

        fatal("%s"
              "Set `%s` in the kernel arguments.\n"
              "%s" % (prefix, arg, suffix))
Example #25
    def _check_perf_samplerate(self):
        """Attempt to minimise time spent by the Linux perf kernel profiler.
        You can't disable this, so the best we can do is set the sample
        rate to the minimum value of one sample per second."""

        with open(LinuxPlatform.PERF_SAMPLE_RATE) as fh:
            sr = int(fh.read().strip())

        if sr != 1:
            cmd = "%s sh -c 'echo 1 > %s'" % \
                (self.change_user_cmd, LinuxPlatform.PERF_SAMPLE_RATE)
            stdout, stderr, rc = run_shell_cmd(cmd, failure_fatal=False)

            if rc != 0:
                fatal("perf profiler sample rate >1 p/s. "
                      "Krun was unable to adjust it.\nFailing command:\n  %s"
                      % cmd)
Example #26
    def _remove_previous_execs_from_schedule(self):
        for key in self.results.data:
            num_completed_jobs = self.results.jobs_completed(key)
            if num_completed_jobs > 0:
                try:
                    debug("%s has already been run %d times." %
                          (key, num_completed_jobs))
                    for _ in range(num_completed_jobs):
                        self.remove_job_by_key(key)
                        self.jobs_done += 1
                except JobMissingError as excn:
                    tup = (excn.key, self.config.filename,
                           self.config.results_filename())
                    msg = ("Failed to resume benchmarking session.\n"
                           "The execution %s appears in results "
                           "file: %s, but not in config file: %s." % tup)
                    util.fatal(msg)
Example #27
    def _remove_previous_execs_from_schedule(self):
        for key in self.results.data:
            num_completed_jobs = self.results.jobs_completed(key)
            if num_completed_jobs > 0:
                try:
                    debug("%s has already been run %d times." %
                          (key, num_completed_jobs))
                    for _ in range(num_completed_jobs):
                        self.remove_job_by_key(key)
                        self.jobs_done += 1
                except JobMissingError as excn:
                    tup = (excn.key, self.config.filename,
                           self.config.results_filename())
                    msg = ("Failed to resume benchmarking session.\n"
                           "The execution %s appears in results "
                           "file: %s, but not in config file: %s." % tup)
                    util.fatal(msg)
Example #28
    def read_from_file(self, config_file):
        assert config_file.endswith(".krun")
        config_dict = {}
        try:
            execfile(config_file, config_dict)
        except Exception as e:
            error("error importing config file:\n%s" % str(e))
            raise

        for key in CHECK_FIELDS:
            if key not in config_dict:
                fatal("Config file is missing a %s" % key)

        self.__dict__.update(config_dict)
        self.filename = config_file
        with open(config_file, "r") as fp:
            self.text = fp.read()
Example #29
File: config.py Project: bennn/krun
    def read_from_file(self, config_file):
        assert config_file.endswith(".krun")
        config_dict = {}
        try:
            execfile(config_file, config_dict)
        except Exception as e:
            error("error importing config file:\n%s" % str(e))
            raise

        for key in CHECK_FIELDS:
            if key not in config_dict:
                fatal("Config file is missing a %s" % key)

        self.__dict__.update(config_dict)
        self.filename = config_file
        with open(config_file, "r") as fp:
            self.text = fp.read()
Example #30
File: krun.py Project: bennn/krun
def setup_logging(parser):
    # Colours help to distinguish benchmark stderr from messages printed
    # by the runner. We also print warnings and errors in red so that it
    # is quite impossible to miss them.
    args = parser.parse_args()

    # We default to "info" level, user can change by passing
    # in a different argument to --debug on the command line.
    level_str = args.debug_level.upper()
    if level_str not in ("DEBUG", "INFO", "WARN", "DEBUG", "CRITICAL", "ERROR"):
        util.fatal("Bad debug level: %s" % level_str)

    level = getattr(logging, level_str.upper())

    logging.root.setLevel(level)
    stream = logging.StreamHandler()
    stream.setLevel(level)
    stream.setFormatter(CONSOLE_FORMATTER)
    logging.root.handlers = [stream]
Example #31
    def strip_results(self, key_spec):
        debug("Strip results: %s" % key_spec)

        spec_elems = key_spec.split(":")
        if len(spec_elems) != 3:
            fatal("malformed key spec: %s" % key_spec)

        new_data = self.data.copy()
        removed_keys = 0
        removed_execs = 0

        # We have to keep track of how many executions have run successfully so
        # that we can set self.reboots accordingly. It's not correct to simply
        # deduct one for each execution we remove, as the reboots value is one
        # higher due to the initial reboot. Bear in mind the user may strip
        # several result keys in succession, so counting the completed
        # executions is the only safe way.
        completed_execs = 0

        for key in self.data.iterkeys():
            key_elems = key.split(":")
            # deal with wildcards
            for i in xrange(3):
                if spec_elems[i] == "*":
                    key_elems[i] = "*"

            # decide whether to remove
            if key_elems == spec_elems:
                removed_keys += 1
                removed_execs += len(new_data[key])
                new_data[key] = []
                self.eta_estimates[key] = []
                info("Removed results for: %s" % key)
            else:
                completed_execs += len(new_data[key])

        self.data = new_data

        # If the results were collected with reboot mode, update reboots count
        if self.reboots != 0:
            self.reboots = completed_execs

        return removed_keys
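
A minimal standalone sketch of the wildcard matching used above; it assumes keys really are three colon-separated "bench:vm:variant" fields, and the names in the asserts are made up:

    def key_matches(key, key_spec):
        key_elems = key.split(":")
        spec_elems = key_spec.split(":")
        assert len(key_elems) == len(spec_elems) == 3
        for i in range(3):
            if spec_elems[i] == "*":
                key_elems[i] = "*"
        return key_elems == spec_elems

    assert key_matches("binarytrees:CPython:default-python", "*:CPython:*")
    assert not key_matches("binarytrees:PyPy:default-python", "*:CPython:*")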
Example #32
    def _check_aslr_disabled(self):
        debug("Checking ASLR is off")
        with open(self.ASLR_FILE, "r") as fh:
            enabled = fh.read().strip()
        if enabled == "0":
            return  # OK!
        else:
            # ASLR is on, but we can try to disable it
            debug("Turning ASLR off")
            cmd = "%s sh -c 'echo 0 > %s'" % \
                (self.change_user_cmd, self.ASLR_FILE)
            stdout, stderr, rc = run_shell_cmd(cmd, failure_fatal=False)

            if rc != 0:
                msg = "ASLR disabled (%s, expect '0' got '%s').\n" % \
                    (self.ASLR_FILE, enabled)
                msg += "Krun tried to turn it off, but failed."
                fatal(msg)
            else:
                self._check_aslr_disabled()  # should work this time
Example #33
    def take_temperature_readings(self):
        readings = {}
        for sensor in self.temp_sensors:
            line = self._raw_read_temperature_sensor(sensor)

            elems = line.split("=")

            if len(elems) != 2:
                fatal("Failed to read sensor: '%s'. "
                      "Malformed sysctl output: %s" % (sensor, line))

            k, v = elems
            v_elems = [x.strip() for x in v.split(" ")]
            k = k.strip()
            assert k == sensor

            # Typically the value element looks like:
            # "49.00 degC" or "48.00 degC (zone temperature)"
            # We will only concern ourselves with the first two elements.
            # Notice that the values are already reported in degrees
            # centigrade, so we don't have to process them.
            if len(v_elems) < 2 or v_elems[1] != "degC":
                fatal("Failed to read sensor: '%s'. "
                      "Odd non-degC value: '%s'" % (k, v))

            try:
                temp_val = float(v_elems[0])
            except ValueError:
                fatal("Failed to read sensor %s. "
                      "Non-numeric value: '%s'" % (k, v_elems[0]))

            readings[k] = temp_val

        return readings
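
An example of the sysctl line format this parser expects; the sensor name is an illustrative OpenBSD-style name, not a value taken from krun:

    line = "hw.sensors.cpu0.temp0=54.00 degC"
    key, value = line.split("=")
    v_elems = [x.strip() for x in value.split(" ")]
    assert v_elems[1] == "degC"
    assert float(v_elems[0]) == 54.0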
Example #34
    def next_exec_key_index(self):
        """Returns the sequential process execution index into the ordered list
        of all process executions sharing the same 'bench:vm:variant' key.

        Although this could have been done at `_parse()` time, it would require
        a (variable sized) `dict` since we don't know which key we will be
        counting for until we find the first outstanding (O) record.

        Instead, this method does a pass over the manifest searching for
        records whose key is `self.next_exec_key`.

        This function assumes that there is at least one outstanding job (O
        line) in the manifest. If there is not, it will raise `FatalKrunError`.
        """

        fh = self._open()
        lines = iter(fh)
        count = 0

        # Skip header
        for line in lines:
            strip_line = line.strip()
            if strip_line == "keys":
                break
        else:
            util.fatal("Manifest is missing a body")

        # Now count the number of matching keys until the first 'O'
        # (outstanding) record.
        for line in lines:
            flag, key = line.strip().split()
            if key == self.next_exec_key:
                if flag == "O":
                    break
                count += 1
        else:
            util.fatal("Manifest ended unexpectedly")

        return count
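
A sketch of the manifest layout next_exec_key_index() walks over. The 'O' flag for outstanding records is taken from the code above; the 'C' flag used for the earlier (non-outstanding) records is only a placeholder assumption, and the key is made up. With this body and next_exec_key set to "binarytrees:CPython:default-python", the method would return 2, since two matching records precede the first 'O' record:

    manifest_body = """\
    keys
    C binarytrees:CPython:default-python
    C binarytrees:CPython:default-python
    O binarytrees:CPython:default-python
    """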
Example #35
def find_internal_jvmci_java_home(base_dir):
    """
    The jvmci jdk8 is named according to the JVM that was used to build it.

    Point this function at the 'jvmci' dir and it will return the JAVA_HOME for
    the jvmci JDK.

    arguments:
    base_dir -- base jvmci directory"""

    try:
        matches = fnmatch.filter(os.listdir(base_dir), 'jdk1.8*')
    except OSError:
        # we didn't find an internal JDK
        fatal("couldn't find the JVMCI internal JDK")

    if len(matches) == 1:
        return os.path.join(base_dir, matches[0], "product")
    elif len(matches) > 1:
        raise Exception("Found more than one jvmci internal jdk in %s" % base_dir)
    else:
        raise Exception("couldn't locate jvmci internal jdk in %s" % base_dir)
Example #36
File: krun.py Project: bennn/krun
def main(parser):
    args = parser.parse_args()

    if args.dump is not None:
        if not args.filename.endswith(".json.bz2"):
            usage(parser)
        else:
            results = Results(None, None, results_file=args.filename)
            if args.dump == "config" or "audit":
                text = unicode(results.__getattribute__(args.dump))
            else:
                text = json.dumps(results.__getattribute__(args.dump),
                                  sort_keys=True, indent=2)
            # String data read in from JSON are unicode objects. This matters
            # for us as some data in the audit includes unicode characters.
            # If it does, a simple print no longer suffices if the system
            # locale is (e.g.) ASCII. In this case print will raise an
            # exception. The correct thing to do is to encode() the unicode to
            # the system locale.
            print(text.encode(locale.getpreferredencoding()))
            sys.exit(0)

    if not args.filename.endswith(".krun"):
        usage(parser)

    try:
        if os.stat(args.filename).st_size <= 0:
            util.fatal('Krun configuration file %s is empty.' % args.filename)
    except OSError:
        util.fatal('Krun configuration file %s does not exist.' % args.filename)

    config = Config(args.filename)

    if args.info:
        # Info mode doesn't run the experiment.
        # Just prints some metrics and exits.
        util.print_session_info(config)
        return

    if args.strip_results:
        util.strip_results(config, args.strip_results)
        return

    attach_log_file(config, args.resume)
    debug("Krun invoked with arguments: %s" % sys.argv)

    mail_recipients = config.MAIL_TO
    if type(mail_recipients) is not list:
        util.fatal("MAIL_TO config should be a list")

    mailer = Mailer(mail_recipients, max_mails=config.MAX_MAILS)

    try:
        inner_main(mailer, config, args)
    except util.FatalKrunError as e:
        subject = "Fatal Krun Exception"
        mailer.send(subject, e.args[0], bypass_limiter=True)
        util.run_shell_cmd_list(config.POST_EXECUTION_CMDS)
        raise e
Example #37
def find_internal_jvmci_java_home(base_dir):
    """
    The jvmci jdk8 is named according to the JVM that was used to build it.

    Point this function at the 'jvmci' dir and it will return the JAVA_HOME for
    the jvmci JDK.

    arguments:
    base_dir -- base jvmci directory"""

    try:
        matches = fnmatch.filter(os.listdir(base_dir), 'jdk1.8*')
    except OSError:
        # we didn't find an internal JDK
        fatal("couldn't find the JVMCI internal JDK")

    if len(matches) == 1:
        return os.path.join(base_dir, matches[0], "product")
    elif len(matches) > 1:
        raise Exception("Found more than one jvmci internal jdk in %s" % base_dir)
    else:
        raise Exception("couldn't locate jvmci internal jdk in %s" % base_dir)
Example #38
    def _check_realtime_throttle_disabled(self):
        """Linux kernel gets pretty upset if you run a CPU intensive thread
        under the real-time thread schedule policy. By default Linux will
        artificially pre-empt such threads to give other things a chance to run
        on this core. A switch will flip at runtime leaving a message in dmesg
        when this comes into effect.

        See the "Limiting the CPU usage of real-time and deadline processes"
        section in sched(7).

        We don't want "throttling" on the benchmarking cores.

        From sched(7):

        "Specifying [sched_rt_runtime_us] -1 makes the runtime the same as the
        period; that is, no CPU time is set aside for non-real-time processes."
        """

        debug("Check real-time thread throttling is off")

        for itr in xrange(2):
            with open(LinuxPlatform.SCHED_RT_RUNTIME_US) as fh:
                val = fh.read().strip()

            if val != "-1":
                if itr == 0:
                    debug("%s is not -1, adjusting." % LinuxPlatform.SCHED_RT_RUNTIME_US)

                    # Needs to happen as root
                    args = self.change_user_args() +  \
                        ["sh", "-c",
                         "'echo -1 > %s'" % LinuxPlatform.SCHED_RT_RUNTIME_US]

                    cmd = " ".join(args)
                    run_shell_cmd(cmd)
                else:
                    fatal("Could not set %s to -1" %
                          LinuxPlatform.SCHED_RT_RUNTIME_US)
Example #39
    def __init__(self, iterations_runner, env=None, instrument=False):
        self.iterations_runner = iterations_runner

        # List of EnvChange instances to apply prior to each experiment.
        # These should be benchmark agnostic. Look elsewhere for
        # environment changes specific to a benchmark.
        self.common_env_changes = []

        # The user can pass in a dict to influence the environment.
        #
        # These variables are *prepended* to any coinciding environment that
        # Krun has set to run benchmarks. E.g. If Krun wants to set
        # LD_LIBRARY_PATH=/opt/pypy/pypy/goal, and the user passes down
        # {"LD_LIBRARY_PATH": "/wibble/lib"}, the resulting environment is
        # LD_LIBRARY_PATH=/wibble/lib:/opt/pypy/pypy/goal
        #
        # This is useful, for example, if the user built their own GCC
        # and needs to force the LD_LIBRARY_PATH.
        if env is not None:
            if not isinstance(env, dict):
                fatal("'env' argument for VM defs should be a dict")
            for k, v in env.iteritems():
                self.add_env_change(EnvChangeAppend(k, v))

        # tempting as it is to add a self.vm_path, we don't. If we were to add
        # natively compiled languages, then there is no "VM" to speak of.

        # These are set later
        self.platform = None
        self.config = None

        # Do not execute the benchmark program
        # (useful for testing configurations).
        self.dry_run = False

        self.instrument = instrument
Example #40
    def _reboot(self):
        self.results.reboots += 1
        debug("About to execute reboot: %g, expecting %g in total." %
              (self.results.reboots, self.expected_reboots))
        # Dump the results file. This may already have been done, but we
        # have changed self.nreboots, which needs to be written out.
        self.results.write_to_file()

        if self.results.reboots > self.expected_reboots:
            util.fatal(("HALTING now to prevent an infinite reboot loop: " +
                        "INVARIANT num_reboots <= num_jobs violated. " +
                        "Krun was about to execute reboot number: %g. " +
                        "%g jobs have been completed, %g are left to go.") %
                       (self.results.reboots, self.jobs_done, len(self)))
        if self.dry_run:
            info("SIMULATED: reboot (restarting Krun in-place)")
            args =  sys.argv
            if not self.started_by_init:
                args.extend(["--resume", "--started-by-init"])
                debug("Simulated reboot with args: " + " ".join(args))
            os.execv(args[0], args)  # replace myself
            assert False  # unreachable
        else:
            subprocess.call(self.platform.get_reboot_cmd())
Example #41
    def _check_cpu_scaler(self):
        """Check the correct CPU scaler is in effect"""

        for cpu_n in xrange(self.num_cpus):
            # Check CPU scaler
            debug("Checking CPU scaler for CPU%d" % cpu_n)
            with open(LinuxPlatform.CPU_SCALER_FMT % cpu_n, "r") as fh:
                v = fh.read().strip()

            if v != "acpi-cpufreq":
                if v == "intel_pstate":
                    scaler_files = [ "  * " + LinuxPlatform.CPU_SCALER_FMT % x for
                                    x in xrange(self.num_cpus)]
                    self._fatal_kernel_arg(
                        "intel_pstate=disable",
                        "The kernel is using 'intel_pstate' for scaling instead of 'acpi-cpufreq.",
                        "When the system comes up, check the following "
                        "files contain 'acpi-cpufreq':\n%s"
                        % "\n".join(scaler_files))
                else:
                    fatal("The kernel is using '%s' for CPU scaling instead "
                          "of using 'acpi-cpufreq'" % v)

        # Check "turbo boost" is disabled
        # It really should be, as turbo boost is only available using pstates,
        # and the code above is ensuring we are not. Let's check anyway.
        debug("Checking 'turbo boost' is disabled")
        if os.path.exists(LinuxPlatform.TURBO_DISABLED):
            with open(LinuxPlatform.TURBO_DISABLED) as fh:
                v = int(fh.read().strip())

            if v != 1:
                fatal("Machine has 'turbo boost' enabled. "
                      "This should not happen, as this feature only applies to "
                      "pstate CPU scaling and Krun just determined that "
                      "the system is not!")