Example 1
def filter_command(args):
    """Implements the 'filter' command for the 'wult' and 'ndl' tools."""

    res = RORawResult.RORawResult(args.respath)

    if args.list_columns:
        for colname in res.colnames:
            _LOG.info("%s: %s", colname, res.defs.info[colname]["title"])
        return

    if not getattr(args, "oargs", None):
        raise Error("please, specify at least one reduction criteria.")
    if args.reportid and not args.outdir:
        raise Error("'--reportid' can be used only with '-o'/'--outdir'")

    if args.human_readable and args.outdir:
        raise Error("'--human-readable' and '--outdir' are mutually exclusive")

    apply_filters(args, res)

    if args.outdir:
        res.save(args.outdir, reportid=args.reportid)
    elif not args.human_readable:
        res.df.to_csv(sys.stdout, index=False, header=True)
    else:
        for idx, (_, dp) in enumerate(res.df.iterrows()):
            if idx > 0:
                _LOG.info("")
            _LOG.info(Human.dict2str(dict(dp)))
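
A hedged invocation sketch for the handler above, assuming the surrounding module's imports are in place. The attribute names mirror what the function reads; the real tools build 'args' with argparse, and the 'oargs' payload shown here is a hypothetical placeholder:

from types import SimpleNamespace

# Hypothetical 'args' namespace: the 'oargs' content is a placeholder, the real
# format is defined by the tools' argument parsing code.
args = SimpleNamespace(respath="./wult-result", list_columns=False,
                       oargs=[("rfilt", "CC6% > 0")], reportid=None,
                       outdir=None, human_readable=True)
filter_command(args)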
Example 2
    def _send_command(self, cmd, arg=None):
        """Send a command to 'stats-collect', verify and return the response."""

        if arg:
            cmd += " " + arg

        sc_str = f"'stats-collect' at {self._sc_id}"
        check_log_msg = f"Check the 'stats-collect' log file{self._proc.hostmsg}:\n{self._logpath}"

        _LOG.debug("sending the following command to %s:\n%s", sc_str, cmd)

        try:
            self._send_msg(cmd)
        except (Error, socket.error) as err:
            raise Error(f"failed to send the following message to "
                        f"{sc_str}:\n{cmd}\n{err}\n{check_log_msg}") from err

        try:
            msg = self._recv_msg()
        except (Error, socket.error) as err:
            raise Error(
                f"failed receiving the reply to the following command from {sc_str}: "
                f"{cmd}\n{err}\n{check_log_msg}") from err

        if msg == "OK":
            return None

        if msg.startswith("OK "):
            return msg[3:]

        raise SCReplyError(
            f"{sc_str} did not respond with 'OK' to the following command:\n{cmd}"
            f"\nInstead, the response was the following:\n{msg}\n{check_log_msg}"
        )
Example 3
    def save(self, dirpath, reportid=None):
        """
        Save the test result at path 'dirpath', optionally change the report ID with 'reportid'.
        """

        dirpath = Path(dirpath)
        if not dirpath.exists():
            _LOG.debug("creating directory '%s", dirpath)
            try:
                dirpath.mkdir(parents=True, exist_ok=False)
            except OSError as err:
                raise Error(f"failed to create directory '{dirpath}':\n{err}") from None
        elif not dirpath.is_dir():
            raise Error(f"path '{dirpath}' exists and it is not a directory")

        if reportid:
            info = self.info.copy()
            info["reportid"] = reportid
        else:
            info = self.info

        path = dirpath.joinpath(self.info_path.name)
        YAML.dump(info, path)

        path = dirpath.joinpath(self.dp_path.name)
        self.df.to_csv(path, index=False, header=True)
Example 4
    def _validate_sut(self):
        """Check the SUT to insure we have everything to measure it."""

        # Make sure a supported idle driver is in use.
        path = Path("/sys/devices/system/cpu/cpuidle/current_driver")
        with self._proc.open(path, "r") as fobj:
            drvname = fobj.read().strip()

        if drvname == "none":
            errmsg = f"no idle driver in use{self._proc.hostmsg}"
            try:
                cmdline = self._get_cmdline()
            except Error as err:
                raise Error(errmsg) from err

            idleoption = [item for item in cmdline.split() if "idle=" in item]
            if idleoption:
                errmsg += f". Your system uses the '{idleoption[0]}' kernel boot parameter, try " \
                          f"removing it."
            raise Error(errmsg)

        supported = ("intel_idle", "acpi_idle")
        if drvname not in supported:
            supported = ", ".join(supported)
            raise Error(
                f"unsupported idle driver '{drvname}'{self._proc.hostmsg},\n"
                f"only the following drivers are supported: {supported}")
Example 5
def get_kver_bin(path, split=False, proc=None):
    """
    Get version of a kernel binary at 'path'. The 'split' and 'proc' arguments are the same as in
    'get_kver()'.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = f"file -- {path}"
    stdout = proc.run_verify(cmd)[0].strip()

    msg = f"ran this command: {cmd}, got output:\n{stdout}"

    matchobj = re.match(r".* Linux kernel.* executable .*", stdout)
    if not matchobj:
        raise Error(f"file at '{path}'{proc.hostmsg} is not a Linux kernel binary file\n{msg}")

    matchobj = re.match(r".* version ([^ ]+) .*", stdout)
    if not matchobj:
        raise Error(f"{msg}\nFailed to find kernel version in the output.")

    kver = matchobj.group(1)
    if split:
        return split_kver(kver)
    return kver
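
A usage sketch, assuming the function is importable; the kernel binary path is illustrative, not a guaranteed location:

kver = get_kver_bin("/boot/vmlinuz-5.15.0")                    # e.g., "5.15.0"
major, minor, *_rest = get_kver_bin("/boot/vmlinuz-5.15.0", split=True)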
Example 6
    def configure(self):
        """Configure the ETF qdisc."""

        _LOG.debug("setting up ETF qdisc with handover delta %d nanoseconds",
                   self._handover_delta)

        stdout, _ = self._proc.run_verify(f"{self._tc_bin} -V")
        match = re.match(r"^tc utility, iproute2-(ss)?(.*)$", stdout.strip())
        if not match:
            raise Error(
                f"failed to parse version number of the 'tc' tool{self._proc.hostmsg}"
            )

        # 'tc' version numbering changed from date-based (e.g., "tc utility, iproute2-ss180129") to
        # regular version numbering corresponding to the kernel version (e.g., "tc utility,
        # iproute2-5.8.0"). Any version in the new style is new enough.
        if match.group(1) == "ss" and int(match.group(2)) < 181023:
            raise Error(self._old_tc_err_msg)

        self.reset_root_qdisc()

        cmd = f"{self._tc_bin} qdisc replace dev {self._ifname} parent root handle 100 mqprio " \
              f"num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0"
        self._run_tc_cmd(cmd)

        cmd = f"{self._tc_bin} qdisc add dev {self._ifname} parent 100:1 etf offload clockid " \
              f"CLOCK_TAI delta {self._handover_delta}"
        self._run_tc_cmd(cmd)

        # Here is the behavior we observed in kernel version 4.19: resetting the qdisc resets the
        # NIC, and the carrier disappears for some time. Let's wait for it to appear.
        _LOG.debug("waiting for carrier on network interface '%s'%s",
                   self._ifname, self._proc.hostmsg)
        self._netif.wait_for_carrier(10)
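
The version check above hinges on the two 'tc -V' output styles. This self-contained sketch exercises the same regular expression against both formats:

import re

# Same pattern as in 'configure()': group(1) is "ss" only for date-based versions.
for out in ("tc utility, iproute2-ss180129", "tc utility, iproute2-5.8.0"):
    match = re.match(r"^tc utility, iproute2-(ss)?(.*)$", out)
    too_old = match.group(1) == "ss" and int(match.group(2)) < 181023
    print(f"{out!r} -> too old: {too_old}")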
Example 7
def open_raw_results(respaths, toolname, reportids=None, reportid_additional_chars=None):
    """
    Open the input raw test results and return the list of 'RORawResult' objects.
      * respaths - list of paths to raw results.
      * toolname - name of the tool opening raw results.
      * reportids - list of reportids to override report IDs in raw results.
      * reportid_additional_chars - string of characters allowed in report ID on top of default
                                    characters.
    """

    if reportids:
        reportids = Trivial.split_csv_line(reportids)
    else:
        reportids = []

    if len(reportids) > len(respaths):
        raise Error(f"there are {len(reportids)} report IDs to assign to {len(respaths)} input "
                    f"test results. Please, provide {len(respaths)} or less report IDs.")

    # Append the required amount of 'None's to make the 'reportids' list be of the same length as
    # the 'respaths' list.
    reportids += [None] * (len(respaths) - len(reportids))

    rsts = []
    for respath, reportid in zip(respaths, reportids):
        if reportid:
            ReportID.validate_reportid(reportid, additional_chars=reportid_additional_chars)

        res = RORawResult.RORawResult(respath, reportid=reportid)
        if toolname != res.info["toolname"]:
            raise Error(f"cannot generate '{toolname}' report, results are collected with the"
                        f"'{res.info['toolname']}':\n{respath}")
        rsts.append(res)

    return rsts
Example 8
    def _read_stats_file(self, path):
        """
        Returns a pandas DataFrame containing the data stored in the raw AC Power statistics CSV
        file at 'path'.
        """

        try:
            # The 'skipfooter' parameter is only available with the Python pandas engine.
            sdf = pandas.read_csv(path, skipfooter=1, engine="python")
        except pandas.errors.ParserError as err:
            raise Error(f"unable to parse CSV '{path}': {err}.") from None

        # Confirm that the time column name is in the CSV headers.
        if self._time_colname not in sdf:
            raise Error(
                f"column '{self._time_colname}' not found in statistics file '{path}'."
            )

        # Convert the time column from time since the epoch to time since the first datapoint.
        sdf[self._time_colname] -= sdf[self._time_colname].iloc[0]

        return sdf
Example 9
def _validate_range(rng, what, single_ok):
    """Implements 'parse_ldist()'."""

    if single_ok:
        min_len = 1
    else:
        min_len = 2

    split_rng = Trivial.split_csv_line(rng)

    if len(split_rng) < min_len:
        raise Error(f"bad {what} range '{rng}', it should include at least {min_len} number(s)")
    if len(split_rng) > 2:
        raise Error(f"bad {what} range '{rng}', it should not include more than 2 numbers")

    vals = [None] * len(split_rng)

    for idx, val in enumerate(split_rng):
        vals[idx] = Human.parse_duration_ns(val, default_unit="us", name=what)
        if vals[idx] < 0:
            raise Error(f"bad {what} value '{split_rng[idx]}', should be greater than zero")

    if len(vals) == 2 and vals[1] - vals[0] < 0:
        raise Error(f"bad {what} range '{rng}', first number cannot be greater than the second "
                    f"number")
    if len(vals) == 1:
        vals.append(vals[0])

    return vals
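
A usage sketch, assuming the 'Trivial' and 'Human' helpers are importable. The expected values assume 'parse_duration_ns()' converts the default microseconds to nanoseconds, as the 'default_unit="us"' argument suggests:

# A single number is expanded to an [x, x] range.
print(_validate_range("100,200", "launch distance", single_ok=False))  # e.g., [100000, 200000]
print(_validate_range("100", "launch distance", single_ok=True))       # e.g., [100000, 100000]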
Example 10
    def prepare(self):
        """Prepare to start measurements."""

        # Ensure the kernel is fresh enough.
        kver = KernelVersion.get_kver(proc=self._proc)
        if KernelVersion.kver_lt(kver, "5.1-rc1"):
            raise Error(
                f"version of the running kernel{self._proc.hostmsg} is {kver}, but it "
                f"does not support the ETF qdisc.\nPlease, use kernel version 5.1 or "
                f"newer")

        try:
            self._nmcli = _Nmcli.Nmcli(proc=self._proc)
        except ErrorNotSupported:
            pass
        else:
            # We have to configure the I210 network interface in a special way, but if it is managed
            # by NetworkManager, the configuration may get reset at any point. Therefore, detach the
            # network interface from NetworkManager.
            _LOG.info("Detaching network interface '%s' from NetworkManager%s",
                      self._ifname, self._proc.hostmsg)
            self._nmcli.unmanage(self._ifname)

        # Ensure the interface exists and has carrier. It must be brought up before we can check the
        # carrier status.
        self._netif.up()
        self._netif.wait_for_carrier(10)

        # Make sure the network interface has an IP address.
        ipaddr = self._netif.get_ipv4_addr(default=None)
        if ipaddr:
            _LOG.debug("network interface '%s'%s has IP address '%s'",
                       self._ifname, self._proc.hostmsg, ipaddr)
        else:
            ipaddr = self._netif.get_unique_ipv4_addr()
            ipaddr += "/16"
            self._netif.set_ipv4_addr(ipaddr)
            # Ensure the IP was set.
            self._netif.get_ipv4_addr()
            _LOG.info("Assigned IP address '%s' to interface '%s'%s", ipaddr,
                      self._ifname, self._proc.hostmsg)

        self._drv.load(unload=True, opts=f"ifname={self._ifname}")

        # We use the ETF qdisc for scheduling delayed network packets. Configure it and start the
        # 'phc2sys' process in background in order to keep the host and NIC clocks in sync.

        # Get the TAI offset first.
        stdout, _ = self._proc.run_verify(
            f"{self._ndlrunner_bin} --tai-offset")
        tai_offset = self._get_line(prefix="TAI offset", line=stdout)
        if not Trivial.is_int(tai_offset):
            raise Error(
                f"unexpected 'ndlrunner --tai-offset' output:\n{stdout}")

        _LOG.info("Configuring the ETF qdisc%s", self._proc.hostmsg)
        self._etfqdisc.configure()
        _LOG.info("Starting NIC-to-system clock synchronization process%s",
                  self._proc.hostmsg)
        self._etfqdisc.start_phc2sys(tai_offset=int(tai_offset))
Example 11
    def _get_lines(self):
        """This generator to reads the 'ndlrunner' helper output and yields it line by line."""

        timeout = 1.0 + self._ldist[1] / 1000000000

        while True:
            stdout, stderr, exitcode = self._ndlrunner.wait_for_cmd(
                timeout=timeout, lines=[16, None], join=False)
            if exitcode is not None:
                msg = self._ndlrunner.cmd_failed_msg(stdout, stderr, exitcode,
                                                     timeout)
                raise Error(
                    f"{self._ndlrunner_error_prefix()} has exited unexpectedly\n{msg}"
                )
            if stderr:
                raise Error(
                    f"{self._ndlrunner_error_prefix()} printed an error message:\n"
                    f"{''.join(stderr)}")
            if not stdout:
                raise Error(
                    f"{self._ndlrunner_error_prefix()} did not provide any output for "
                    f"{timeout} seconds")

            yield from stdout
Example 12
def validate_reportid(reportid, additional_chars=None, default=_RAISE):
    """
    We limit the characters which can be used in report IDs to those which are safe to use in URLs,
    and this function validates a report ID in 'reportid' against the allowed set of characters. The
    characters are ASCII alphanumeric characters, "-", ".", "_", and "~". The 'additional_chars'
    argument is the same as in 'get_charset_descr()'.

    By default this function raises an exception if 'reportid' is invalid, but if the 'default'
    argument is provided, the 'default' value is returned instead.
    """

    if len(reportid) > MAX_REPORTID_LEN:
        if default is _RAISE:
            raise Error(
                f"report ID is too long ({len(reportid)} characters), the maximum allowed length "
                f"is {MAX_REPORTID_LEN} characters")
        return default

    if not additional_chars:
        additional_chars = ""

    chars = SPECIAL_CHARS + additional_chars
    if not re.match(rf"^[A-Za-z0-9{chars}]+$", reportid):
        if default is _RAISE:
            charset_descr = get_charset_descr(additional_chars)
            raise Error(
                f"bad report ID '{reportid}'\n"
                f"Please, use only the following characters: {charset_descr}")
        return default

    return reportid
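
A usage sketch, assuming the module-level constants and helpers are in scope. With 'default' provided, an invalid report ID is returned as the default instead of raising:

print(validate_reportid("good-id_1.0~x"))          # "good-id_1.0~x"
print(validate_reportid("bad id!", default=None))  # None: contains invalid characters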
Example 13
    def bind(self, drvname):
        """Bind the PCI device to driver 'drvname'."""

        _LOG.debug("binding device '%s' to driver '%s'%s",
                   self._pci_info["pciaddr"], drvname, self._proc.hostmsg)

        failmsg = f"failed to bind device '{self._pci_info['pciaddr']}' to driver '{drvname}'" \
                  f"{self._proc.hostmsg}"

        drvpath = Path(f"/sys/bus/pci/drivers/{drvname}")
        if not FSHelpers.exists(drvpath, proc=self._proc):
            raise Error(f"{failmsg}:\npath '{drvpath}' does not exist")

        cur_drvname = self.get_driver_name()
        if cur_drvname == drvname:
            _LOG.debug("device '%s' is already bound to driver '%s'%s",
                       self._pci_info["pciaddr"], drvname, self._proc.hostmsg)
            return

        if cur_drvname:
            raise Error(
                f"{failmsg}:\nit is already bound to driver '{cur_drvname}'")

        # At this point we do not know if the driver supports this PCI ID. So start with the
        # assumption that it does not, in which case writing to the 'new_id' file should do both:
        # * make the driver aware of the PCI ID
        # * bind the device
        path = f"{drvpath}/new_id"
        val = f"{self._pci_info['vendorid']} {self._pci_info['devid']}"
        bound = True

        try:
            with self._proc.open(path, "wt") as fobj:
                _LOG.debug("writing '%s' to file '%s'", val, path)
                fobj.write(val)
        except Error:
            bound = False

        if not bound:
            # Probably the driver already knows about this PCI ID. Use the 'bind' file in this case.
            path = f"{drvpath}/bind"
            val = self._pci_info["pciaddr"]
            with self._proc.open(path, "wt") as fobj:
                _LOG.debug("writing '%s' to file '%s'", val, path)
                try:
                    fobj.write(val)
                except Error as err:
                    raise Error(
                        f"{failmsg}:\n{err}\n{self.get_new_dmesg()}") from err

        # Verify that the device is bound to the driver.
        if not self._get_driver()[1]:
            raise Error(f"{failmsg}\n{self.get_new_dmesg()}")

        _LOG.debug("binded device '%s' to driver '%s'%s\n%s",
                   self._pci_info["pciaddr"], drvname, self._proc.hostmsg,
                   self.get_new_dmesg())
Example 14
    def __init__(self, dirpath, reportid=None):
        """
        The class constructor. The arguments are as follows.
          * dirpath - path to the directory containing the raw test result to open.
          * reportid - override the report ID of the raw test result: the 'reportid' string will be
                       used instead of the report ID stored in 'dirpath/info.yml'. Note, the
                       provided report ID is not verified, so the caller has to make sure it is a
                       sane string.

        Note, the constructor does not load the potentially huge test result data into the memory.
        It only loads the 'info.yml' file and figures out the column names list. The data are loaded
        "on-demand" by the 'load_df()' and other methods.
        """

        super().__init__(dirpath)

        # Check a few special error cases upfront in order to provide a clear error message:
        # the info and datapoint files should exist and be non-empty.
        for name in ("info_path", "dp_path"):
            attr = getattr(self, name)
            try:
                if not attr.is_file():
                    raise ErrorNotFound(f"'{attr}' does not exist or it is not a regular file")
                if not attr.stat().st_size:
                    raise Error(f"file '{attr}' is empty")
            except OSError as err:
                raise Error(f"failed to access '{attr}': {err}") from err

        # Unknown columns in the CSV file.
        self._ignored_colnames = None

        self.df = None
        self.smrys = None
        self.colnames = []
        self.colnames_set = set()

        self.info = YAML.load(self.info_path)
        if reportid:
            # Note, we do not verify it here, the caller is supposed to verify.
            self.info["reportid"] = reportid
        if "reportid" not in self.info:
            raise ErrorNotSupported(f"no 'reportid' key found in {self.info_path}")
        self.reportid = self.info["reportid"]

        toolname = self.info.get("toolname")
        if not toolname:
            raise Error(f"bad '{self.info_path}' format - the 'toolname' key is missing")

        toolver = self.info.get("toolver")
        if not toolver:
            raise Error(f"bad '{self.info_path}' format - the 'toolver' key is missing")

        self.defs = Defs.Defs(self.info["toolname"])

        # All column names in the CSV file.
        self._read_colnames()
Example 15
    def __init__(self, reports, outdir, basedir, metric_name, metric_colname,
                 time_colname, defs):
        """
        The class constructor. Adding a stats tab will create a 'metricname' sub-directory and
        store plots and the summary table in it. Arguments are as follows:
         * reports - dictionary containing the statistics data for each report:
                     '{reportid: stats_df}'
         * outdir - the output directory in which to create the 'metricname' sub-directory.
         * basedir - base directory of the report. All paths should be made relative to this.
         * metric_name - name of the metric to create the tab for.
         * metric_colname - name of the column in the 'stats_df's which contains data for
                           'metricname'.
         * time_colname - name of the column in the 'stats_df's which represents the elpased time.
         * defs - dictionary containing the definitions for this metric.
        """

        # File system-friendly tab name.
        self.name = metric_name
        self._basedir = basedir
        self._outdir = outdir / self.name
        self.smry_path = self._outdir / "summary-table.txt"

        try:
            self._outdir.mkdir(parents=True, exist_ok=True)
        except OSError as err:
            raise Error(f"failed to create directory '{self._outdir}': {err}"
                        ) from None

        self._metric_defs = defs[self.name]
        self._metric_colname = metric_colname
        self._time_defs = defs["Time"]
        self._time_colname = time_colname
        self.title = self._metric_defs["title"]
        self.descr = self._metric_defs["descr"]
        self.smry_funcs = self._metric_defs["default_funcs"]

        # Reduce 'reports' to only the metric and time columns which are needed for this tab.
        self._reports = {}
        for reportid, df in reports.items():
            if self._metric_colname in df:
                self._reports[reportid] = df[[
                    self._metric_colname, self._time_colname
                ]].copy()

        if not self._reports:
            raise Error(
                f"failed to generate '{self.name}' tab: no data under column"
                f"'{self._metric_colname}' provided.")

        self._plots = []
        try:
            self._init_plots()
        except Exception as err:
            raise Error(f"failed to initialise plots: {err}") from None
Example 16
    def _validate_init_args(self):
        """Validate the class constructor input arguments."""

        if self.outdir.exists() and not self.outdir.is_dir():
            raise Error(f"path '{self.outdir}' already exists and it is not a directory")

        # Ensure that results are compatible.
        rname, rver = self._refinfo["toolname"], self._refinfo["toolver"]
        for res in self.rsts:
            name, ver = res.info["toolname"], res.info["toolver"]
            if name != rname:
                raise Error(f"the following test results are not compatible:\n"
                            f"1. {self._refres.dirpath}: created by '{rname}'\n"
                            f"2. {res.dirpath}: created by '{name}'\n"
                            f"Cannot put incompatible results to the same report")
            if ver != rver:
                _LOG.warning("the following test results may be not compatible:\n"
                             "1. %s: created by '%s' version '%s'\n"
                             "2. %s: created by '%s' version '%s'",
                             self._refres.dirpath, rname, rver, res.dirpath, name, ver)

        # Ensure the report IDs are unique.
        reportids = set()
        for res in self.rsts:
            reportid = res.reportid
            if reportid in reportids:
                # Try to construct a unique report ID.
                for idx in range(1, 20):
                    new_reportid = f"{reportid}-{idx:02}"
                    if new_reportid not in reportids:
                        _LOG.warning("duplicate reportid '%s', using '%s' instead",
                                     reportid, new_reportid)
                        res.reportid = new_reportid
                        break
                else:
                    raise Error(f"too many duplicate report IDs, e.g., '{reportid}' is problematic")

            reportids.add(res.reportid)

        if self.title_descr and Path(self.title_descr).is_file():
            try:
                with open(self.title_descr, "r", encoding="UTF-8") as fobj:
                    self.title_descr = fobj.read()
            except OSError as err:
                raise Error(f"failed to read the report description file {self.title_descr}:\n"
                            f"{err}") from err

        for res in self.rsts:
            if res.dirpath.resolve() == self.outdir.resolve():
                # Don't create report in results directory, use 'html-report' subdirectory instead.
                self.outdir = self.outdir.joinpath("html-report")
Example 17
    def _setup_stats_collect_ssh_forwarding(self):
        """
        This is a helper function for '_start_stats_collect()' which sets up an SSH forwarding
        between local host and the SUT.

        'stats-collect' always listens on a Unix socket, which means that we cannot directly
        connect to it when it runs on a remote host. Therefore, we create an SSH tunnel which
        forwards the TCP stream between a local TCP port and the remote Unix socket.
        """

        proc = self._proc
        self._ssht_port = RemoteHelpers.get_free_port()
        self._sc_id = f"{self._ssht_port}:{proc.hostname}:{self._uspath}"

        ssh_opts = proc.get_ssh_opts()
        cmd = f"ssh -L {self._ssht_port}:{self._uspath} -N {ssh_opts} {proc.hostname}"
        self._ssht = Procs.Proc().run_async(cmd)

        # Wait for the tunnel to be established.
        start_time = time.time()
        timeout = max(proc.connection_timeout, 5)
        msg = f"failed to establish SSH tunnel between localhost and {proc.hostname} " \
              f"with this command:\n{cmd}"

        while time.time() - start_time <= timeout:
            _LOG.debug("trying to connect to localhost:%s", self._ssht_port)
            stdout, stderr, exitcode = self._ssht.wait_for_cmd(
                timeout=1, capture_output=True)  # pylint: disable=no-member

            if exitcode is not None:
                raise Error(
                    proc.cmd_failed_msg(cmd,
                                        stdout,
                                        stderr,
                                        exitcode,
                                        startmsg=msg))

            try:
                self._connect()
            except Error:
                pass
            else:
                self._disconnect()
                return

        raise Error(
            f"{msg}\nTried for {timeout} seconds, but could not connect to "
            f"localhost:{self._ssht_port}\nCheck '{self._logpath}'{proc.hostmsg} for "
            f"details")
Example 18
    def _load_df(self, force_reload=False, **kwargs):
        """
        Apply all the filters and selectors to 'self.df'. Load it from the datapoints CSV file if it
        has not been loaded yet. If 'force_reload' is 'True', always load 'self.df' from the CSV
        file.
        """

        rsel = self._get_rsel()
        csel = self._get_csel(self.colnames)

        load_csv = force_reload or self.df is None

        if not rsel:
            if load_csv:
                self._load_csv(usecols=csel, **kwargs)
            csel = None
        else:
            # We cannot drop columns yet, because the row selector may refer to the columns.
            if load_csv:
                self._load_csv(**kwargs)

        if rsel:
            _LOG.debug("applying rows selector: %s", rsel)
            try:
                try:
                    expr = pandas.eval(rsel)
                except ValueError as err:
                    if "data type must provide an itemsize" in str(err):
                        # Some older versions of the default "numexpr" engine have a bug, and this
                        # is a workaround: we just retry with the "python" engine instead.
                        expr = pandas.eval(rsel, engine="python")
                    else:
                        raise
            except Exception as err:
                raise Error(f"failed to evaluate expression '{rsel}': {err}\nMake sure you use "
                            f"correct CSV column names, which are also case-sensitive.") from err

            self.df = self.df[expr].reset_index(drop=True)
            if self.df.empty:
                raise Error(f"no data left after applying row selector(s) to CSV file "
                            f"'{self.dp_path}'")

        if csel:
            _LOG.debug("applying columns selector: %s", csel)
            self.df = self.df[csel]
            if self.df.empty:
                raise Error(f"no data left after applying column selector(s) to CSV file "
                            f"'{self.dp_path}'")
Example 19
    def _build_csmap(self, rcsobj):
        """
        The wult driver supplies the requested C-state index. Build a dictionary mapping the index
        to the C-state name.
        """

        close = False
        try:
            if rcsobj is None:
                rcsobj = CStates.ReqCStates(proc=self._proc)
                close = True
            csinfo = rcsobj.get_cpu_cstates_info(self._cpunum)
        finally:
            if close:
                rcsobj.close()

        # Check that there are idle states that we can measure.
        for info in csinfo.values():
            if not info["disable"]:
                break
        else:
            raise Error(
                f"no idle states are enabled on CPU {self._cpunum}{self._proc.hostmsg}"
            )

        self._csmap = {}
        for csname, cstate in csinfo.items():
            self._csmap[cstate["index"]] = csname
Example 20
    def get_unique_ipv4_addr(self):
        """
        Generate a random unique IPv4 address which does not belong to any network on the host.
        """

        max_attempts = 128
        for _ in range(max_attempts):
            ipaddr = ".".join([str(random.randint(0, 255)) for _ in range(4)])
            for ifname in _get_ifnames(self._proc):
                netif = NetIface(ifname, proc=self._proc)
                netinfo = netif.get_ip_info()

                if "ipv4" not in netinfo:
                    continue

                addrobj = ipaddress.IPv4Address(ipaddr)
                netobj = ipaddress.IPv4Network(netinfo["ipv4"]["cidr"])
                if addrobj in netobj:
                    _LOG.debug(
                        "IPv4 address '%s' belongs to interface '%s', retrying",
                        ipaddr, ifname)
                    break
            else:
                # We found a unique IP address.
                return ipaddr

        raise Error(
            f"failed to find a random unique IP address for host '{self._proc.hostname}'"
        )
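
The collision check at the heart of the loop above is a standard-library membership test; a self-contained illustration:

import ipaddress

# The same containment test used above to reject candidate addresses.
addr = ipaddress.IPv4Address("192.168.1.5")
net = ipaddress.IPv4Network("192.168.0.0/16")
print(addr in net)  # True: this candidate would be rejected and retried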
Example 21
    def deployable_not_found(what):
        """Called when a helper of driver was not found on the SUT to raise an exception."""

        err = f"{what} was not found on {proc.hostmsg}. Please, run:\n{toolname} deploy"
        if proc.is_remote:
            err += f" -H {proc.hostname}"
        raise Error(err)
Example 22
def find_processes(regex: str, proc=None):
    """
    Find all processes which match the 'regex' regular expression on the host defined by 'proc'. The
    regular expression is matched against the process executable name + command-line arguments.

    By default this function operates on the local host, but the 'proc' argument can be used to pass
    a connected 'SSH' object in which case this function will operate on the remote host.

    Returns a list of tuples containing the PID and the command line.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = "ps axo pid,args"
    stdout, stderr = proc.run_verify(cmd, join=False)

    if len(stdout) < 2:
        raise Error(f"no processes found at all{proc.hostmsg}\nExecuted this command:\n{cmd}\n"
                    f"stdout:\n{''.join(stdout)}\nstderr:\n{''.join(stderr)}")

    procs = []
    for line in stdout[1:]:
        pid, comm = line.strip().split(" ", 1)
        pid = int(pid)
        if proc.hostname == "localhost" and pid == Trivial.get_pid():
            continue
        if re.search(regex, comm):
            procs.append((pid, comm))

    return procs
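
A usage sketch (local host, hypothetical pattern); the regular expression is matched against the executable name plus the command-line arguments, per the docstring:

for pid, cmdline in find_processes(r"ndlrunner"):
    print(pid, cmdline)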
Example 23
    def getlines(self):
        """
        Yield trace buffer lines one-by-one. Wait at most 'timeout' seconds for a trace line.
        """

        while True:
            stdout, stderr, exitcode = self._reader.wait_for_cmd(
                timeout=self.timeout, lines=[32, None], join=False)

            if not stdout and not stderr and exitcode is None:
                raise ErrorTimeOut(
                    f"no data in trace buffer for {self._reader.timeout} seconds"
                    f"{self._proc.hostmsg}")

            # The process has terminated or printed something to standard error.
            if exitcode is not None or stderr:
                msg = self._reader.cmd_failed_msg(stdout, stderr, exitcode)
                raise Error(
                    f"the function trace reader process has exited unexpectedly:\n{msg}"
                )

            for line in stdout:
                if line.startswith("#"):
                    continue
                self.raw_line = line.strip()
                yield FTraceLine(line)
Example 24
def even_up_dpcnt(rsts):
    """
    This is a helper function for the '--even-up-datapoints' option. It takes a list of
    'RORawResult' objects ('rsts') and truncates them to the size of the smallest test result, where
    "size" is defined as the count of rows in the CSV file.
    """

    # Find the test result with the smallest CSV file. It should be a good approximation for the
    # smallest test result, and it will be corrected as we go.
    min_size = min_res = None
    for res in rsts:
        try:
            size = res.dp_path.stat().st_size
        except OSError as err:
            raise Error(f"'stat()' failed for '{res.dp_path}': {err}") from None
        if min_size is None or size < min_size:
            min_size = size
            min_res = res

    min_res.load_df()
    min_dpcnt = len(min_res.df.index)

    # Load only 'min_dpcnt' datapoints for every test result, correcting 'min_dpcnt' as we go.
    for res in rsts:
        res.load_df(nrows=min_dpcnt)
        min_dpcnt = min(min_dpcnt, len(res.df.index))

    # And in case our initial 'min_dpcnt' estimation was incorrect, truncate all the results to the
    # final 'min_dpcnt'.
    for res in rsts:
        dpcnt = len(res.df.index)
        if dpcnt > min_dpcnt:
            res.df = res.df.truncate(after=min_dpcnt-1)
Example 25
def get_kver_ktree(ktree, split=False, proc=None, makecmd=None):
    """
    Get the version of the kernel in the kernel sources directory 'ktree'. The 'ktree' directory
    must contain an already configured kernel, or it should be the path to the kernel build
    directory if the kernel was compiled out of tree (make O=<ktree>).

    By default this function runs the 'make -C <ktree> --quiet -- kernelrelease' command to get the
    kernel version. However, you can use the 'makecmd' argument to override the 'make -C <ktree>'
    part of it.

    The 'split' and 'proc' arguments are the same as in 'get_kver()'.
    """

    if not proc:
        proc = Procs.Proc()

    if not makecmd:
        makecmd = f"make -C '{ktree}'"
    cmd = makecmd + " --quiet -- kernelrelease"

    try:
        kver = proc.run_verify(cmd)[0].strip()
    except Error as err:
        raise Error(f"cannot detect kernel version in '{ktree}':\n{err}\nMake sure the kernel "
                    f"sources are configured.") from err

    if split:
        return split_kver(kver)
    return kver
Example 26
def split_kver(kver, numerical=False):
    """
    Split the kernel version string into its components: major, minor, stable, rc, localver. For
    example, '4.18.1-build0' would be ('4', '18', '1', None, '-build0'), and '5.0-rc2' would be
    ('5', '0', '0', '2', ''). By default the numeric parts of the version are returned as strings,
    but if the 'numerical' argument is 'True', they are returned as integers.
    """

    def _fetch_rc(localver):
        """Fetch the release candidate version number from the local version."""

        matchobj = re.match(r"-rc(\d+)(.*)", localver)
        if matchobj:
            return matchobj.group(1, 2)
        return (None, localver)

    matchobj = re.match(r"^(\d+)\.(\d+)(?:(?:\.(\d+)){0,1}(.*)){0,1}", kver)
    if not matchobj:
        raise Error("failed to parse kernel version string '%s'" % kver)

    major, minor, stable, localver = matchobj.group(1, 2, 3, 4)
    if stable is None:
        stable = "0"
    rc, localver = _fetch_rc(localver)
    if numerical:
        major = int(major)
        minor = int(minor)
        stable = int(stable)
        if rc is not None:
            rc = int(rc)

    return SplitKver(major, minor, stable, rc, localver)
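
A usage sketch reproducing the docstring examples (the comments show the field values of the returned 'SplitKver' named tuple):

print(split_kver("4.18.1-build0"))            # ('4', '18', '1', None, '-build0')
print(split_kver("5.0-rc2"))                  # ('5', '0', '0', '2', '')
print(split_kver("5.0-rc2", numerical=True))  # (5, 0, 0, 2, '')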
Example 27
    def __init__(self, devid, cpunum, proc, dmesg=None, force=False):
        """
        The class constructor. The 'force' argument can be used to initialize I210 device for
        measurements even if its network interface state is "up". Other arguments are the same as in
        '_WultDeviceBase.__init__()'. The 'devid' can be the PCI address or the network interface
        name.
        """

        netif = None
        try:
            netif = NetIface.NetIface(devid, proc=proc)
        except ErrorNotFound:
            pass

        if netif:
            # Make sure the device is not used for networking, because we are about to unbind it
            # from the driver. This check makes sure users do not lose networking by specifying the
            # wrong device by mistake.
            if not force and netif.getstate() == "up":
                msg = ""
                if devid != netif.ifname:
                    msg = f" (network interface '{netif.ifname}')"

                raise Error(
                    f"refusing to use device '{devid}'{msg}{proc.hostmsg}: "
                    f"it is up and might be used for networking. Please, bring it down "
                    f"if you want to use it for wult measurements.")
            hwaddr = netif.hwaddr
        else:
            hwaddr = devid

        super().__init__(hwaddr, cpunum, proc, dmesg=dmesg)
Example 28
    def _run_tc_cmd(self, cmd):
        """This is a helper for running a 'tc' command specified in 'cmd'."""

        stdout, stderr, exitcode = self._proc.run(cmd)
        if exitcode:
            errmsg = self._proc.cmd_failed_msg(cmd, stdout, stderr, exitcode)
            errors = ("Operation not supported", "Specified qdisc not found")
            if any(err in stderr for err in errors):
                errmsg += "\n\n"
                pkgname = self._tchk.tool_to_pkg("sch_etf.ko")
                if pkgname:
                    kver = KernelVersion.get_kver(proc=self._proc)
                    errmsg += f"Try to install package '{pkgname}'{self._proc.hostmsg}\n"      \
                              f"Currently running kernel version is '{kver}', make sure the\n" \
                              f"installed '{pkgname}' also has version '{kver}'.\n"
                errmsg += "If you are running a custom kernel (as opposed to the vanilla OS\n" \
                          "kernel), ensure your kernel has the following features enabled:\n"    \
                          "* QoS / fair queuing (CONFIG_NET_SCHED)\n"                            \
                          "* Multi-queue priority scheduler (CONFIG_NET_SCH_MQPRIO)\n"           \
                          "* Earliest TxTime First (CONFIG_NET_SCH_ETF)\n"                       \
                          "* Netfilter (CONFIG_NETFILTER_NETLINK)\n"                             \
                          "And related modules, such as 'sch_etf' and 'sch_mqprio',\n"           \
                          "loaded if needed."

            elif "Unknown qdisc \"etf\"" in stderr:
                errmsg += self._old_tc_err_msg

            raise Error(errmsg)

        return stdout, stderr, exitcode
Example 29
    def _ensure_min_collect_time(self):
        """
        This method makes sure all statistics collectors have made progress and collected at least
        one piece of statistics.
        """

        if not self._start_time:
            raise Error("statistics collection did not start yet")

        max_interval = _get_max_interval(self.stinfo)
        if max_interval == 0:
            return

        # Add some margin of safety.
        max_interval += 1

        if "ipmi" in self.stinfo and self.stinfo["ipmi"]["enabled"]:
            # IPMI may be very slow sometimes, so give it at least 10 seconds.
            max_interval = max(10, max_interval)

        delta = time.time() - self._start_time
        if delta < max_interval:
            _LOG.debug(
                "enforcing minimum %f secs collection time, sleeping %f secs",
                max_interval, max_interval - delta)
            time.sleep(max_interval - delta)
Example 30
    def _create(self):
        """Create the CSV file."""

        try:
            self._fobj = self.path.open("tw+", encoding="utf-8")
        except OSError as err:
            raise Error(
                f"failed to create file '{self.path}':\n{err}") from None