Example #1
def find_processes(regex: str, proc=None):
    """
    Find all processes which match the 'regex' regular expression on the host defined by 'proc'. The
    regular expression is matched against the process executable name + command-line arguments.

    By default this function operates on the local host, but the 'proc' argument can be used to pass
    a connected 'SSH' object in which case this function will operate on the remote host.

    Returns a list of tuples containing the PID and the command line.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = "ps axo pid,args"
    stdout, stderr = proc.run_verify(cmd, join=False)

    if len(stdout) < 2:
        raise Error(
            f"no processes found at all{proc.hostmsg}\nExecuted this command:\n{cmd}\n"
            f"stdout:\n{stdout}\nstderr:{stderr}\n")

    procs = []
    for line in stdout[1:]:
        pid, comm = line.strip().split(" ", 1)
        pid = int(pid)
        if proc.hostname == "localhost" and pid == Trivial.get_pid():
            continue
        if re.search(regex, comm):
            procs.append((pid, comm))

    return procs
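
A hypothetical usage sketch (the import path is an assumption based on the wult project layout, and the regular expression is made up):

# Hypothetical usage: list all 'ndlrunner' processes on the local host.
from wultlibs.helperlibs import ProcHelpers

for pid, cmdline in ProcHelpers.find_processes(r"ndlrunner"):
    print(f"PID {pid}: {cmdline}")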
Example #2
def _parse_turbostat_line(heading, line):
    """Parse a single turbostat line."""

    line_data = {}
    for key, value in zip_longest(heading.keys(), line):
        if value is not None and value != "-":
            if not heading[key]:
                if Trivial.is_int(value):
                    heading[key] = int
                elif Trivial.is_float(value):
                    heading[key] = float
                else:
                    heading[key] = str
            line_data[key] = heading[key](value)

    return line_data
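
A minimal worked example of the lazy type detection above (the column names and values are made up):

# 'heading' maps column names to a type, or to None when the type is not
# known yet; the type is then inferred from the first value seen.
heading = {"Core": str, "CPU": str, "Busy%": None}
line = ["0", "0", "3.14"]

line_data = _parse_turbostat_line(heading, line)
# line_data == {"Core": "0", "CPU": "0", "Busy%": 3.14}
# heading["Busy%"] is now 'float' and will be reused for subsequent lines.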
Example #3
def calc_command(args):
    """Implements the 'calc' command  for the 'wult' and 'ndl' tools."""

    if args.list_funcs:
        for name, descr in RORawResult.get_smry_funcs():
            _LOG.info("%s: %s", name, descr)
        return

    if args.funcs:
        funcnames = Trivial.split_csv_line(args.funcs)
        all_funcs = True
    else:
        funcnames = None
        all_funcs = False

    res = RORawResult.RORawResult(args.respath)
    apply_filters(args, res)

    non_numeric = res.get_non_numeric_colnames()
    if non_numeric and (args.csel or args.cfilt):
        non_numeric = ", ".join(non_numeric)
        _LOG.warning("skipping non-numeric column(s): %s", non_numeric)

    res.calc_smrys(funcnames=funcnames, all_funcs=all_funcs)

    _LOG.info("Datapoints count: %d", len(res.df))
    YAML.dump(res.smrys, sys.stdout, float_format="%.2f")
Example #4
    def _load_results(self):
        """Load the test results from the CSV file and/or apply the columns selector."""

        _LOG.debug("summaries will be calculated for these columns: %s",
                   ", ".join(self._smry_colnames))
        _LOG.debug("additional colnames: %s", ", ".join(self._more_colnames))

        for res in self.rsts:
            _LOG.debug("hover colnames: %s", ", ".join(self._hov_colnames[res.reportid]))

            colnames = []
            for colname in self._hov_colnames[res.reportid] + self._more_colnames:
                if colname in res.colnames_set:
                    colnames.append(colname)

            csel = Trivial.list_dedup(self._smry_colnames + colnames)
            res.set_csel(csel)
            res.load_df()

            # We'll be dropping columns and adding temporary columns, so we'll affect the original
            # dataframe. This is more efficient than creating copies.
            self._mangle_loaded_res(res)

        # Some columns from the axes lists could have been dropped, update the lists.
        self._drop_absent_colnames()
Example #5
    def prepare(self):
        """Prepare to start measurements."""

        # Ensure the kernel is fresh enough.
        kver = KernelVersion.get_kver(proc=self._proc)
        if KernelVersion.kver_lt(kver, "5.1-rc1"):
            raise Error(
                f"version of the running kernel{self._proc.hostmsg} is {kver}, but it "
                f"does not support the ETF qdisc.\nPlease, use kernel version 5.1 or "
                f"newer")

        try:
            self._nmcli = _Nmcli.Nmcli(proc=self._proc)
        except ErrorNotSupported:
            pass
        else:
            # We have to configure the I210 network interface in a special way, but if it is managed
            # by NetworkManager, the configuration may get reset at any point. Therefore, detach the
            # network interface from NetworkManager.
            _LOG.info("Detaching network interface '%s' from NetworkManager%s",
                      self._ifname, self._proc.hostmsg)
            self._nmcli.unmanage(self._ifname)

        # Ensure the interface exists and has carrier. It must be brought up before we can check the
        # carrier status.
        self._netif.up()
        self._netif.wait_for_carrier(10)

        # Make sure the network interface has an IP address.
        ipaddr = self._netif.get_ipv4_addr(default=None)
        if ipaddr:
            _LOG.debug("network interface '%s'%s has IP address '%s'",
                       self._ifname, self._proc.hostmsg, ipaddr)
        else:
            ipaddr = self._netif.get_unique_ipv4_addr()
            ipaddr += "/16"
            self._netif.set_ipv4_addr(ipaddr)
            # Ensure the IP was set.
            self._netif.get_ipv4_addr()
            _LOG.info("Assigned IP address '%s' to interface '%s'%s", ipaddr,
                      self._ifname, self._proc.hostmsg)

        self._drv.load(unload=True, opts=f"ifname={self._ifname}")

        # We use the ETF qdisc for scheduling delayed network packets. Configure it and start the
        # 'phc2sys' process in background in order to keep the host and NIC clocks in sync.

        # Get the TAI offset first.
        stdout, _ = self._proc.run_verify(
            f"{self._ndlrunner_bin} --tai-offset")
        tai_offset = self._get_line(prefix="TAI offset", line=stdout)
        if not Trivial.is_int(tai_offset):
            raise Error(
                f"unexpected 'ndlrunner --tai-offset' output:\n{stdout}")

        _LOG.info("Configuring the ETF qdisc%s", self._proc.hostmsg)
        self._etfqdisc.configure()
        _LOG.info("Starting NIC-to-system clock synchronization process%s",
                  self._proc.hostmsg)
        self._etfqdisc.start_phc2sys(tai_offset=int(tai_offset))
Example #6
    def _next_entry(self):
        """Generator which yields entries from IPMI log files."""

        time_regex = re.compile(r"^(timestamp) \| (\d+_\d+_\d+_\d+:\d+:\d+)$")
        entry_regex = re.compile(r"^(.+)\|(.+)\|(.+)$")

        for line in self._lines:
            # Example of the string:
            # timestamp | 2017_01_04_11:02:46
            match = re.match(time_regex, line.strip())
            if match:
                timestamp = datetime.datetime.strptime(
                    match.group(2).strip(), '%Y_%m_%d_%H:%M:%S')
                yield (match.group(1).strip(), timestamp, "")
            else:
                # Example of the string:
                # System Fan 4     | 2491 RPM          | ok
                match = re.match(entry_regex, line.strip())
                if match:
                    val = match.group(2).strip()
                    data = val.split(' ', 1)
                    if val not in ["no reading", "disabled"] and len(data) > 1:
                        yield (match.group(1).strip(),
                               Trivial.str_to_num(data[0]), data[1])
                    else:
                        yield (match.group(1).strip(), None, None)
Example #7
def open_raw_results(respaths, toolname, reportids=None, reportid_additional_chars=None):
    """
    Opens the input raw test results, and returns the list of 'RORawResult' objects.
      * respaths - list of paths to raw results.
      * toolname - name of the tool opening raw results.
      * reportids - comma-separated list of report IDs to override report IDs in raw results.
      * reportid_additional_chars - string of characters allowed in report ID on top of default
                                    characters.
    """

    if reportids:
        reportids = Trivial.split_csv_line(reportids)
    else:
        reportids = []

    if len(reportids) > len(respaths):
        raise Error(f"there are {len(reportids)} report IDs to assign to {len(respaths)} input "
                    f"test results. Please, provide {len(respaths)} or less report IDs.")

    # Append the required amount of 'None's to make the 'reportids' list be of the same length as
    # the 'respaths' list.
    reportids += [None] * (len(respaths) - len(reportids))

    rsts = []
    for respath, reportid in zip(respaths, reportids):
        if reportid:
            ReportID.validate_reportid(reportid, additional_chars=reportid_additional_chars)

        res = RORawResult.RORawResult(respath, reportid=reportid)
        if toolname != res.info["toolname"]:
            raise Error(f"cannot generate '{toolname}' report, results are collected with the"
                        f"'{res.info['toolname']}':\n{respath}")
        rsts.append(res)

    return rsts
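
A hypothetical invocation sketch (the paths and report IDs are made up; note that 'reportids' is passed as a single comma-separated string):

from pathlib import Path

respaths = [Path("results/before"), Path("results/after")]
rsts = open_raw_results(respaths, "wult", reportids="before,after")
for res in rsts:
    print(res.reportid)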
Example #8
def _validate_range(rng, what, single_ok):
    """Implements 'parse_ldist()'."""

    if single_ok:
        min_len = 1
    else:
        min_len = 2

    split_rng = Trivial.split_csv_line(rng)

    if len(split_rng) < min_len:
        raise Error(f"bad {what} range '{rng}', it should include {min_len} numbers")
    if len(split_rng) > 2:
        raise Error(f"bad {what} range '{rng}', it should not include more than 2 numbers")

    vals = [None] * len(split_rng)

    for idx, val in enumerate(split_rng):
        vals[idx] = Human.parse_duration_ns(val, default_unit="us", name=what)
        if vals[idx] < 0:
            raise Error(f"bad {what} value '{split_rng[idx]}', should be greater than zero")

    if len(vals) == 2 and vals[1] - vals[0] < 0:
        raise Error(f"bad {what} range '{rng}', first number cannot be greater than the second "
                    f"number")
    if len(vals) == 1:
        vals.append(vals[0])

    return vals
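
A worked example, assuming 'Human.parse_duration_ns()' converts the default microsecond unit into nanoseconds:

# Two-number range: "50,100" -> [50000, 100000] (50us and 100us in ns).
_validate_range("50,100", "launch distance", single_ok=False)

# With 'single_ok=True' a single value is duplicated: "50" -> [50000, 50000].
_validate_range("50", "launch distance", single_ok=True)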
Example #9
def _parse_ip_address_show(raw):
    """
    Parse output of the 'ip address show <IFNAME>' command and return the resulting dictionary.
    """

    info = {}
    for line in raw.splitlines():
        line = line.strip()
        elts = Trivial.split_csv_line(line, sep=" ")
        if re.match(r"^\d+:$", elts[0]):
            info["ifname"] = elts[1][:-1]
        elif elts[0] == "inet":
            ipnet = ipaddress.IPv4Network(elts[1], strict=False)
            info["ipv4"] = {}
            info["ipv4"]["ip"] = ipnet.network_address
            info["ipv4"]["mask"] = ipnet.netmask
            info["ipv4"]["bcast"] = ipnet.broadcast_address
            info["ipv4"]["ip_cidr"] = elts[1]
            info["ipv4"]["cidr"] = str(ipnet)
        elif elts[0] == "link/ether":
            info["ether"] = {}
            info["ether"]["mac"] = elts[1]
            info["ether"]["bcast"] = elts[3]

    return info
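
For illustration, feeding the function an abridged, made-up 'ip address show' snippet:

raw = """2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
    link/ether 00:1b:21:aa:bb:cc brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0"""

info = _parse_ip_address_show(raw)
# info["ifname"]          == "eth0"
# info["ipv4"]["ip_cidr"] == "192.168.1.10/24"
# info["ipv4"]["cidr"]    == "192.168.1.0/24"
# info["ether"]["mac"]    == "00:1b:21:aa:bb:cc"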
Example #10
    def set_filter(res, ops):
        """Set filter operations in 'ops' to test result 'res'."""

        res.clear_filts()
        for name, expr in ops.items():
            # The '--csel' and '--cfilt' options may carry a comma-separated list of column names.
            if name.startswith("c"):
                expr = Trivial.split_csv_line(expr)
            getattr(res, f"set_{name}")(expr)
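
A hypothetical 'ops' dictionary sketch (the expression and column names are made up; the keys map to methods such as 'set_rfilt()' and 'set_csel()'):

# A row filter expression plus a column selector.
ops = {"rfilt": "WakeLatency > 100", "csel": "WakeLatency,CC6%"}
set_filter(res, ops)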
Example #11
def is_root(proc=None):
    """
    If 'proc' is 'None' or a 'Proc' object, return 'True' if current process' user name is 'root'
    and 'False' if current process' user name is not 'root'. If 'proc' is an 'SSH' object, returns
    'True' if the SSH user has 'root' permissions on the remote host, otherwise returns 'False'.
    """

    if not proc or not proc.is_remote:
        return Trivial.is_root()

    stdout, _ = proc.run_verify("id -u")
    stdout = stdout.strip()
    if not Trivial.is_int(stdout):
        raise Error(
            "unexpected output from 'id -u' command, expected an integer, got:\n{stdout}"
        )

    return int(stdout) == 0
Example #12
def _get_percentile(funcname):
    """
    Parses and validates the percentile statistics function name (e.g., "99%") and returns the
    percent value (99).
    """

    percent = Trivial.str_to_num(funcname[:-1])
    if percent <= 0 or percent >= 100:
        raise Error(
            f"bad percentile number in '{funcname}', should be in range of "
            f"(0, 100)")
    return percent
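
Worked examples (assuming 'Trivial.str_to_num()' returns an 'int' or a 'float' as appropriate):

_get_percentile("99%")    # -> 99
_get_percentile("99.9%")  # -> 99.9
_get_percentile("100%")   # raises Error: must be in the (0, 100) range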
Example #13
    def _generate_metric_tabs(self):
        """Generate 'Metric Tabs' which contain the plots and summary tables for each metric."""

        for res in self.rsts:
            _LOG.debug("calculate summary functions for '%s'", res.reportid)
            res.calc_smrys(regexs=self._smry_colnames, funcnames=self._smry_funcs)

        plot_axes = [(x, y) for x, y in itertools.product(self.xaxes, self.yaxes) if x != y]

        if self.exclude_xaxes and self.exclude_yaxes:
            x_axes = self._refres.find_colnames([self.exclude_xaxes])
            y_axes = self._refres.find_colnames([self.exclude_yaxes])
            exclude_axes = list(itertools.product(x_axes, y_axes))
            plot_axes = [axes for axes in plot_axes if axes not in exclude_axes]

        tabs = []
        tab_names = [y for _, y in plot_axes]
        tab_names += self.chist + self.hist
        tab_names = Trivial.list_dedup(tab_names)

        for metric in tab_names:
            _LOG.info("Generating %s tab.", metric)

            tab_plots = []
            smry_metrics = []
            for axes in plot_axes:
                if metric in axes:
                    # Only add plots which have the tab metric on one of the axes.
                    tab_plots.append(axes)
                    # Only add metrics shown in the diagrams to the summary table.
                    smry_metrics += axes

            smry_metrics = Trivial.list_dedup(smry_metrics)

            metric_tab = _MetricTab.MetricTabBuilder(metric, self.rsts, self.outdir)
            metric_tab.add_smrytbl(smry_metrics, self._smry_funcs)
            metric_tab.add_plots(tab_plots, self.hist, self.chist, self._hov_colnames)
            tabs.append(metric_tab.get_tab())

        return tabs
Example #14
    def _set_launch_distance(self):
        """Set launch distance limits to driver."""

        try:
            limit_path = self._basedir / "ldist_max_nsec"
            with self._proc.open(limit_path, "r") as fobj:
                ldist_max = fobj.read().strip()

            limit_path = self._basedir / "ldist_min_nsec"
            with self._proc.open(limit_path, "r") as fobj:
                ldist_min = fobj.read().strip()
        except Error as err:
            raise Error(
                f"failed to read launch distance limit from '{limit_path}'"
                f"{self._proc.hostmsg}:\n{err}") from err

        ldist_min = Trivial.str_to_num(ldist_min)
        ldist_max = Trivial.str_to_num(ldist_max)
        from_path = self._basedir / "ldist_from_nsec"
        to_path = self._basedir / "ldist_to_nsec"

        for idx, ldist in enumerate(self._ldist):
            if not ldist:
                # Special case: 0 means "use the minimum possible value".
                self._ldist[idx] = ldist_min

        for ldist, ldist_path in zip(self._ldist, [from_path, to_path]):
            if ldist < ldist_min or ldist > ldist_max:
                raise Error(
                    f"launch distance '{ldist}' is out of range, it should be in range of "
                    f"[{ldist_min},{ldist_max}]")
            try:
                with self._proc.open(ldist_path, "w") as fobj:
                    fobj.write(str(ldist))
            except Error as err:
                raise Error(
                    f"failed to change the launch distance range: could not open '{ldist_path}'"
                    f"{self._proc.hostmsg} and write {ldist} to it:\n\t{err}"
                ) from err
Example #15
    def get_resolution(self):
        """Returns resolution of the delayed event devices in nanoseconds."""

        try:
            path = self._basedir / "resolution_nsec"
            with self._proc.open(path, "r") as fobj:
                resolution = fobj.read().strip()
        except Error as err:
            raise Error(
                f"failed to read the delayed event resolution from '{path}'"
                f"{self._proc.hostmsg}:\n{err}") from err

        return Trivial.str_to_num(resolution)
Example #16
    def _get_latency(self):
        """
        Read the next latency data line from the 'ndlrunner' helper, parse it, and return the
        resulting dictionary.
        """

        line = self._get_line(prefix="datapoint")
        line = Trivial.split_csv_line(line)

        if len(line) != 2:
            msg = self._unexpected_line_error_prefix(line)
            raise Error(
                f"{msg}\nExpected 2 comma-separated integers, got {len(line)}")

        for val in line:
            if not Trivial.is_int(val):
                msg = self._unexpected_line_error_prefix(line)
                raise Error(
                    f"{msg}\nExpected 2 comma-separated integers, got a non-integer "
                    f"'{val}'")

        # Convert nanoseconds to microseconds.
        line = [int(val) / 1000 for val in line]
        return {"RTD": line[0], "LDist": line[1]}
Example #17
def _get_deployables(srcpath, proc=None):
    """
    Returns the list of "deployables" (driver names or helper tool names) provided by the tools or
    drivers source code directory 'srcpath' on the host defined by 'proc'.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = f"make --silent -C '{srcpath}' list_deployables"
    deployables, _ = proc.run_verify(cmd)
    if deployables:
        deployables = Trivial.split_csv_line(deployables, sep=" ")

    return deployables
Example #18
File: _Nmcli.py Project: intel/wult
    def unmanage(self, ifnames):
        """
        Mark network interfaces in 'ifnames' as 'unmanaged'. The managed state can later be restored
        with 'restore_managed()'.
        """

        if not Trivial.is_iterable(ifnames):
            ifnames = [ifnames]

        for ifname in ifnames:
            managed = self.is_managed(ifname)
            if not managed:
                continue
            self._toggle_managed(ifname, False)
            if ifname not in self._saved_managed:
                self._saved_managed[ifname] = managed
Example #19
    def add_smrytbl(self, smry_metrics, smry_funcs):
        """
        The summary table includes values such as the average and median for a single metric
        (column); it "summarizes" the metric. This method creates and dumps the summary table for
        this tab, covering all the metrics referenced in the tab.
        """

        smry_tbl = _SummaryTable.SummaryTable()

        # Summaries are calculated only for numeric metrics. The tab metric ('self.tabname')
        # should come first.
        metrics = [self.tabname] if self._refres.is_numeric(self.tabname) else []
        metrics += [metric for metric in smry_metrics if self._refres.is_numeric(metric)]
        # Dedupe the list so that each metric only appears once.
        metrics = Trivial.list_dedup(metrics)

        for metric in metrics:
            # Create row in the summary table for each metric.
            defs = self._refres.defs.info[metric]
            fmt = "{:.2f}" if defs["type"] == "float" else None
            smry_tbl.add_metric(metric, defs["short_unit"], defs["descr"], fmt)

            # Select only those functions that are present in all test results. For example, 'std'
            # will not be present if the result has only one datapoint. In this case, we need to
            # exclude the 'std' function.
            funcs = []
            for funcname in smry_funcs:
                if all(res.smrys[metric].get(funcname) is not None for res in self._rsts):
                    funcs.append(funcname)

            # Populate each row with summary functions for each result.
            for res in self._rsts:
                for funcname in funcs:
                    val = res.smrys[metric][funcname]
                    smry_tbl.add_smry_func(res.reportid, metric, funcname, val)
        try:
            smry_tbl.generate(self._smrytblpath)
        except Error as err:
            raise Error("Failed to generate summary table.") from err
Example #20
def deploy_command(args):
    """Implements the 'deploy' command for the 'wult' and 'ndl' tools."""

    args.stmpdir = None # Temporary directory on the SUT.
    args.ctmpdir = None # Temporary directory on the controller (local host).

    if not FSHelpers.which("rsync", default=None):
        raise Error("please, install the 'rsync' tool")

    if not args.timeout:
        args.timeout = 8
    else:
        args.timeout = Trivial.str_to_num(args.timeout)
    if not args.username:
        args.username = "******"

    if args.privkey and not args.privkey.is_file():
        raise Error(f"path '{args.privkey}' does not exist or it is not a file")

    if args.pyhelpers:
        # Local temporary directory is only needed for creating stand-alone version of python
        # helpers.
        args.ctmpdir = FSHelpers.mktemp(prefix=f"{args.toolname}-")

    with contextlib.closing(ToolsCommon.get_proc(args, args.hostname)) as proc:
        if not FSHelpers.which("make", default=None, proc=proc):
            raise Error(f"please, install the 'make' tool{proc.hostmsg}")

        if proc.is_remote or not args.ctmpdir:
            args.stmpdir = FSHelpers.mktemp(prefix=f"{args.toolname}-", proc=proc)
        else:
            args.stmpdir = args.ctmpdir

        success = True
        try:
            _deploy_drivers(args, proc)
            _deploy_helpers(args, proc)
        except:
            success = False
            raise
        finally:
            _remove_deploy_tmpdir(args, proc, success=success)
Example #21
    def set_intervals(self, intervals):
        """
        Set intervals for statistics collectors. The 'intervals' argument should be a dictionary
        with statistics collector names as keys and the collection interval as the value. This
        method should be called prior to the 'configure()' method. By default the statistics
        collectors use intervals from the 'DEFAULT_STINFO' statistics description dictionary.
        Returns a dictionary of the same structure as 'intervals', but with the interval values
        that will actually be used for all the statistics collectors.
        """

        # Validate the intervals and save them in 'stinfo' as floating-point numbers.
        for stname, interval in intervals.items():
            if not Trivial.is_float(interval):
                raise Error(
                    f"bad interval value '{interval}' for '{stname}' statistics"
                )
            self.stinfo[stname]["interval"] = float(interval)

        actual = {}
        for stname, stinfo in self.stinfo.items():
            if stinfo.get("interval"):
                actual[stname] = stinfo.get("interval")

        return actual
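
A hypothetical call sketch ('stcoll' stands for a statistics collector object, the statistic names are examples, and the intervals are assumed to be in seconds):

actual = stcoll.set_intervals({"turbostat": 5, "ipmi": 10})
# 'actual' maps statistic names to the interval values that will actually be
# used, e.g., {"turbostat": 5.0, "ipmi": 10.0}.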
Example #22
    def _next(self):
        """
        Generator which yields a dictionary corresponding to one snapshot of turbostat output at a
        time.
        """

        cpus = {}
        table_started = False
        nontable = {}
        heading = totals = None

        tbl_regex = re.compile(self._cols_regex)

        for line in self._lines:
            # Ignore empty and 'jitter' lines like "turbostat: cpu65 jitter 2574 5881".
            if not line or line.startswith("turbostat: "):
                continue

            # Match the beginning of the turbostat table.
            if not table_started and not re.match(tbl_regex, line):
                _add_nontable_data(nontable, line)
                continue

            line = line.split()
            if Trivial.is_float(line[0]):
                # This is the continuation of the table we are currently parsing. It starts either
                # with a floating-point 'Time_Of_Day_Seconds' value or an integer 'Core' value.
                # Each line describes a single CPU.
                cpu_data = _parse_turbostat_line(heading, line)
                cpus[cpu_data["CPU"]] = cpu_data
            else:
                # This is the start of the new table.
                if cpus or table_started:
                    if not cpus:
                        # This is the special case for single-CPU systems. Turbostat does not
                        # print the totals because there is only one CPU and the totals are the
                        # same as the CPU information.
                        cpus[0] = totals
                    yield _construct_the_result(totals, cpus, nontable)
                    nontable = {}
                    cpus = {}

                heading = {}
                for key in line:
                    if "%" in key or "Watt" in key or key in {"Time_Of_Day_Seconds", "IPC"}:
                        heading[key] = float
                    elif key in ("Package", "Core", "CPU"):
                        heading[key] = str
                    else:
                        heading[key] = None

                # The next line is total statistics across all CPUs, except if there is only a
                # single CPU in the system.

                # False pylint warning, see issue: https://github.com/PyCQA/pylint/issues/1830
                line = next(self._lines).split() # pylint: disable=stop-iteration-return

                # On systems with a single core turbostat does not include the "Core" column.
                # Similarly, on single-CPU systems the "CPU" column is excluded. Make sure we
                # always have them.
                for key in ("Core", "CPU"):
                    if key not in heading:
                        heading[key] = str
                        line.append("0")

                totals = _parse_turbostat_line(heading, line)

            table_started = True

        yield _construct_the_result(totals, cpus, nontable)
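
For reference, a simplified, made-up sketch of the turbostat fragment this generator walks through: a heading line, the totals line right after it, then one line per CPU.

# Core  CPU  Busy%  Bzy_MHz   <- heading, matches 'self._cols_regex'
# -     -    1.23   2893      <- totals across all CPUs (read via 'next()')
# 0     0    1.10   2850      <- per-CPU lines; the first token is a number,
# 0     1    1.35   2941      <-   so 'Trivial.is_float()' detects them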
Example #23
def kill_pids(pids,
              sig: str = "SIGTERM",
              kill_children: bool = False,
              must_die: bool = False,
              proc=None):
    """
    This function kills or signals processes with PIDs in 'pids' on the host defined by 'proc'. The
    'pids' argument can be a collection of PID numbers ('int' or 'str' types) or a single PID
    number.

    By default the processes are killed (SIGTERM), but you can specify any signal either by name or
    by number.

    The 'kill_children' and 'must_die' arguments must only be used when killing processes (SIGTERM
    or SIGKILL). The 'kill_children' argument controls whether this function should also try
    killing the children. If the 'must_die' argument is 'True', then this function also verifies
    that the process(es) did actually die, and if any of them did not die, it raises an exception.

    By default this function operates on the local host, but the 'proc' argument can be used to pass
    a connected 'SSH' object in which case this function will operate on the remote host.
    """
    def collect_zombies(proc):
        """In case of a local process we need to 'waitpid()' the children."""

        if not proc.is_remote:
            with contextlib.suppress(OSError):
                os.waitpid(0, os.WNOHANG)

    if not proc:
        proc = Procs.Proc()

    if not pids:
        return

    if not Trivial.is_iterable(pids):
        pids = (pids, )

    pids = [str(int(pid)) for pid in pids]

    if sig is None:
        sig = "SIGTERM"
    else:
        sig = str(sig)

    killing = _is_sigterm(sig) or _is_sigkill(sig)
    if (kill_children or must_die) and not killing:
        raise Error(
            f"'children' and 'must_die' arguments cannot be used with '{sig}' signal"
        )

    if kill_children:
        # Find all the children of the process.
        for pid in pids:
            children, _, exitcode = proc.run(f"pgrep -P {pid}", join=False)
            if exitcode != 0:
                break
            pids += [child.strip() for child in children]

    pids_spc = " ".join(pids)
    pids_comma = ",".join(pids)
    _LOG.debug("sending '%s' signal to the following process%s: %s", sig,
               proc.hostmsg, pids_comma)

    try:
        proc.run_verify(f"kill -{sig} -- {pids_spc}")
    except Error as err:
        if not killing:
            raise Error(
                f"failed to send signal '{sig}' to PIDs '{pids_comma}'{proc.hostmsg}:\n"
                f"{err}") from err
        # Some error happened on the first attempt. We've seen a couple of situations when this
        # happens.
        # 1. Most often, a PID does not exist anymore, the process exited already (race condition).
        # 2. One of the processes in the list is owned by a different user (e.g., root). Let's call
        #   it process A. We have no permissions to kill process A, but we can kill other processes
        #   in the 'pids' list. But often killing other processes in the 'pids' list will make
        #   process A exit. This is why we do not error out just yet.
        #
        # So the strategy is to do a second signal-sending round, which often completes without
        # errors, and all the processes that we want to kill just go away.
    if not killing:
        return

    # Give the processes up to 4 seconds to die.
    timeout = 4
    start_time = time.time()
    while time.time() - start_time <= timeout:
        collect_zombies(proc)
        _, _, exitcode = proc.run(f"kill -0 -- {pids_spc}")
        if exitcode == 1:
            return
        time.sleep(0.2)

    if _is_sigterm(sig):
        # Something refused to die, try SIGKILL.
        try:
            proc.run_verify(f"kill -9 -- {pids_spc}")
        except Error as err:
            # It is fine if one of the processes exited meanwhile.
            if "No such process" not in str(err):
                raise
        collect_zombies(proc)
        if not must_die:
            return
        # Give the processes up to 4 seconds to die.
        timeout = 4
        start_time = time.time()
        while time.time() - start_time <= timeout:
            collect_zombies(proc)
            _, _, exitcode = proc.run(f"kill -0 -- {pids_spc}")
            if exitcode != 0:
                return
            time.sleep(0.2)

    # Something refused to die, find out what.
    msg, _ = proc.run_verify(f"ps -f {pids_spc}", join=False)
    if len(msg) < 2:
        msg = pids_comma

    raise Error(
        f"one of the following processes{proc.hostmsg} did not die after 'SIGKILL': {msg}"
    )
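
A hypothetical usage sketch on the local host (the PID is made up):

# Terminate process 12345 and all of its children, and verify that they died.
kill_pids(12345, sig="SIGTERM", kill_children=True, must_die=True)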
Example #24
    def __init__(self, rsts, outdir, title_descr=None, xaxes=None, yaxes=None, hist=None,
                 chist=None, exclude_xaxes=None, exclude_yaxes=None):
        """
        The class constructor. The arguments are as follows.
          * rsts - list of 'RORawResult' objects representing the raw test results to generate the
                   HTML report for.
          * outdir - the output directory path to store the HTML report at.
          * title_descr - a string describing this report or a file path containing the description.
                          The description will be put at the top part of the HTML report. It should
                          describe the report in general (e.g., it compares platform A to platform
                          B). By default no title description is added to the HTML report.
          * xaxes - list of regular expressions matching datapoints CSV file column names to use for
                    the X axis of scatter plot diagrams. A scatter plot will be generated for each
                    combination of 'xaxes' and 'yaxes' column name pair (except for the pairs in
                    'exclude_xaxes'/'exclude_yaxes'). Default is the first column in the datapoints
                    CSV file.
          * yaxes - list of regular expressions matching datapoints CSV file column names to use for
                    the Y axis of scatter plot diagrams. Default is the second column in the
                    datapoints CSV file.
          * hist - list of regular expressions matching datapoints CSV file column names to create a
                   histogram for. Default is the first column in the datapoints CSV file. An empty
                   string can be used to disable histograms.
          * chist - list of regular expressions matching datapoints CSV file column names to create
                    a cumulative histogram for. Default is the first column in the datapoints CSV
                    file. An empty string can be used to disable cumulative histograms.
          * exclude_xaxes - by default all diagrams of X- vs Y-axes combinations will be created.
                            The 'exclude_xaxes' is a list of regular expressions matching
                            datapoints CSV file column names. There will be no scatter plots for
                            any combination of 'exclude_xaxes' and 'exclude_yaxes'. In other words,
                            this argument along with 'exclude_yaxes' allows for excluding some
                            diagrams from the 'xaxes' and 'yaxes' combinations.
          * exclude_yaxes - same as 'exclude_xaxes', but for Y-axes.
        """

        self.rsts = rsts
        self.outdir = Path(outdir)
        self.title_descr = title_descr
        self.xaxes = xaxes
        self.yaxes = yaxes
        self.exclude_xaxes = exclude_xaxes
        self.exclude_yaxes = exclude_yaxes
        self.hist = hist
        self.chist = chist

        self._projname = "wult"

        # Users can change this to 'True' to make the reports relocatable. In which case the raw
        # results files will be copied from the test result directories to the output directory.
        self.relocatable = False

        # The first result is the 'reference' result.
        self._refres = rsts[0]
        # The raw reference result information.
        self._refinfo = self._refres.info

        # Names of columns in the datapoints CSV file to provide the summary function values for
        # (e.g., median, 99th percentile). The summaries will show up in the summary tables (one
        # table per metric).
        self._smry_colnames = None
        # List of functions to provide in the summary tables.
        self._smry_funcs = ("nzcnt", "max", "99.999%", "99.99%", "99.9%", "99%", "med", "avg",
                            "min", "std")
        # Per-test result list of column names to include into the hover text of the scatter plot.
        # By default only the x and y axis values are included.
        self._hov_colnames = {}
        # Additional columns to load, if they exist in the CSV file.
        self._more_colnames = []

        self._validate_init_args()
        self._init_colnames()

        # We'll provide summaries for every column participating in at least one diagram.
        smry_colnames = Trivial.list_dedup(self.yaxes + self.xaxes + self.hist + self.chist)
        # Summary functions table includes all test results, but the results may have a bit
        # different set of column names (e.g., they were collected with different wult versions or
        # using different methods, or on different systems). Therefore, include only common columns
        # into it.
        self._smry_colnames = []
        for colname in smry_colnames:
            for res in rsts:
                if colname not in res.colnames_set:
                    break
            else:
                self._smry_colnames.append(colname)

        self._init_assets()

        if (self.exclude_xaxes and not self.exclude_yaxes) or \
           (self.exclude_yaxes and not self.exclude_xaxes):
            raise Error("'exclude_xaxes' and 'exclude_yaxes' must both be 'None' or both not be "
                        "'None'")