Example #1
File: _FTrace.py Project: intel/wult
    def __init__(self, proc, timeout=30):
        """
        Class constructor. The arguments are as follows.
          * proc - the 'Proc' or 'SSH' object that defines the host to operate on. This object will
                   keep a 'proc' reference and use it in various methods.
          * timeout - longest time in seconds to wait for data in the trace buffer.
        """

        self._reader = None
        self._proc = proc
        self.timeout = timeout
        self.raw_line = None

        mntpoint = FSHelpers.mount_debugfs(proc=proc)
        self.ftpath = mntpoint.joinpath("tracing/trace")
        self.ftpipe_path = mntpoint.joinpath("tracing/trace_pipe")

        for path in (self.ftpath, self.ftpipe_path):
            if not FSHelpers.isfile(path, proc=proc):
                raise ErrorNotSupported(
                    f"linux kernel function trace file was not found at "
                    f"'{path}'{proc.hostmsg}")

        cmd = f"cat {self.ftpipe_path}"
        name = "stale wult function trace reader process"
        ProcHelpers.kill_processes(cmd, log=True, name=name, proc=self._proc)
        self._clear()
        self._reader = self._proc.run_async(cmd)
Example #2
    def _copy_asset(self, src, descr, dst):
        """
        Copy asset file to the output directory. Arguments are as follows:
         * src - source path of the file to copy.
         * descr - description of the file which is being copied.
         * dst - where the file should be copied to.
        """

        asset_path = Deploy.find_app_data(self._projname, src, descr=descr)
        FSHelpers.move_copy_link(asset_path, dst, "copy", exist_ok=True)
Example #3
    def _get_driver(self):
        """
        Find out whether the PCI device is bound to any driver. If it is not, returns the
        '(None, None)' tuple. Otherwise returns a tuple of:
         * driver name
         * driver sysfs path
        """

        drvpath = Path(f"{self._devpath}/driver")
        if not FSHelpers.exists(drvpath, proc=self._proc):
            return (None, None)

        drvpath = FSHelpers.abspath(drvpath, proc=self._proc)
        drvname = Path(drvpath).name
        return (drvname, drvpath)
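
The lookup above is plain sysfs behavior and can be reproduced locally without the wult helpers. Below is a minimal sketch, assuming a hypothetical PCI address: the 'driver' entry under the device's sysfs directory is a symlink to the bound driver, and the symlink target's base name is the driver name.

import os
from pathlib import Path

def get_driver(pciaddr="0000:04:00.0"):
    """Return '(drvname, drvpath)' for a PCI device, or '(None, None)' if no driver is bound."""

    # Hypothetical PCI address; the 'driver' symlink exists only when a driver is bound.
    drvpath = Path(f"/sys/bus/pci/devices/{pciaddr}/driver")
    if not drvpath.exists():
        return (None, None)

    drvpath = Path(os.path.realpath(drvpath))
    return (drvpath.name, drvpath)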
Example #4
    def _get_hw_addr(self):
        """
        Return the hardware address for the NIC corresponding to the network interface. Typically
        the hardware address is a PCI address, such as '0000:04:00.0'.
        """

        # The "device" symlink leads to the sysfs subdirectory corresponding to the underlying NIC.
        path = self._sysfsbase / "device"
        if not FSHelpers.exists(path, proc=self._proc):
            raise ErrorNotFound(
                f"cannot find network interface '{self.ifname}':\n"
                f"path '{path}' does not exist{self._proc.hostmsg}'")

        # The name of the subdirectory is the hardware address.
        path = FSHelpers.abspath(path, proc=self._proc)
        return path.name
Example #5
    def disable_ipv6(self):
        """Disable IPv6 for the network interface."""

        path = Path("/proc/sys/net/ipv6/conf/{self.ifname}/disable_ipv6")
        if FSHelpers.isfile(path, proc=self._proc):
            with self._proc.open(path, "w") as fobj:
                fobj.write("1")
Example #6
    def bind(self, drvname):
        """Bind the PCI device to driver 'drvname'."""

        _LOG.debug("binding device '%s' to driver '%s'%s",
                   self._pci_info["pciaddr"], drvname, self._proc.hostmsg)

        failmsg = f"failed to bind device '{self._pci_info['pciaddr']}' to driver '{drvname}'" \
                  f"{self._proc.hostmsg}"

        drvpath = Path(f"/sys/bus/pci/drivers/{drvname}")
        if not FSHelpers.exists(drvpath, proc=self._proc):
            raise Error(
                f"{failmsg}':\npath '{drvpath}' does not exist{self._proc.hostmsg}"
            )

        cur_drvname = self.get_driver_name()
        if cur_drvname == drvname:
            _LOG.debug("device '%s' is already bound to driver '%s'%s",
                       self._pci_info["pciaddr"], drvname, self._proc.hostmsg)
            return

        if cur_drvname:
            raise Error(
                f"{failmsg}:\nit is already bound to driver '{cur_drvname}'")

        # At this point we do not know if the driver supports this PCI ID. So start with the
        # assumption that it does not, in which case writing to the 'new_id' file should do both:
        # * make the driver aware of the PCI ID
        # * bind the device
        path = f"{drvpath}/new_id"
        val = f"{self._pci_info['vendorid']} {self._pci_info['devid']}"
        bound = True

        try:
            with self._proc.open(path, "wt") as fobj:
                _LOG.debug("writing '%s' to file '%s'", val, path)
                fobj.write(val)
        except Error:
            bound = False

        if not bound:
            # Probably the driver already knows about this PCI ID. Use the 'bind' file in this case.
            path = f"{drvpath}/bind"
            val = self._pci_info["pciaddr"]
            with self._proc.open(path, "wt") as fobj:
                _LOG.debug("writing '%s' to file '%s'", val, path)
                try:
                    fobj.write(val)
                except Error as err:
                    raise Error(
                        f"{failmsg}:\n{err}\n{self.get_new_dmesg()}") from err

        # Verify that the device is bound to the driver.
        if not self._get_driver()[1]:
            raise Error(f"{failmsg}\n{self.get_new_dmesg()}")

        _LOG.debug("binded device '%s' to driver '%s'%s\n%s",
                   self._pci_info["pciaddr"], drvname, self._proc.hostmsg,
                   self.get_new_dmesg())
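
For context, the 'new_id'/'bind' files used above are generic sysfs interfaces rather than anything wult-specific. The following is a minimal local sketch of the same two-step approach, assuming hypothetical driver name, PCI IDs, and address (and root privileges): writing the vendor/device ID pair to 'new_id' both teaches the driver about the PCI ID and usually binds the device; if the driver already knows the ID, writing the PCI address to 'bind' is sufficient.

from pathlib import Path

def bind_pci_device(drvname="igb", vendorid="8086", devid="1533", pciaddr="0000:04:00.0"):
    """Bind a PCI device to a driver via sysfs (hypothetical IDs, requires root)."""

    drvpath = Path(f"/sys/bus/pci/drivers/{drvname}")
    try:
        # Teach the driver about the PCI ID; this typically also binds the device.
        (drvpath / "new_id").write_text(f"{vendorid} {devid}")
    except OSError:
        # The driver probably knows the PCI ID already - bind explicitly instead.
        (drvpath / "bind").write_text(pciaddr)

    # The device's 'driver' symlink should now exist if the binding succeeded.
    return Path(f"/sys/bus/pci/devices/{pciaddr}/driver").exists()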
Example #7
    def __init__(self,
                 dev,
                 cpunum,
                 proc,
                 ldist=None,
                 intr_focus=None,
                 early_intr=None,
                 dcbuf_size=None):
        """
        Initialize a class instance for the delayed event device 'dev'. The arguments are as follows.
          * dev - the delayed event device object created by 'Devices.WultDevice()'.
          * cpunum - the measured CPU number.
          * proc - the host to operate on. This object will keep a 'proc' reference and use it in
                   various methods.
          * ldist - a pair of numbers specifying the launch distance range. The default value is
                    specific to the delayed event driver.
          * intr_focus - enable interrupt latency focused measurements ('WakeLatency' is not measured
                         in this case, only 'IntrLatency').
          * early_intr - enable interrupts before entering the C-state.
          * dcbuf_size - size of a memory buffer to write to before requesting C-states in order to
                         "dirty" the CPU cache. By default the CPU cache dirtying fetature is
                         disabled. The size has to be an integer amount of bytes.
        """

        self.dev = dev
        self._cpunum = cpunum
        self._proc = proc
        self._ldist = ldist
        self._intr_focus = intr_focus
        self._early_intr = early_intr
        self._dcbuf_size = dcbuf_size
        self._drv = None
        self._saved_drvname = None
        self._basedir = None
        self._enabled_path = None
        self._main_drv = None

        # This is a debugging option that allows disabling automatic wult module unloading on
        # 'close()'.
        self.unload = True

        self._main_drv = KernelModule.KernelModule("wult",
                                                   proc=proc,
                                                   dmesg=dev.dmesg_obj)
        self._drv = KernelModule.KernelModule(self.dev.drvname,
                                              proc=proc,
                                              dmesg=dev.dmesg_obj)

        mntpoint = FSHelpers.mount_debugfs(proc=proc)
        self._basedir = mntpoint / "wult"
        self._enabled_path = self._basedir / "enabled"
        self._intr_focus_path = self._basedir / "intr_focus"
        self._early_intr_path = self._basedir / "early_intr"
        self._dcbuf_size_path = self._basedir / "dcbuf_size"

        msg = f"Compatible device '{self.dev.info['name']}'{proc.hostmsg}:\n" \
              f" * Device ID: {self.dev.info['devid']}\n" \
              f"   - {self.dev.info['descr']}"
        _LOG.info(msg)
Example #8
File: NdlRunner.py Project: intel/wult
    def _verify_input_args(self):
        """Verify and adjust the constructor input arguments."""

        # Validate the 'ndlrunner' helper path.
        if not FSHelpers.isexe(self._ndlrunner_bin, proc=self._proc):
            raise Error(
                f"bad 'ndlrunner' helper path '{self._ndlrunner_bin}' - does not exist"
                f"{self._proc.hostmsg} or not an executable file")
Example #9
def _remove_deploy_tmpdir(args, proc, success=True):
    """Remove temporary files."""

    ctmpdir = getattr(args, "ctmpdir", None)
    stmpdir = getattr(args, "stmpdir", None)

    if args.debug and not success:
        _LOG.debug("preserved the following temporary directories for debugging purposes:")
        if ctmpdir:
            _LOG.debug(" * On the local host: %s", ctmpdir)
        if stmpdir and stmpdir != ctmpdir:
            _LOG.debug(" * On the SUT: %s", stmpdir)
    else:
        if ctmpdir:
            FSHelpers.rm_minus_rf(args.ctmpdir, proc=proc)
        if stmpdir:
            FSHelpers.rm_minus_rf(args.stmpdir, proc=proc)
Example #10
    def _init_outdir(self, discovery=False):
        """
        Helper function for 'configure()' that creates the output directory and several of its
        sub-directories.
        """

        if not self.outdir:
            self.outdir = FSHelpers.mktemp(prefix="stats-collect-",
                                           proc=self._proc)
            self._outdir_created = True
            _LOG.debug("created output directory '%s'%s", self.outdir,
                       self._proc.hostmsg)
        else:
            try:
                FSHelpers.mkdir(self.outdir, parents=True, proc=self._proc)
            except ErrorExists:
                pass
            else:
                self._outdir_created = True

        self._logsdir = self.outdir / "logs"
        FSHelpers.mkdir(self._logsdir, exist_ok=True, proc=self._proc)

        if discovery:
            # The statistics collected during discovery belong to the logs.
            self._statsdir = self._logsdir / "discovery-stats"
        else:
            self._statsdir = self.outdir / "stats"
        FSHelpers.mkdir(self._statsdir, exist_ok=True, proc=self._proc)
Example #11
def get_helpers_deploy_path(proc, toolname):
    """
    Get helpers deployment path for 'toolname' on the system associated with the 'proc' object.
    """

    helpers_path = os.environ.get(f"{toolname.upper()}_HELPERSPATH")
    if not helpers_path:
        helpers_path = FSHelpers.get_homedir(proc=proc) / _HELPERS_LOCAL_DIR / "bin"
    return Path(helpers_path)
Example #12
def deploy_command(args):
    """Implements the 'deploy' command for the 'wult' and 'ndl' tools."""

    args.stmpdir = None # Temporary directory on the SUT.
    args.ctmpdir = None # Temporary directory on the controller (local host).

    if not FSHelpers.which("rsync", default=None):
        raise Error("please, install the 'rsync' tool")

    if not args.timeout:
        args.timeout = 8
    else:
        args.timeout = Trivial.str_to_num(args.timeout)
    if not args.username:
        args.username = "******"

    if args.privkey and not args.privkey.is_file():
        raise Error(f"path '{args.privkey}' does not exist or it is not a file")

    if args.pyhelpers:
        # Local temporary directory is only needed for creating stand-alone version of python
        # helpers.
        args.ctmpdir = FSHelpers.mktemp(prefix=f"{args.toolname}-")

    with contextlib.closing(ToolsCommon.get_proc(args, args.hostname)) as proc:
        if not FSHelpers.which("make", default=None, proc=proc):
            raise Error(f"please, install the 'make' tool{proc.hostmsg}")

        if proc.is_remote or not args.ctmpdir:
            args.stmpdir = FSHelpers.mktemp(prefix=f"{args.toolname}-", proc=proc)
        else:
            args.stmpdir = args.ctmpdir

        success = True
        try:
            _deploy_drivers(args, proc)
            _deploy_helpers(args, proc)
        except:
            success = False
            raise
        finally:
            _remove_deploy_tmpdir(args, proc, success=success)
Example #13
    def __init__(self, devid, cpunum, proc, dmesg=None):
        """The class constructor. The arguments are the same as in '_WultDeviceBase.__init__()'."""

        super().__init__(devid, cpunum, proc, dmesg=dmesg)

        self._pci_info = None
        self._devpath = None

        path = Path(f"/sys/bus/pci/devices/{self._devid}")
        if not FSHelpers.exists(path, proc=proc):
            raise ErrorNotFound(
                f"cannot find device '{self._devid}'{self._proc.hostmsg}:\n"
                f"path {path} does not exist")

        self._devpath = FSHelpers.abspath(path, proc=self._proc)
        self._pci_info = LsPCI.LsPCI(proc).get_info(Path(self._devpath).name)

        if self.supported_devices and self._pci_info["devid"] not in self.supported_devices:
            supported = [
                "%s - %s" % (key, val)
                for key, val in self.supported_devices.items()
            ]
            supported = "\n * ".join(supported)
            raise ErrorNotSupported(
                f"PCI device '{self._pci_info['pciaddr']}' (PCI ID "
                f"{self._pci_info['devid']}) is not supported by wult driver "
                f"{self.drvname}.\nHere is the list of supported PCI IDs:\n* "
                f"{supported}")

        self.info["name"] = "Intel I210"
        self.info["devid"] = self._pci_info["pciaddr"]
        if self.supported_devices:
            self.info["descr"] = self.supported_devices[
                self._pci_info["devid"]]
        else:
            self.info["name"] = self._pci_info["name"]
            self.info["descr"] = self.info['name'].capitalize()

        self.info["descr"] += f". PCI address {self._pci_info['pciaddr']}, Vendor ID " \
                              f"{self._pci_info['vendorid']}, Device ID {self._pci_info['devid']}."
        self.info["aspm_enabled"] = self._pci_info["aspm_enabled"]
Example #14
    def _check_ip_tool_present(self):
        """Verifies that the "ip" tool is available."""

        if self._ip_tool_present:
            return

        if not FSHelpers.which("ip", default=None, proc=self._proc):
            raise ErrorNotSupported(
                f"the 'ip' tool is not installed{self._proc.hostmsg}.\nThis "
                f"tool is part of the 'iproute2' project, please install it.")
        self._ip_tool_present = True
Example #15
def _get_ifinfos(proc):
    """
    For every network interface backed by a real device on the system defined by 'proc', yield the
    following tuples:
    * interface name
    * device HW address
    """

    for ifname, path, typ in FSHelpers.lsdir(_SYSFSBASE, proc=proc):
        if typ != "@":
            # We expect a symlink.
            continue

        devpath = None
        with contextlib.suppress(Error):
            devpath = FSHelpers.abspath(path / "device", proc=proc)
        if not devpath:
            continue

        yield ifname, devpath.name
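
The same enumeration can be done locally with just the standard library; a small sketch of the idea follows (it assumes the usual '/sys/class/net' layout): every entry is a symlink to the interface's sysfs directory, and the 'device' symlink, which exists only for interfaces backed by real hardware, resolves to a directory named after the hardware (e.g., PCI) address.

import os
from pathlib import Path

def get_ifinfos(sysfsbase="/sys/class/net"):
    """Yield '(ifname, hwaddr)' tuples for network interfaces backed by real devices."""

    for entry in Path(sysfsbase).iterdir():
        if not entry.is_symlink():
            # Real interfaces show up as symlinks under '/sys/class/net'.
            continue
        devpath = entry / "device"
        if devpath.exists():
            # The name of the resolved 'device' directory is the HW address.
            yield entry.name, Path(os.path.realpath(devpath)).name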
Example #16
File: LsPCI.py Project: intel/wult
    def __init__(self, proc=None):
        """Class constructor."""

        if not proc:
            proc = Procs.Proc()

        self._proc = proc
        self._lspci_bin = "lspci"

        if not FSHelpers.which(self._lspci_bin, default=None, proc=proc):
            raise ErrorNotSupported(
                f"the '{self._lspci_bin}' tool is not installed{proc.hostmsg}")
Example #17
def _get_module_path(proc, name):
    """Return path to installed module. Return 'None', if module not found."""

    cmd = f"modinfo -n {name}"
    stdout, _, exitcode = proc.run(cmd)
    if exitcode != 0:
        return None

    modpath = Path(stdout.strip())
    if FSHelpers.isfile(modpath, proc):
        return modpath
    return None
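
The equivalent local-only check needs nothing beyond the standard library and the 'modinfo' tool from kmod; a rough sketch:

import subprocess
from pathlib import Path

def get_module_path(name):
    """Return the path to the installed kernel module 'name', or 'None' if it is not found."""

    result = subprocess.run(["modinfo", "-n", name], capture_output=True, text=True, check=False)
    if result.returncode != 0:
        return None

    modpath = Path(result.stdout.strip())
    return modpath if modpath.is_file() else None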
Example #18
    def _init_outdir(self):
        """Initialize the output directory for writing or appending test results."""

        if self.dirpath.exists():
            # Only accept empty output directory.
            paths = (self.dp_path, self.info_path, self.logs_path,
                     self.stats_path, self.descr_path)
            for path in paths:
                if path.exists():
                    raise ErrorExists(
                        f"cannot use path '{self.dirpath}' as the output directory, "
                        f"it already contains '{path.name}'")
            self._created_paths = paths
        else:
            try:
                self.dirpath.mkdir(parents=True, exist_ok=True)
                self._created_paths.append(self.dirpath)
                FSHelpers.set_default_perm(self.dirpath)
            except OSError as err:
                raise Error(
                    f"failed to create directory '{self.dirpath}':\n{err}"
                ) from None

        self.csv = _CSV.WritableCSV(self.dp_path)

        if self.info_path.exists():
            if not self.info_path.is_file():
                raise Error(
                    f"path '{self.info_path}' exists, but it is not a regular file"
                )
            # Verify that we are going to be able to write to the info file.
            if not os.access(self.info_path, os.W_OK):
                raise Error(f"cannot access '{self.info_path}' for writing")
        else:
            # Create an empty info file in advance.
            try:
                self.info_path.open("tw+", encoding="utf-8").close()
            except OSError as err:
                raise Error(f"failed to create file '{self.info_path}':\n{err}"
                            ) from None
Example #19
    def close(self):
        """Close the statistics collector."""

        acquired = self._close_lock.acquire(timeout=1)  # pylint: disable=consider-using-with
        if not acquired:
            return

        try:
            if getattr(self, "_sock", None):
                if self._start_time:
                    with contextlib.suppress(Exception):
                        self._send_command("stop")
                with contextlib.suppress(Exception):
                    self._send_command("exit")
                with contextlib.suppress(Exception):
                    self._disconnect()
                self._sock = None

            if getattr(self, "_proc", None):
                if self._ssht:
                    with contextlib.suppress(Exception):
                        ProcHelpers.kill_processes(self._ssht_search,
                                                   proc=self._proc)
                    self._ssht = None

                if self._sc:
                    with contextlib.suppress(Exception):
                        ProcHelpers.kill_processes(self._sc_search,
                                                   proc=self._proc)
                    self._sc = None

                # Remove the output directory if we created it.
                if getattr(self, "_outdir_created", None):
                    with contextlib.suppress(Exception):
                        FSHelpers.rm_minus_rf(self.outdir, proc=self._proc)
                    self._outdir_created = None

                self._proc = None
        finally:
            self._close_lock.release()
Example #20
    def _init_paths(self):
        """
        Helper function for 'start_stats_collect()' that discovers and initializes various
        paths.
        """

        # Discover path to 'stats-collect'.
        if not self._sc_path:
            self._sc_path = FSHelpers.which("stats-collect", proc=self._proc)

        is_root = ProcHelpers.is_root(proc=self._proc)

        if not self._unshare_path and is_root:
            # Unshare is used for running 'stats-collect' in a separate PID namespace. We do this
            # because when the PID 1 process of the namespace is killed, all other processes get
            # automatically killed. This helps to easily and reliably clean up processes upon exit.
            # But creating a PID namespace requires 'root'.
            self._unshare_path = FSHelpers.which("unshare",
                                                 default=None,
                                                 proc=self._proc)
            if not self._unshare_path:
                _LOG.warning(
                    "the 'unshare' tool is missing%s, it is recommended to have it "
                    "installed. This tool is part of the 'util-linux' project",
                    self._proc.hostmsg)

        if not self._nice_path and is_root:
            # We are trying to run 'stats-collect' with high priority, because we want the
            # statistics to be collected at steady intervals. The 'nice' tool helps changing the
            # priority of the process.
            self._nice_path = FSHelpers.which("nice",
                                              default=None,
                                              proc=self._proc)
            if not self._nice_path:
                _LOG.warning(
                    "the 'nice' tool is missing%s, it is recommended to have it "
                    "installed. This tool is part of the 'coreutils' project",
                    self._proc.hostmsg)
Example #21
    def _copy_raw_data(self):
        """Copy raw test results to the output directory."""

        # Paths to the stats directory.
        stats_paths = {}
        # Paths to the logs directory.
        logs_paths = {}
        # Paths to test reports' description files.
        descr_paths = {}

        for res in self.rsts:
            resdir = res.dirpath

            if self.relocatable:
                dstpath = self.outdir / f"raw-{res.reportid}"
                try:
                    FSHelpers.copy_dir(resdir, dstpath, exist_ok=True, ignore=["html-report"])
                    FSHelpers.set_default_perm(dstpath)
                except Error as err:
                    raise Error(f"failed to copy raw data to report directory: {err}") from None

                # Use the path of the copied raw results rather than the original.
                resdir = dstpath

            if res.stats_path.is_dir():
                stats_paths[res.reportid] = resdir / res.stats_path.name
            else:
                stats_paths[res.reportid] = None

            if res.logs_path.is_dir():
                logs_paths[res.reportid] = resdir / res.logs_path.name
            else:
                logs_paths[res.reportid] = None

            if res.descr_path.is_file():
                descr_paths[res.reportid] = resdir / res.descr_path.name

        return stats_paths, logs_paths, descr_paths
Example #22
File: _Nmcli.py Project: intel/wult
    def __init__(self, proc=None):
        """
        Initialize a class instance for the host associated with the 'proc' object. By default it
        is going to be the local host, but 'proc' can be used to pass a connected 'SSH' object, in
        which case all operations will be done on the remote host. This object will keep a 'proc'
        reference and use it in various methods.
        """

        if not proc:
            proc = Procs.Proc()

        self._proc = proc
        self._saved_managed = {}

        if not FSHelpers.which("nmcli", default=None, proc=proc):
            raise ErrorNotSupported(
                f"the 'nmcli' tool is not installed{proc.hostmsg}")
Example #23
File: NdlRunner.py Project: intel/wult
    def __init__(self, proc, netif, res, ndlrunner_bin, ldist=None):
        """
        The class constructor. The arguments are as follows.
          * proc - the 'Proc' or 'SSH' object that defines the host to run the measurements on.
          * netif - the 'NetIface' object of network device used for measurements.
          * res - the 'WORawResult' object to store the results at.
          * ndlrunner_bin - path to the 'ndlrunner' helper.
          * ldist - a pair of numbers specifying the launch distance range in nanoseconds (how far
                    in the future the delayed network packets should be scheduled). Default is
                    [5000000, 50000000].
        """

        self._proc = proc
        self._netif = netif
        self._res = res
        self._ndlrunner_bin = ndlrunner_bin
        self._ldist = ldist
        self._ifname = netif.ifname

        self._ndl_lines = None
        self._drv = None
        self._rtd_path = None
        self._ndlrunner = None
        self._progress = None
        self._max_rtd = 0
        self._etfqdisc = None
        self._nmcli = None

        if not self._ldist:
            self._ldist = [5000000, 50000000]

        self._verify_input_args()

        self._progress = _ProgressLine.ProgressLine(period=1)
        self._drv = KernelModule.KernelModule("ndl", proc=proc)

        mntpath = FSHelpers.mount_debugfs(proc=proc)
        self._rtd_path = mntpath.joinpath(f"{self._drv.name}/rtd")
        self._etfqdisc = _ETFQdisc.ETFQdisc(netif, proc=proc)
Example #24
    def __init__(self, ifid, proc=None):
        """
        Initialize a class instance for the network interface corresponding to 'ifid' on the host
        associated with the 'proc' object. The 'ifid' argument can be either the network interface
        name or its hardware address (e.g., the PCI address of the network card corresponding to
        the network interface).

        By default this class is initialized for the local host, but 'proc' can be used to pass a
        connected 'SSH' object, in which case all operations will be done on the remote host. This
        object will keep a 'proc' reference and use it in various methods.
        """

        if not proc:
            proc = Procs.Proc()

        self._ifid = ifid
        self._proc = proc
        self.ifname = None
        self.hwaddr = None
        self._sysfsbase = None
        self._saved_ip_info = {}
        self._ip_tool_present = None

        sysfsbase = _SYSFSBASE.joinpath(ifid)
        if FSHelpers.isdir(sysfsbase, proc=proc):
            # 'ifid' is a network interface name.
            self.ifname = ifid
            self._sysfsbase = sysfsbase
            self.hwaddr = self._get_hw_addr()
        else:
            # 'ifid' is probably a HW address (e.g., PCI address).
            self.ifname = self._hw_addr_to_ifname()
            if not self.ifname:
                self._raise_iface_not_found()
            self.hwaddr = ifid
            self._sysfsbase = _SYSFSBASE.joinpath(self.ifname)
Example #25
def collect_before(outdir, proc):
    """
    Collect information about the SUT (System Under Test) defined by 'proc' (an 'SSH' or 'Proc'
    object). This function is supposed to be called before running a workload on the SUT. It will
    collect various global data like the contents of the '/proc/cmdline' file, the 'lspci' output,
    and store the data in the 'outdir' directory on the SUT.
    """

    FSHelpers.mkdir(outdir, parents=True, exist_ok=True, proc=proc)

    cmdinfos = {}

    cmdinfos["proc_cmdline"] = cmdinfo = {}
    outfile = outdir / "proc_cmdline.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"cat /proc/cmdline > '{outfile}' 2>&1"

    cmdinfos["uname_a"] = cmdinfo = {}
    outfile = outdir / "uname-a.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"uname -a > '{outfile}' 2>&1"

    cmdinfos["dmidecode"] = cmdinfo = {}
    outfile = outdir / "dmidecode.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"dmidecode > '{outfile}' 2>&1"

    cmdinfos["dmidecode_u"] = cmdinfo = {}
    outfile = outdir / "dmidecode-u.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"dmidecode -u > '{outfile}' 2>&1"

    cmdinfos["lspci"] = cmdinfo = {}
    outfile = outdir / "lspci.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lspci > '{outfile}' 2>&1"

    cmdinfos["lspci_vvv"] = cmdinfo = {}
    outfile = outdir / "lspci-vvv.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lspci -vvv > '{outfile}' 2>&1"

    cmdinfos["proc_cpuinfo"] = cmdinfo = {}
    outfile = outdir / "proc_cpuinfo.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"cat /proc/cpuinfo > '{outfile}' 2>&1"

    cmdinfos["lsmod"] = cmdinfo = {}
    outfile = outdir / "lsmod.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lsmod > '{outfile}' 2>&1"

    cmdinfos["lsusb"] = cmdinfo = {}
    outfile = outdir / "lsusb.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lsusb > '{outfile}' 2>&1"

    cmdinfos["lsusb_v"] = cmdinfo = {}
    outfile = outdir / "lsusb-v.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lsusb -v > '{outfile}' 2>&1"

    cmdinfos["lsblk"] = cmdinfo = {}
    outfile = outdir / "lsblk.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"lsblk > '{outfile}' 2>&1"

    cmdinfos["sysctl_all"] = cmdinfo = {}
    outfile = outdir / "sysctl-all.raw.txt"
    cmdinfo["outfile"] = outfile
    cmdinfo["cmd"] = f"sysctl --all > '{outfile}' 2>&1"

    _run_commands(cmdinfos, proc)
    _collect_totals(outdir, "before", proc)
Example #26
def is_deploy_needed(proc, toolname, helpers=None, pyhelpers=None):
    """
    Wult and other tools require additional helper programs and drivers to be installed on the SUT.
    This function tries to analyze the SUT and figure out whether drivers and helper programs are
    present and up-to-date. Returns 'True' if re-deployment is needed, and 'False' otherwise.

    This function works by simply matching the modification date of sources and binaries for every
    required helper and driver. If the sources have a later date, then re-deployment is probably needed.
      * proc - the 'Proc' or 'SSH' object associated with the SUT.
      * toolname - name of the tool to check the necessity of deployment for (e.g., "wult").
      * helpers - list of helpers required to be deployed on the SUT.
      * pyhelpers - list of python helpers required to be deployed on the SUT.
    """

    def get_newest_mtime(paths):
        """
        Scan the list of paths 'paths', and find and return the most recent modification time
        (mtime) among the files in 'paths' and (in case a path is a directory) every file under it.
        """

        newest = 0
        for path in paths:
            if not path.is_dir():
                mtime = path.stat().st_mtime
                if mtime > newest:
                    newest = mtime
            else:
                for root, _, files in os.walk(path):
                    for file in files:
                        mtime = Path(root, file).stat().st_mtime
                        if mtime > newest:
                            newest = mtime

        if not newest:
            paths_str = "\n* ".join([str(path) for path in paths])
            raise Error(f"no files found in the following paths:\n{paths_str}")
        return newest

    def deployable_not_found(what):
        """Called when a helper of driver was not found on the SUT to raise an exception."""

        err = f"{what} was not found on {proc.hostmsg}. Please, run:\n{toolname} deploy"
        if proc.is_remote:
            err += f" -H {proc.hostname}"
        raise Error(err)


    # Build the deploy information dictionary. Start with drivers.
    dinfos = {}
    srcpath = find_app_data("wult", _DRV_SRC_SUBPATH / toolname, appname=toolname)
    dstpaths = []
    for deployable in _get_deployables(srcpath):
        dstpath = _get_module_path(proc, deployable)
        if not dstpath:
            deployable_not_found(f"the '{deployable}' kernel module")
        dstpaths.append(_get_module_path(proc, deployable))
    dinfos["drivers"] = {"src" : [srcpath], "dst" : dstpaths}

    # Add non-python helpers' deploy information.
    if helpers or pyhelpers:
        helpers_deploy_path = get_helpers_deploy_path(proc, toolname)

    if helpers:
        for helper in helpers:
            srcpath = find_app_data("wult", _HELPERS_SRC_SUBPATH / helper, appname=toolname)
            dstpaths = []
            for deployable in _get_deployables(srcpath):
                dstpaths.append(helpers_deploy_path / deployable)
            dinfos[helper] = {"src" : [srcpath], "dst" : dstpaths}

    # Add python helpers' deploy information. Note, python helpers are deployed only to the remote
    # host. The local copy of python helpers comes via 'setup.py'. Therefore, check them only for
    # the remote case.
    if pyhelpers and proc.is_remote:
        for pyhelper in pyhelpers:
            datapath = find_app_data("wult", _HELPERS_SRC_SUBPATH / pyhelper, appname=toolname)
            srcpaths = []
            dstpaths = []
            for deployable in _get_deployables(datapath, Procs.Proc()):
                if datapath.joinpath(deployable).exists():
                    # This case is relevant for running wult from sources - python helpers are
                    # in the 'helpers/pyhelper' directory.
                    srcpath = datapath
                else:
                    # When wult is installed with 'pip', the python helpers go to the "bindir",
                    # and they are not installed to the data directory.
                    srcpath = FSHelpers.which(deployable).parent

                srcpaths += _get_pyhelper_dependencies(srcpath / deployable)
                dstpaths.append(helpers_deploy_path / deployable)
            dinfos[pyhelper] = {"src" : srcpaths, "dst" : dstpaths}

    # We are about to get timestamps for local and remote files. Take into account the possible time
    # shift between local and remote systems.
    time_delta = 0
    if proc.is_remote:
        time_delta = time.time() - RemoteHelpers.time_time(proc=proc)

    # Compare source and destination files' timestamps.
    for what, dinfo in dinfos.items():
        src = dinfo["src"]
        src_mtime = get_newest_mtime(src)
        for dst in dinfo["dst"]:
            try:
                dst_mtime = FSHelpers.get_mtime(dst, proc)
            except ErrorNotFound:
                deployable_not_found(dst)

            if src_mtime > time_delta + dst_mtime:
                src_str = ", ".join([str(path) for path in src])
                _LOG.debug("%s src time %d + %d > dst_mtime %d\nsrc: %s\ndst %s",
                           what, src_mtime, time_delta, dst_mtime, src_str, dst)
                return True

    return False
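
At its core the check is an mtime comparison between the newest source file and every deployed file. A stripped-down local sketch of that idea, with hypothetical paths and without the remote clock-skew correction:

import os
from pathlib import Path

def newest_mtime(path):
    """Return the newest mtime among 'path' and, if it is a directory, everything under it."""

    if not path.is_dir():
        return path.stat().st_mtime

    newest = 0
    for root, _, files in os.walk(path):
        for name in files:
            newest = max(newest, Path(root, name).stat().st_mtime)
    return newest

def deploy_needed(srcdir, deployed_paths):
    """Return 'True' if any deployed file is older than the newest source file."""

    src_mtime = newest_mtime(Path(srcdir))
    return any(src_mtime > Path(dst).stat().st_mtime for dst in deployed_paths)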
Example #27
def _deploy_drivers(args, proc):
    """Deploy drivers to the SUT represented by 'proc'."""

    drvsrc = find_app_data("wult", _DRV_SRC_SUBPATH/f"{args.toolname}",
                           descr=f"{args.toolname} drivers sources")
    if not drvsrc.is_dir():
        raise Error(f"path '{drvsrc}' does not exist or it is not a directory")

    kver = None
    if not args.ksrc:
        kver = KernelVersion.get_kver(proc=proc)
        args.ksrc = Path(f"/lib/modules/{kver}/build")
    else:
        args.ksrc = FSHelpers.abspath(args.ksrc, proc=proc)

    if not FSHelpers.isdir(args.ksrc, proc=proc):
        raise Error(f"kernel sources directory '{args.ksrc}' does not exist{proc.hostmsg}")

    if not kver:
        kver = KernelVersion.get_kver_ktree(args.ksrc, proc=proc)

    _LOG.info("Kernel sources path: %s", args.ksrc)
    _LOG.info("Kernel version: %s", kver)

    if KernelVersion.kver_lt(kver, args.minkver):
        raise Error(f"version of the kernel{proc.hostmsg} is {kver}, and it is not new enough.\n"
                    f"Please, use kernel version {args.minkver} or newer.")

    _LOG.debug("copying the drivers to %s:\n   '%s' -> '%s'", proc.hostname, drvsrc, args.stmpdir)
    proc.rsync(f"{drvsrc}/", args.stmpdir / "drivers", remotesrc=False, remotedst=True)
    drvsrc = args.stmpdir / "drivers"

    kmodpath = Path(f"/lib/modules/{kver}")
    if not FSHelpers.isdir(kmodpath, proc=proc):
        raise Error(f"kernel modules directory '{kmodpath}' does not exist{proc.hostmsg}")

    # Build the drivers on the SUT.
    _LOG.info("Compiling the drivers%s", proc.hostmsg)
    cmd = f"make -C '{drvsrc}' KSRC='{args.ksrc}'"
    if args.debug:
        cmd += " V=1"

    stdout, stderr, exitcode = proc.run(cmd)
    if exitcode != 0:
        msg = proc.cmd_failed_msg(cmd, stdout, stderr, exitcode)
        if "synth_event_" in stderr:
            msg += "\n\nLooks like synthetic events support is disabled in your kernel, enable " \
                   "the 'CONFIG_SYNTH_EVENTS' kernel configuration option."
        raise Error(msg)

    _log_cmd_output(args, stdout, stderr)

    # Deploy the drivers.
    dstdir = kmodpath / _DRV_SRC_SUBPATH
    FSHelpers.mkdir(dstdir, parents=True, exist_ok=True, proc=proc)

    for name in _get_deployables(drvsrc, proc):
        installed_module = _get_module_path(proc, name)
        srcpath = drvsrc / f"{name}.ko"
        dstpath = dstdir / f"{name}.ko"
        _LOG.info("Deploying driver '%s' to '%s'%s", name, dstpath, proc.hostmsg)
        proc.rsync(srcpath, dstpath, remotesrc=True, remotedst=True)

        if installed_module and installed_module.resolve() != dstpath.resolve():
            _LOG.debug("removing old module '%s'%s", installed_module, proc.hostmsg)
            proc.run_verify(f"rm -f '{installed_module}'")

    stdout, stderr = proc.run_verify(f"depmod -a -- '{kver}'")
    _log_cmd_output(args, stdout, stderr)

    # Potentially the deployed driver may crash the system before it gets to write-back data
    # to the file-system (e.g., what 'depmod' modified). This may lead to subsequent boot
    # problems. So sync the file-system now.
    proc.run_verify("sync")
Example #28
def _create_standalone_python_script(script, pyhelperdir):
    """
    Create a standalone version of a python script 'script'. The 'pyhelperdir' argument is the path
    to the python helper sources directory on the local host. The script has to be already installed
    on the local host.

    The 'script' depends on wult modules, but this function creates a single file version of it. The
    file will be an executable zip archive containing 'script' and all the wult dependencies it has.

    The resulting standalone script will be saved in 'pyhelperdir' under the 'script'.standalone
    name.
    """

    script_path = FSHelpers.which(script)
    deps = _get_pyhelper_dependencies(script_path)

    # Create an empty '__init__.py' file. We will be adding it to the sub-directories of the
    # dependencies. For example, if one of the dependencies is 'helperlibs/Trivial.py',
    # we'll have to add '__init__.py' to 'wultlibs/' and 'helperlibs'.
    init_path = pyhelperdir / "__init__.py"
    try:
        with init_path.open("w+"):
            pass
    except OSError as err:
        raise Error(f"failed to create file '{init_path}:\n{err}'") from None

    # pylint: disable=consider-using-with
    try:
        fobj = zipobj = None

        # Start creating the stand-alone version of the script: create an empty file and write
        # python shebang there.
        standalone_path = pyhelperdir / f"{script}.standalone"
        try:
            fobj = standalone_path.open("bw+")
            fobj.write("#!/usr/bin/python3\n".encode("utf8"))
        except OSError as err:
            raise Error(f"failed to create and initialize file '{standalone_path}:\n{err}") from err

        # Create a zip archive in the 'standalone_path' file. The idea is that this file will start
        # with a python shebang, and then include a compressed version of the script and its
        # dependencies. The Python interpreter is smart and can run such zip archives.
        try:
            zipobj = zipfile.ZipFile(fobj, "w", compression=zipfile.ZIP_DEFLATED)
        except Exception as err:
            raise Error(f"faild to initialize a zip archive from file "
                        f"'{standalone_path}':\n{err}") from err

        # Make 'zipobj' raise exceptions of type 'Error', so that we do not have to wrap every
        # 'zipobj' operation into 'try/except'.
        zipobj = WrapExceptions.WrapExceptions(zipobj)

        # Put the script to the archive under the '__main__.py' name.
        zipobj.write(script_path, arcname="./__main__.py")

        pkgdirs = set()

        for src in deps:
            # Form the destination path. It is just the part of the source path starting from the
            # 'wultlibs' or 'helperlibs' component.
            try:
                idx = src.parts.index("wultlibs")
            except ValueError:
                try:
                    idx = src.parts.index("helperlibs")
                except ValueError:
                    raise Error(f"script '{script}' has bad depenency '{src}' - the path does not "
                                f"have the 'wultlibs' or 'helperlibs' component in it.") from None

            dst = Path(*src.parts[idx:])
            zipobj.write(src, arcname=dst)

            # Collect all directory paths present in the dependencies. They are all python
            # packages and we'll have to ensure we have the '__init__.py' file in each of the
            # sub-directories.
            pkgdir = dst.parent
            for idx, _ in enumerate(pkgdir.parts):
                pkgdirs.add(Path(*pkgdir.parts[:idx+1]))

        # Ensure the '__init__.py' file is present in all sub-directories.
        zipped_files = {Path(name) for name in zipobj.namelist()}
        for pkgdir in pkgdirs:
            path = pkgdir / "__init__.py"
            if path not in zipped_files:
                zipobj.write(init_path, arcname=pkgdir / "__init__.py")
    finally:
        if zipobj:
            zipobj.close()
        if fobj:
            fobj.close()
    # pylint: enable=consider-using-with

    # Make the standalone file executable.
    try:
        mode = standalone_path.stat().st_mode | 0o777
        standalone_path.chmod(mode)
    except OSError as err:
        raise Error(f"cannot change '{standalone_path}' file mode to {oct(mode)}:\n{err}") from err
Example #29
def _deploy_helpers(args, proc):
    """Deploy helpers (including python helpers) to the SUT represented by 'proc'."""

    # Python helpers need to be deployed only to a remote host. The local host already has them
    # deployed by 'setup.py'.
    if not proc.is_remote:
        args.pyhelpers = []

    helpers = args.helpers + args.pyhelpers
    if not helpers:
        return

    # We assume all helpers are in the same base directory.
    helper_path = _HELPERS_SRC_SUBPATH/f"{helpers[0]}"
    helpersrc = find_app_data("wult", helper_path, descr=f"{args.toolname} helper sources")
    helpersrc = helpersrc.parent

    if not helpersrc.is_dir():
        raise Error(f"path '{helpersrc}' does not exist or it is not a directory")

    # Make sure all helpers are available.
    for helper in helpers:
        helperdir = helpersrc / helper
        if not helperdir.is_dir():
            raise Error(f"path '{helperdir}' does not exist or it is not a directory")

    # Copy python helpers to the temporary directory on the controller.
    for pyhelper in args.pyhelpers:
        srcdir = helpersrc / pyhelper
        _LOG.debug("copying helper %s:\n  '%s' -> '%s'",
                   pyhelper, srcdir, args.ctmpdir)
        Procs.Proc().rsync(f"{srcdir}", args.ctmpdir, remotesrc=False, remotedst=False)

    # Build stand-alone version of every python helper.
    for pyhelper in args.pyhelpers:
        _LOG.info("Building a stand-alone version of '%s'", pyhelper)
        basedir = args.ctmpdir / pyhelper
        deployables = _get_deployables(basedir)
        for name in deployables:
            _create_standalone_python_script(name, basedir)

    # And copy the "standoline-ized" version of python helpers to the SUT.
    if proc.is_remote:
        for pyhelper in args.pyhelpers:
            srcdir = args.ctmpdir / pyhelper
            _LOG.debug("copying helper '%s' to %s:\n  '%s' -> '%s'",
                       pyhelper, proc.hostname, srcdir, args.stmpdir)
            proc.rsync(f"{srcdir}", args.stmpdir, remotesrc=False, remotedst=True)

    # Copy non-python helpers to the temporary directory on the SUT.
    for helper in args.helpers:
        srcdir = helpersrc / helper
        _LOG.debug("copying helper '%s' to %s:\n  '%s' -> '%s'",
                   helper, proc.hostname, srcdir, args.stmpdir)
        proc.rsync(f"{srcdir}", args.stmpdir, remotesrc=False, remotedst=True)

    deploy_path = get_helpers_deploy_path(proc, args.toolname)

    # Build the non-python helpers on the SUT.
    if args.helpers:
        for helper in args.helpers:
            _LOG.info("Compiling helper '%s'%s", helper, proc.hostmsg)
            helperpath = f"{args.stmpdir}/{helper}"
            stdout, stderr = proc.run_verify(f"make -C '{helperpath}'")
            _log_cmd_output(args, stdout, stderr)

    # Make sure the destination deployment directory exists.
    FSHelpers.mkdir(deploy_path, parents=True, exist_ok=True, proc=proc)

    # Deploy all helpers.
    _LOG.info("Deploying helpers to '%s'%s", deploy_path, proc.hostmsg)

    helpersdst = args.stmpdir / "helpers_deployed"
    _LOG.debug("deploying helpers to '%s'%s", helpersdst, proc.hostmsg)

    for helper in helpers:
        helperpath = f"{args.stmpdir}/{helper}"

        cmd = f"make -C '{helperpath}' install PREFIX='{helpersdst}'"
        stdout, stderr = proc.run_verify(cmd)
        _log_cmd_output(args, stdout, stderr)

        proc.rsync(str(helpersdst) + "/bin/", deploy_path, remotesrc=True, remotedst=True)
Example #30
    def _fetch_stat_collect_socket_path(self):
        """
        This is a helper for '_start_stats_collect()'. When 'stats-collect' starts, it prints the
        Unix socket path it is listening on for connections. This function parses the
        'stats-collect' output and fetches the socket path.
        """

        # Spend max. 5 secs waiting for 'stats-collect' to startup and print the socket file path.
        attempts = 0
        while not self._uspath and attempts < 5:
            _, _, exitcode = self._sc.wait_for_cmd(timeout=1,
                                                   capture_output=False)
            attempts += 1

            logdata = logerr = None
            try:
                with self._proc.open(self._logpath, "r") as fobj:
                    logdata = fobj.read()
            except Error as err:
                logerr = err

            if exitcode is not None:
                msg = self._proc.cmd_failed_msg(self._cmd, logdata, None,
                                                exitcode)
                if not logdata:
                    msg += f"\nCheck '{self._logpath}'{self._proc.hostmsg} for details"
                raise Error(msg)

            if not logdata:
                # The log file has not been created yet or has no data yet.
                continue

            # Search for the socket file path in the log.
            pfx = "Listening on Unix socket "
            for line in logdata.splitlines():
                if line.startswith(pfx):
                    self._uspath = line.strip()[len(pfx):]
                    break

        if self._uspath:
            _LOG.debug("stats-collect PID: %d, socket file path: %s",
                       self._sc.pid, self._uspath)

            self._sc_id = f"{self._proc.hostname}:{self._uspath}"
            msg = f"stats-collect (PID {self._sc.pid}) that reported it is listening on Unix " \
                  f"socket {self._uspath}{self._proc.hostmsg}"

            try:
                if FSHelpers.issocket(Path(self._uspath), proc=self._proc):
                    return
            except Error as err:
                msg = f"{msg}\nBut checking the file path failed: {err}"
            else:
                msg = f"{msg}\nBut this is not a Unix socket file"
        else:
            # Failed to extract socket file path.
            if exitcode is None:
                with contextlib.suppress(Error):
                    ProcHelpers.kill_pids([
                        self._sc.pid,
                    ],
                                          kill_children=True,
                                          must_die=False,
                                          proc=self._proc)

            msg = f"failed to extract socket file path from 'stats-collect' log\n" \
                  f"The command was: {self._cmd}\n" \
                  f"The log is in '{self._logpath}'{self._proc.hostmsg}"

        if logerr:
            msg += f"\nFailed to read the log file: {logerr}"
        elif logdata:
            msg += f"\nLog file contents was:\n{logdata}"

        raise Error(msg)