def get_kver_ktree(ktree, split=False, proc=None, makecmd=None):
    """
    Get version of the kernel in the kernel sources directory 'ktree'. The 'ktree' directory must
    contain an already configured kernel or it should be path to the kernel build directory if the
    kernel was compiled out of tree (make O=<ktree>).

    By default this function runs the 'make -C <ktree> --quiet -- kernelrelease' command to get
    the kernel version. However, you can use the 'makecmd' argument to override the
    'make -C <ktree>' part of it.

    The 'split' and 'proc' arguments are the same as in 'get_kver()'.
    """

    if not proc:
        proc = Procs.Proc()
    if not makecmd:
        # Use an f-string for consistency with the rest of this module.
        makecmd = f"make -C '{ktree}'"
    cmd = makecmd + " --quiet -- kernelrelease"

    try:
        kver = proc.run_verify(cmd)[0].strip()
    except proc.Error as err:
        raise Error(f"cannot detect kernel version in '{ktree}':\n{err}\nMake sure kernel "
                    f"sources are configured.")

    if split:
        return split_kver(kver)
    return kver
def mount_points(proc=None):
    """
    This generator parses '/proc/mounts' and for each mount point yields the following named
    tuples:
      * device - name of the mounted device
      * mntpoint - mount point
      * fstype - file-system type
      * options - list of options

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    mounts_file = "/proc/mounts"
    mntinfo = namedtuple("mntinfo", ["device", "mntpoint", "fstype", "options"])

    if not proc:
        proc = Procs.Proc()

    try:
        with proc.open(mounts_file, "r") as fobj:
            contents = fobj.read()
    except (OSError, Error) as err:
        # A remote 'proc.open()' raises 'Error' rather than 'OSError' (compare with 'read()'),
        # so catch both to cover the local and the remote cases.
        raise Error(f"cannot read '{mounts_file}': {err}")

    for line in contents.splitlines():
        if not line:
            continue

        device, mntpoint, fstype, options, _ = line.split(maxsplit=4)
        yield mntinfo(device, mntpoint, fstype, options.split(","))
def rsync(self, src, dst, opts="rlptD", remotesrc=True, remotedst=True):
    """
    Copy data from path 'src' to path 'dst' using the 'rsync' tool with the options given in
    'opts'. By default both 'src' and 'dst' are assumed to be on the remote host, but the
    'remotesrc' and 'remotedst' arguments can be set to 'False' to specify a local source and/or
    destination path.
    """

    cmd = f"rsync -{opts}"

    if remotesrc and remotedst:
        # Both paths are remote - run 'rsync' on the remote host itself.
        proc = self
    else:
        # At least one path is local - run 'rsync' locally over SSH.
        ssh_opts = f"-o \"Port={self.port}\" -o \"User={self.username}\""
        if self.privkeypath:
            ssh_opts += f" -o \"IdentityFile={self.privkeypath}\""

        proc = Procs.Proc()
        cmd += f" -e 'ssh {ssh_opts}'"

        if remotesrc:
            src = f"{self.hostname}:{src}"
        if remotedst:
            dst = f"{self.hostname}:{dst}"

    try:
        proc.run_verify(f"{cmd} -- '{src}' '{dst}'")
    except proc.Error as err:
        raise Error(f"failed to copy files '{src}' to '{dst}':\n{err}")
def mkdir(dirpath: Path, parents: bool = False, exist_ok: bool = False, proc=None):
    """
    Create a directory 'dirpath'. If 'parents' is 'True', create the missing parent directories
    as well. If the directory already exists, this function raises an exception if 'exist_ok' is
    'False', and returns without an error if 'exist_ok' is 'True' (the original docstring had
    these two cases swapped - the code below checks 'if exist_ok: return').

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    if not proc:
        proc = Procs.Proc()

    if shell_test(dirpath, "-e", proc=proc):
        if exist_ok:
            return
        raise Error(f"path '{dirpath}' already exists{proc.hostmsg}")

    if proc.is_remote:
        cmd = "mkdir"
        if parents:
            cmd += " -p"
        cmd += f" -- '{dirpath}'"
        proc.run_verify(cmd)
    else:
        try:
            dirpath.mkdir(parents=parents, exist_ok=exist_ok)
        except OSError as err:
            raise Error(f"failed to create directory '{dirpath}':\n{err}")
def get_path_pairs(proc, toolname, helperpath):
    """
    Yield '(srcpath, deploypath)' pairs for the 'toolname' driver and helper-tool source code and
    their deployables. The arguments are the same as in 'is_deploy_needed()'.
    """

    lproc = Procs.Proc()

    for subpath, is_drv in ((_DRV_SRC_SUBPATH, True), (_HELPERS_SRC_SUBPATH, False)):
        srcpath = FSHelpers.search_for_app_data(toolname, subpath / toolname, default=None)
        if not srcpath:
            # Some tools do not have helpers.
            continue

        for deployable in _get_deployables(srcpath, lproc):
            # A deployable is either a driver module or a helper tool.
            if is_drv:
                deploypath = _get_module_path(proc, deployable)
            elif helperpath and helperpath.name == deployable:
                deploypath = helperpath
            else:
                deploypath = get_helpers_deploy_path(proc, toolname)
                deploypath = Path(deploypath, "bin", deployable)

            yield srcpath, deploypath
def find_processes(regex: str, proc=None):
    """
    Find all processes which match the 'regex' regular expression on the host defined by 'proc'.
    The regular expression is matched against the process executable name + command-line
    arguments.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.

    Returns a list of '(pid, cmdline)' tuples, where 'pid' is the integer process ID and
    'cmdline' is the command-line string.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = "ps axo pid,args"
    stdout, stderr = proc.run_verify(cmd, join=False)

    if len(stdout) < 2:
        raise Error(f"no processes found at all{proc.hostmsg}\nExecuted this command:\n{cmd}\n"
                    f"stdout:\n{stdout}\nstderr:{stderr}\n")

    procs = []
    # The first line of the 'ps' output is the header - skip it.
    for line in stdout[1:]:
        pid, comm = line.strip().split(" ", 1)
        pid = int(pid)
        if proc.hostname == "localhost" and pid == Trivial.get_pid():
            # Skip our own process.
            continue
        if re.search(regex, comm):
            # 'pid' is already an 'int' - the original code converted it a second time.
            procs.append((pid, comm))

    return procs
def mktemp(prefix: str = None, tmpdir: Path = None, proc=None):
    """
    Create a temporary directory by running the 'mktemp' tool. The 'prefix' argument can be used
    to specify the temporary directory name prefix. The 'tmpdir' argument is the path to the base
    directory where the temporary directory should be created.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = "mktemp -d -t '"
    if prefix:
        cmd += prefix
    cmd += "XXXXXX'"
    if tmpdir:
        # Bug fix: this string was missing the 'f' prefix, so the literal text '{tmpdir}' was
        # passed to 'mktemp' instead of the actual base directory path.
        cmd += f" -p '{tmpdir}'"

    path, _ = proc.run_verify(cmd)
    path = path.strip()
    if not path:
        raise Error(f"cannot create a temporary directory{proc.hostmsg}, the following command "
                    f"returned an empty string:\n{cmd}")

    _LOG.debug("created a temporary directory '%s'%s", path, proc.hostmsg)
    return Path(path)
def get_kver_bin(path, split=False, proc=None):
    """
    Get version of a kernel binary at 'path'. The 'split' and 'proc' arguments are the same as in
    'get_kver()'.
    """

    if not proc:
        proc = Procs.Proc()

    # Quote the path so that paths with spaces or shell-special characters work, consistently
    # with the other helpers in this module.
    cmd = f"file -- '{path}'"
    stdout = proc.run_verify(cmd)[0].strip()

    msg = f"ran this command: {cmd}, got output:\n{stdout}"

    matchobj = re.match(r".* Linux kernel.* executable .*", stdout)
    if not matchobj:
        raise Error(f"file at '{path}'{proc.hostmsg} is not a Linux kernel binary file\n{msg}")

    matchobj = re.match(r".* version ([^ ]+) .*", stdout)
    if not matchobj:
        raise Error(f"{msg}\nFailed to find kernel version in the output.")

    kver = matchobj.group(1)
    if split:
        return split_kver(kver)
    return kver
def lsdir(path: Path, must_exist: bool = True, proc=None):
    """
    For each directory entry in 'path', yield the '(name, path, type)' tuple, where 'name' is the
    direntry name, 'path' is the full directory entry path, and 'type' is the file type indicator
    (see 'ls --file-type' for details).

    If 'path' does not exist, this function raises an exception. However, this behavior can be
    changed with the 'must_exist' argument. If 'must_exist' is 'False', this function just
    returns and does not yield anything.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    if not proc:
        proc = Procs.Proc()

    if not must_exist and not exists(path, proc=proc):
        return

    stdout, _ = proc.run_verify(f"ls -c -1 --file-type -- '{path}'", join=False)
    if not stdout:
        return

    for entry in (entry.strip() for entry in stdout):
        if not entry:
            # Robustness fix: a blank output line would make 'entry[-1]' below raise
            # 'IndexError' - skip empty entries.
            continue
        ftype = ""
        if entry[-1] in "/=>@|":
            ftype = entry[-1]
            entry = entry[:-1]
        yield (entry, Path(f"{path}/{entry}"), ftype)
def _scp(self, src, dst):
    """
    Helper that copies 'src' to 'dst' using the 'scp' tool. The file names must already be
    quoted. The remote path should use double quoting, otherwise 'scp' fails if the path contains
    symbols like ')'.
    """

    opts = f"-o \"Port={self.port}\" -o \"User={self.username}\""
    if self.privkeypath:
        opts += f" -o \"IdentityFile={self.privkeypath}\""

    try:
        Procs.run_verify(f"scp -r {opts} -- {src} {dst}")
    except Procs.Error as err:
        raise Error(f"failed to copy files '{src}' to '{dst}':\n{err}")
def get_proc(args, hostname):
    """Return an 'SSH' object for 'hostname', or a 'Procs.Proc' object for the local host."""

    if hostname == "localhost":
        return Procs.Proc()

    return SSH.SSH(hostname=hostname, username=args.username, privkeypath=args.privkey,
                   timeout=args.timeout)
def __new__(cls, *_, **kwargs):
    """
    Ensure that creating an 'SSH' object with a 'None' 'hostname' results in an instance of the
    'Proc' class instead of the 'SSH' class. The two classes have a similar API.
    """

    # 'dict.get()' returns 'None' both when the key is missing and when it maps to 'None',
    # which is exactly the condition the original spelled out explicitly.
    if kwargs.get("hostname") is None:
        return Procs.Proc()

    return super().__new__(cls)
def __init__(self, proc=None):
    """
    The class constructor. The 'proc' argument is a 'Proc' or 'SSH' object that defines the host
    to operate on (default is the local host).
    """

    if not proc:
        proc = Procs.Proc()

    self._proc = proc
    self._lspci_bin = "lspci"

    # This class relies on the 'lspci' tool - refuse to instantiate without it.
    if not FSHelpers.which(self._lspci_bin, default=None, proc=proc):
        raise ErrorNotSupported(f"the '{self._lspci_bin}' tool is not installed{proc.hostmsg}")
def __init__(self, proc=None):
    """
    The class constructor. The 'proc' argument is a 'Proc' or 'SSH' object that defines the host
    to create a class instance for (default is the local host). This object keeps a 'proc'
    reference and uses it in various methods.
    """

    self._proc = proc if proc else Procs.Proc()
    # Lazily-populated state.
    self.cpugeom = None
    self._lscpu_cache = None
def __init__(self, proc=None):
    """
    Initialize a class instance for the host associated with the 'proc' object. By default this
    is the local host, but 'proc' can be used to pass a connected 'SSH' object, in which case all
    operations will be done on the remote host. This object keeps a 'proc' reference and uses it
    in various methods.
    """

    if not proc:
        proc = Procs.Proc()

    self._proc = proc
    # Remembers which interfaces were switched to/from the "managed" state.
    self._saved_managed = OrderedDict()

    # This class relies on the 'nmcli' tool - refuse to instantiate without it.
    if not FSHelpers.which("nmcli", default=None, proc=proc):
        raise ErrorNotSupported(f"the 'nmcli' tool is not installed{proc.hostmsg}")
def get_kver(split=False, proc=None):
    """
    Return the version of the kernel running on the host associated with the 'proc' object. By
    default this is the local system, but one can pass a connected 'SSH' object via 'proc' in
    order to get the version of the kernel running on a remote system.

    By default this function returns the kernel version string (e.g., "4.18.1-build0"), but if
    'split' is 'True', it returns the split kernel version (refer to 'split_kver()' for the
    details).
    """

    if not proc:
        proc = Procs.Proc()

    kver = proc.run_verify("uname -r")[0].strip()

    return split_kver(kver) if split else kver
def shell_test(path: Path, opt: str, proc=None):
    """
    Run a shell test against path 'path'. The 'opt' argument specifies the 'test' command
    options. For example, pass '-f' to run 'test -f', which returns 0 if 'path' exists and is a
    regular file, and 1 otherwise.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    if not proc:
        proc = Procs.Proc()

    cmd = f"test {opt} '{path}'"
    stdout, stderr, exitcode = proc.run(cmd)
    if not stdout and not stderr and exitcode in (0, 1):
        return exitcode == 0

    # Any output or an unexpected exit code means the 'test' command itself failed to run.
    raise Error(proc.cmd_failed_msg(cmd, stdout, stderr, exitcode))
def read(path, default=_RAISE, proc=None):
    """
    Read file 'path' and return its contents with leading/trailing whitespace stripped. On
    failure, return 'default', or raise an exception if no 'default' value was provided.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    if not proc:
        proc = Procs.Proc()

    try:
        with proc.open(path, "r") as fobj:
            return fobj.read().strip()
    except Error as err:
        if default is _RAISE:
            raise Error(f"failed to read file '{path}'{proc.hostmsg}:\n{err}")
        return default
def kill_processes(regex: str, sig: str = "SIGTERM", log: bool = False, name: str = None,
                   proc=None):
    """
    Kill or signal all processes matching the 'regex' regular expression on the host defined by
    'proc'. The regular expression is matched against the process executable name + command-line
    arguments.

    By default the processes are killed (SIGTERM), but you can specify any signal either by name
    or by number.

    If 'log' is 'True', then this function also prints a message which includes the PIDs of the
    processes which are going to be killed. The 'name' argument is a human-readable name of the
    processes being killed - this name becomes part of the printed message.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.

    Returns the list of found and killed processes.
    """

    if not proc:
        proc = Procs.Proc()

    procs = find_processes(regex, proc=proc)
    if not procs:
        return []

    pids = [pid for pid, _ in procs]
    if log:
        if not name:
            name = "the following process(es)"
        pids_str = ", ".join([str(pid) for pid in pids])
        _LOG.info("Sending '%s' signal to %s%s, PID(s): %s", sig, name, proc.hostmsg, pids_str)

    # Only kill children when actually killing (SIGTERM/SIGKILL), not for arbitrary signals.
    killing = _is_sigterm(sig) or _is_sigkill(sig)
    kill_pids(pids, sig=sig, kill_children=killing, proc=proc)

    return procs
def __init__(self, ifid, proc=None):
    """
    Initialize a class instance for the network interface corresponding to 'ifid' on the host
    associated with the 'proc' object. The 'ifid' argument can be either the network interface
    name or its hardware address (e.g., the PCI address of the network card corresponding to the
    network interface).

    By default this class is initialized for the local host, but 'proc' can be used to pass a
    connected 'SSH' object, in which case all operations will be done on the remote host. This
    object will keep a 'proc' reference and use it in various methods.
    """

    if not proc:
        proc = Procs.Proc()

    self._ifid = ifid
    self._proc = proc
    # Resolved interface name, hardware address and sysfs base path - filled in below.
    self.ifname = None
    self.hwaddr = None
    self._sysfsbase = None
    # State saved/restored by other methods of this class.
    self._saved_ip_info = {}
    self._ip_tool_present = None

    # If a sysfs directory named after 'ifid' exists, 'ifid' is an interface name; otherwise
    # assume it is a hardware address and resolve the interface name from it.
    sysfsbase = _SYSFSBASE.joinpath(ifid)
    if FSHelpers.isdir(sysfsbase, proc=proc):
        # 'ifid' is a network interface name.
        self.ifname = ifid
        self._sysfsbase = sysfsbase
        self.hwaddr = self._get_hw_addr()
    else:
        # 'ifid' is probably a HW address (e.g., PCI address).
        self.ifname = self._hw_addr_to_ifname()
        if not self.ifname:
            self._raise_iface_not_found()
        self.hwaddr = ifid
        self._sysfsbase = _SYSFSBASE.joinpath(self.ifname)
def kill_pids(pids, sig: str = "SIGTERM", kill_children: bool = False, must_die: bool = False,
              proc=None):
    """
    This function kills or signals processes with PIDs in 'pids' on the host defined by 'proc'.
    The 'pids' argument can be a collection of PID numbers ('int' or 'str' types) or a single PID
    number.

    By default the processes are killed (SIGTERM), but you can specify any signal either by name
    or by number.

    The 'kill_children' and 'must_die' arguments must only be used when killing processes
    (SIGTERM or SIGKILL). The 'kill_children' argument controls whether this function should also
    try killing the children. If the 'must_die' argument is 'True', then this function also
    verifies that the process(es) did actually die, and if any of them did not die, it raises an
    exception.

    By default this function operates on the local host, but the 'proc' argument can be used to
    pass a connected 'SSH' object in which case this function will operate on the remote host.
    """

    def collect_zombies(proc):
        """In case of a local process we need to 'waitpid()' the children."""
        if not proc.is_remote:
            with contextlib.suppress(OSError):
                os.waitpid(0, os.WNOHANG)

    if not proc:
        proc = Procs.Proc()

    if not pids:
        return

    if not Trivial.is_iterable(pids):
        pids = (pids, )

    # Normalize all PIDs to strings, validating that each one is an integer.
    pids = [str(int(pid)) for pid in pids]

    if sig is None:
        sig = "SIGTERM"
    else:
        sig = str(sig)

    killing = _is_sigterm(sig) or _is_sigkill(sig)
    if (kill_children or must_die) and not killing:
        raise Error(f"'children' and 'must_die' arguments cannot be used with '{sig}' signal")

    if kill_children:
        # Find all the children of the process.
        # Note: 'pids' grows while being iterated, so children found here are themselves
        # scanned for children - presumably a deliberate recursive descent; confirm.
        for pid in pids:
            children, _, exitcode = proc.run(f"pgrep -P {pid}", join=False)
            if exitcode != 0:
                # NOTE(review): 'break' stops the child lookup for all remaining PIDs as soon
                # as one PID has no children - 'continue' may have been intended. Confirm.
                break
            pids += [child.strip() for child in children]

    pids_spc = " ".join(pids)
    pids_comma = ",".join(pids)
    _LOG.debug("sending '%s' signal to the following process%s: %s",
               sig, proc.hostmsg, pids_comma)

    try:
        proc.run_verify(f"kill -{sig} -- {pids_spc}")
    except Error as err:
        if not killing:
            raise Error(f"failed to send signal '{sig}' to PIDs '{pids_comma}'{proc.hostmsg}:\n"
                        f"{err}")
        # Some error happened on the first attempt. We've seen a couple of situations when this
        # happens.
        # 1. Most often, a PID does not exist anymore, the process exited already (race
        #    condition).
        # 2. One of the processes in the list is owned by a different user (e.g., root). Let's
        #    call it process A. We have no permissions to kill process A, but we can kill other
        #    processes in the 'pids' list. But often killing other processes in the 'pids' list
        #    will make process A exit. This is why we do not error out just yet.
        #
        # So the strategy is to do the second signal sending round and often times it happens
        # without errors, and all the processes that we want to kill just go away.

    if not killing:
        # A non-killing signal was delivered successfully - nothing more to verify.
        return

    # Give the processes up to 4 seconds to die.
    timeout = 4
    start_time = time.time()
    while time.time() - start_time <= timeout:
        collect_zombies(proc)
        # 'kill -0' does not deliver a signal - it only checks whether the PIDs still exist.
        _, _, exitcode = proc.run(f"kill -0 -- {pids_spc}")
        if exitcode != 0:
            return
        time.sleep(0.2)

    if _is_sigterm(sig):
        # Something refused to die, try SIGKILL.
        try:
            proc.run_verify(f"kill -9 -- {pids_spc}")
        except Error as err:
            # It is fine if one of the processes exited meanwhile.
            if "No such process" not in str(err):
                raise

    collect_zombies(proc)
    if not must_die:
        return

    # Give the processes up to 4 seconds to die.
    timeout = 4
    start_time = time.time()
    while time.time() - start_time <= timeout:
        collect_zombies(proc)
        _, _, exitcode = proc.run(f"kill -0 -- {pids_spc}")
        if exitcode != 0:
            return
        time.sleep(0.2)

    # Something refused to die, find out what.
    msg, _, = proc.run_verify(f"ps -f {pids_spc}", join=False)
    if len(msg) < 2:
        msg = pids_comma

    raise Error(f"one of the following processes{proc.hostmsg} did not die after 'SIGKILL': {msg}")