def validate_cpunum(cpunum, proc=None):
    """
    Validate CPU number 'cpunum'.

    If 'proc' is not provided, this function just checks that 'cpunum' is a positive integer
    number, nothing else. If 'proc' is provided, then this function discovers the CPU count on
    the host associated with 'proc', and verifies that 'cpunum' does not exceed the host CPU
    count and that the CPU is online. Note, 'proc' should be an 'SSH' or 'Proc' object.
    """

    if not Trivial.is_int(cpunum) or int(cpunum) < 0:
        raise Error(f"bad CPU number '{cpunum}', should be a positive integer")

    cpunum = int(cpunum)

    if proc:
        with CPUInfo.CPUInfo(proc=proc) as cpuinfo:
            cpugeom = cpuinfo.get_cpu_geometry()

        if cpunum in cpugeom["offcpus"]:
            raise Error(f"CPU '{cpunum}'{proc.hostmsg} is offline")
        if cpunum not in cpugeom["cpus"]:
            raise Error(f"CPU '{cpunum}' does not exist{proc.hostmsg}")

    return cpunum
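# A minimal usage sketch for 'validate_cpunum()' (illustrative only; 'proc' is assumed to be an
# already-established 'SSH' or 'Proc' object):
#
#   cpunum = validate_cpunum("3")           # -> 3, only basic input validation
#   cpunum = validate_cpunum(3, proc=proc)  # additionally verifies CPU 3 exists and is online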
def _get_cstates_info(self, cpus, indexes, ordered):
    """Implements 'get_cstates_info()'."""

    indexes_regex = cpus_regex = "[[:digit:]]+"
    if cpus is not None:
        cpus_regex = "|".join([str(cpu) for cpu in cpus])
    if indexes is not None:
        indexes_regex = "|".join([str(index) for index in indexes])

    cmd = fr"find '{self._sysfs_base}' -type f -regextype posix-extended " \
          fr"-regex '.*cpu({cpus_regex})/cpuidle/state({indexes_regex})/[^/]+' " \
          fr"-exec printf '%s' {{}}: \; -exec grep . {{}} \;"

    stdout, _ = self._proc.run_verify(cmd, join=False)
    if not stdout:
        raise Error(f"failed to find C-states information in '{self._sysfs_base}'"
                    f"{self._proc.hostmsg}")

    if ordered:
        stdout = sorted(stdout)

    regex = re.compile(r".+/cpu([0-9]+)/cpuidle/state([0-9]+)/(.+):([^\n]+)")
    info = {}
    index = prev_index = cpu = prev_cpu = None

    for line in stdout:
        matchobj = re.match(regex, line)
        if not matchobj:
            raise Error(f"failed to parse the following line from a file in "
                        f"'{self._sysfs_base}'{self._proc.hostmsg}:\n{line.strip()}")

        cpu = int(matchobj.group(1))
        index = int(matchobj.group(2))
        key = matchobj.group(3)
        val = matchobj.group(4)
        if Trivial.is_int(val):
            val = int(val)

        if prev_cpu is None:
            prev_cpu = cpu
        if prev_index is None:
            prev_index = index

        if cpu != prev_cpu or index != prev_index:
            info["cpu"] = prev_cpu
            info["index"] = prev_index
            yield info
            prev_cpu = cpu
            prev_index = index
            info = {}

        info[key] = val

    info["cpu"] = prev_cpu
    info["index"] = prev_index
    yield info
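# For illustration, with 'cpus=[0, 1]' and 'indexes=[2]' the 'find' command constructed by
# '_get_cstates_info()' would look along these lines (the sysfs base path shown here is the
# usual default and is an assumption):
#
#   find '/sys/devices/system/cpu' -type f -regextype posix-extended \
#        -regex '.*cpu(0|1)/cpuidle/state(2)/[^/]+' \
#        -exec printf '%s' {}: \; -exec grep . {} \;
#
# Each output line then looks like "/sys/devices/system/cpu/cpu0/cpuidle/state2/name:C1E" (the
# C-state name is just an example), which the regular expression in '_get_cstates_info()' splits
# into the CPU number, the C-state index, the key and the value.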
def _get_level(self, start, end, nums=None):
    """
    Returns a list of level 'end' values belonging to level 'start' for each ID in 'nums'.
    Returns all values if 'nums' is None or "all". Offline CPUs are ignored.
    """

    if start not in LEVELS or end not in LEVELS:
        levels = ", ".join(LEVELS)
        raise Error(f"bad levels '{start}','{end}', use: {levels}")

    start_idx = LEVELS.index(start)
    end_idx = LEVELS.index(end)
    if start_idx > end_idx:
        raise Error(f"bad level order, cannot get {end}s from level '{start}'")

    items = {}
    for line in self._get_lscpu():
        if line.startswith("#"):
            continue
        # Each line has comma-separated integers for socket, node, core and cpu. For example:
        # "1,1,9,61,Y". In case of an offline CPU, the last element is "N".
        line = line.strip().split(",")
        if line[-1] != "Y":
            # Skip offline CPUs.
            continue
        line = [int(val) for val in line[0:-1]]
        if line[start_idx] in items.keys():
            items[line[start_idx]].append(line[end_idx])
        else:
            items[line[start_idx]] = [line[end_idx]]

    # So now 'items' is a dictionary with keys being the 'start' level elements and values being
    # lists of the 'end' level elements.
    #
    # For example, suppose we are looking for CPUs in packages, and the system has 2 packages,
    # each containing 8 CPUs. The 'items' dictionary will look like this:
    #   items[0] = [0, 2, 4, 6, 8, 10, 12, 14]
    #   items[1] = [1, 3, 5, 7, 9, 11, 13, 15]
    # In this example, package 0 includes the CPUs with even numbers, and package 1 includes the
    # CPUs with odd numbers.

    if not nums or nums == "all":
        nums = list(items.keys())

    result = []
    for num in nums:
        if not Trivial.is_int(num):
            raise Error(f"bad {start} number '{num}', should be an integer")
        num = int(num)
        if num not in items:
            items_str = ", ".join(str(key) for key in items)
            raise Error(f"{start} {num} does not exist{self._proc.hostmsg}, use: {items_str}")
        result += items[num]

    return Trivial.list_dedup(result)
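# An illustrative sketch of '_get_level()' usage, based on the 2-package example in the comments
# above (the 'package' and 'cpu' level names are assumptions about the contents of 'LEVELS'):
#
#   self._get_level("package", "cpu", nums=[0])  # -> [0, 2, 4, 6, 8, 10, 12, 14]
#   self._get_level("package", "cpu")            # -> all online CPUs in both packages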
def is_root(proc=None):
    """
    If 'proc' is 'None' or a 'Proc' object, return 'True' if the current process' user name is
    'root' and 'False' otherwise. If 'proc' is an 'SSH' object, return 'True' if the SSH user has
    'root' permissions on the remote host, otherwise return 'False'.
    """

    if not proc or not proc.is_remote:
        return Trivial.is_root()

    stdout, _ = proc.run_verify("id -u")
    stdout = stdout.strip()
    if not Trivial.is_int(stdout):
        raise Error(f"unexpected output from 'id -u' command, expected an integer, got:\n{stdout}")

    return int(stdout) == 0
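# A minimal usage sketch (illustrative only; 'proc' is assumed to be an already-established
# 'SSH' object for a remote host):
#
#   if not is_root():           # check the local user
#       raise Error("this tool should be run as 'root'")
#   if not is_root(proc=proc):  # check the remote SSH user
#       raise Error(f"'root' permissions are required{proc.hostmsg}")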
def _get_cstate_indexes(self, cpu):
    """Yield tuples of C-state indexes and sysfs paths for CPU number 'cpu'."""

    basedir = self._sysfs_base / f"cpu{cpu}" / "cpuidle"
    name = None
    for name, path, typ in FSHelpers.lsdir(basedir, proc=self._proc):
        errmsg = f"unexpected entry '{name}' in '{basedir}'{self._proc.hostmsg}"
        if typ != "/" or not name.startswith("state"):
            raise Error(errmsg)
        index = name[len("state"):]
        if not Trivial.is_int(index):
            raise Error(errmsg)
        yield int(index), Path(path)

    if name is None:
        raise Error(f"C-states are not supported{self._proc.hostmsg}")
def set_post_trigger(self, path, trange=None):
    """
    Configure the post-trigger - a program that has to be executed after a datapoint is
    collected. The arguments are as follows.
      * path - path to the executable program to run. The program will be executed with the
               '--latency <value>' option, where '<value>' is the observed wake latency value in
               nanoseconds.
      * trange - the post-trigger range. By default, the trigger program is executed on every
                 datapoint. But if the trigger range is provided, the trigger program will be
                 executed only when the wake latency is in the trigger range.
    """

    if not FSHelpers.isexe(path, proc=self._proc):
        raise Error(f"post-trigger program '{path}' does not exist{self._proc.hostmsg} or it "
                    f"is not an executable file")

    self._post_trigger = path

    if trange is not None:
        vals = Trivial.split_csv_line(trange)
        for idx, val in enumerate(vals):
            if not Trivial.is_int(val):
                raise Error(f"bad post-trigger range value '{val}', should be an integer "
                            f"amount of nanoseconds")
            vals[idx] = Trivial.str_to_num(val, default=None)
            if vals[idx] < 0:
                raise Error(f"bad post-trigger range value '{vals[idx]}', should be greater or "
                            f"equal to zero")

        if len(vals) != 2:
            raise Error(f"bad post-trigger range '{trange}', it should include 2 numbers")

        if vals[1] - vals[0] < 0:
            raise Error(f"bad post-trigger range '{trange}', the first number cannot be greater "
                        f"than the second number")

        self._post_trigger_range = vals
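# An illustrative call (the trigger program path is hypothetical): run
# '/usr/bin/my-trigger --latency <nsec>' only for datapoints whose wake latency falls within
# the 100-200 nanoseconds range.
#
#   self.set_post_trigger("/usr/bin/my-trigger", trange="100,200")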
def _normalize_cstates(self, cstates):
    """
    Some methods accept the C-states to operate on as a string or a list. There may be C-state
    names or indexes in the list. This method turns the user input into a list of C-state
    indexes and returns this list.
    """

    if isinstance(cstates, int):
        cstates = str(cstates)

    if cstates is not None:
        if isinstance(cstates, str):
            cstates = Trivial.split_csv_line(cstates, dedup=True)

        indexes = []
        for cstate in cstates:
            if not Trivial.is_int(cstate):
                cstate = self._name2idx(cstate)
            indexes.append(int(cstate))
        cstates = indexes

    return cstates
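# An illustrative sketch (the name-to-index mapping comes from '_name2idx()' and is
# platform-dependent, so the index values below are assumptions):
#
#   self._normalize_cstates("C1,2,C6")  # -> e.g. [1, 2, 3] if 'C1' maps to 1 and 'C6' to 3
#   self._normalize_cstates(0)          # -> [0]
#   self._normalize_cstates(None)       # -> None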
def get_dpcnt(res, dpcnt):
    """
    This helper function validates the number of datapoints the user requested to collect
    ('dpcnt'). It also looks at how many datapoints are already present in the 'res' object
    (represents a raw test result) and returns the number of datapoints to collect in order for
    'res' to end up with 'dpcnt' datapoints.
    """

    if not Trivial.is_int(dpcnt) or int(dpcnt) <= 0:
        raise Error(f"bad datapoints count '{dpcnt}', should be a positive integer")

    dpcnt = int(dpcnt) - res.csv.initial_rows_cnt
    if dpcnt <= 0:
        _LOG.info("Raw test result at '%s' already includes %d datapoints",
                  res.dirpath, res.csv.initial_rows_cnt)
        _LOG.info("Nothing to collect")
        return 0

    return dpcnt
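# An illustrative sketch (the numbers are made up): if the user requested 10000 datapoints and
# the raw result already contains 4000 rows, 'get_dpcnt()' returns 6000; if it already contains
# 10000 or more rows, it logs that there is nothing to collect and returns 0.
#
#   dpcnt = get_dpcnt(res, "10000")  # -> 6000 when 'res.csv.initial_rows_cnt' is 4000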
def parse_int_list(nums, ints=False):
    """
    Turn a string containing a comma-separated list of numbers and ranges into a list of numbers
    and return it. For example, a string like "0,1-3,7" would become ["0", "1", "2", "3", "7"].
    The 'ints' argument controls whether the resulting list should contain strings or integers.
    """

    if nums is None:
        return None

    if isinstance(nums, int):
        nums = str(nums)
    if isinstance(nums, str):
        nums = Trivial.split_csv_line(nums)

    nums_set = set()
    for elts in nums:
        elts = str(elts)
        if "-" in elts:
            elts = Trivial.split_csv_line(elts, sep="-")
            if len(elts) != 2:
                raise Error("bad range '%s', should be two integers separated by '-'" % elts)
        else:
            elts = [elts]

        for elt in elts:
            if not Trivial.is_int(elt):
                raise Error("bad number '%s', should be an integer" % elt)

        if len(elts) > 1:
            if int(elts[0]) > int(elts[1]):
                raise Error("bad range %s-%s, the first number should be smaller than the second"
                            % (elts[0], elts[1]))
            nums_set.update([str(elt) for elt in range(int(elts[0]), int(elts[1]) + 1)])
        else:
            nums_set.add(elts[0])

    result = sorted([int(num) for num in nums_set])
    if not ints:
        result = [str(num) for num in result]

    return result
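# A minimal usage sketch demonstrating range expansion and the 'ints' flag:
#
#   parse_int_list("0,1-3,7")              # -> ["0", "1", "2", "3", "7"]
#   parse_int_list("0,1-3,7", ints=True)   # -> [0, 1, 2, 3, 7]
#   parse_int_list([2, "0-1"], ints=True)  # -> [0, 1, 2]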
def _get_latency(self, dp):
    """
    Read the next latency data line from the 'ndlrunner' helper, parse it, and save the result
    in the 'dp' dictionary.
    """

    line = self._get_line(prefix="datapoint")
    line = Trivial.split_csv_line(line)
    if len(line) != 2:
        msg = self._unexpected_line_error_prefix(line)
        raise Error(f"{msg}\nExpected 2 comma-separated integers, got {len(line)}")

    for val in line:
        if not Trivial.is_int(val):
            msg = self._unexpected_line_error_prefix(line)
            raise Error(f"{msg}\nExpected 2 comma-separated integers, got a non-integer "
                        f"'{val}'")

    dp["RTD"] = int(line[0])
    dp["LDist"] = int(line[1])
def set_post_trigger(self, path, thresh=None):
    """
    Configure the post-trigger - a program that has to be executed after a datapoint is
    collected. The arguments are as follows.
      * path - path to the executable program to run. The program will be executed with the
               '--latency <value>' option, where '<value>' is the observed latency value in
               nanoseconds.
      * thresh - the post-trigger threshold. By default, the trigger program is executed on
                 every datapoint. But if a threshold is provided, the trigger program will be
                 executed only when the latency exceeds the threshold.
    """

    if not FSHelpers.isexe(path, proc=self._proc):
        raise Error(f"file '{path}' does not exist or it is not an executable file")

    self._post_trigger = path

    if thresh is not None:
        if not Trivial.is_int(thresh):
            raise Error(f"bad post-trigger threshold value '{thresh}', should be an integer "
                        f"amount of nanoseconds")
        self._post_trigger_thresh = int(thresh)
def prepare(self):
    """Prepare to start measurements."""

    # Ensure the kernel is fresh enough.
    kver = KernelVersion.get_kver(proc=self._proc)
    if KernelVersion.kver_lt(kver, "5.1-rc1"):
        raise Error(f"version of the running kernel{self._proc.hostmsg} is {kver}, but it does "
                    f"not support the ETF qdisc.\nPlease, use kernel version 5.1 or newer")

    try:
        self._nmcli = _Nmcli.Nmcli(proc=self._proc)
    except ErrorNotSupported:
        pass
    else:
        # We have to configure the I210 network interface in a special way, but if it is managed
        # by NetworkManager, the configuration may get reset at any point. Therefore, detach the
        # network interface from NetworkManager.
        _LOG.info("Detaching network interface '%s' from NetworkManager%s", self._ifname,
                  self._proc.hostmsg)
        self._nmcli.unmanage(self._ifname)

    # Ensure the interface exists and has carrier. It must be brought up before we can check the
    # carrier status.
    self._netif = _NetIface.NetIface(self._ifname, proc=self._proc)
    self._netif.up()
    self._netif.wait_for_carrier(10)

    # Make sure the network interface has an IP address.
    ipaddr = self._netif.get_ipv4_addr(default=None)
    if ipaddr:
        _LOG.debug("network interface '%s'%s has IP address '%s'", self._ifname,
                   self._proc.hostmsg, ipaddr)
    else:
        ipaddr = self._netif.get_unique_ipv4_addr()
        ipaddr += "/16"
        self._netif.set_ipv4_addr(ipaddr)
        # Ensure the IP was set.
        self._netif.get_ipv4_addr()
        _LOG.info("Assigned IP address '%s' to interface '%s'%s", ipaddr, self._ifname,
                  self._proc.hostmsg)

    self._drv.load(unload=True, opts=f"ifname={self._ifname}")

    # We use the ETF qdisc for scheduling delayed network packets. Configure it and start the
    # 'phc2sys' process in background in order to keep the host and NIC clocks in sync.
    # Get the TAI offset first.
    stdout, _ = self._proc.run_verify(f"{self._ndlrunner_bin} --tai-offset")
    tai_offset = self._get_line(prefix="TAI offset", line=stdout)
    if not Trivial.is_int(tai_offset):
        raise Error(f"unexpected 'ndlrunner --tai-offset' output:\n{stdout}")

    _LOG.info("Configuring the ETF qdisc%s", self._proc.hostmsg)
    self._etfqdisc.configure()
    _LOG.info("Starting NIC-to-system clock synchronization process%s", self._proc.hostmsg)
    self._etfqdisc.start_phc2sys(tai_offset=int(tai_offset))