def execute(self, iterations=1):
    """Run the test while preserving the host's dirty-page sysctls.

    Records /proc/sys/vm/dirty_ratio and dirty_background_ratio before
    the run and restores them afterwards, even if the test raises.

    :param iterations: number of test iterations to run.
    """
    saved_dirty = utils.read_one_line('/proc/sys/vm/dirty_ratio')
    saved_background = utils.read_one_line(
        '/proc/sys/vm/dirty_background_ratio')
    try:
        self.run_the_test(iterations)
    finally:
        # Always put the writeback tunables back, pass or fail.
        utils.write_one_line('/proc/sys/vm/dirty_ratio', saved_dirty)
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio',
                             saved_background)
def execute(self, iterations=1):
    """Execute the test, protecting the host's dirty-page settings.

    /proc/sys/vm/dirty_ratio and /proc/sys/vm/dirty_background_ratio
    are snapshotted up front and written back once the run completes,
    whether or not the test raised.

    :param iterations: how many times to run the test.
    """
    ratio_paths = ('/proc/sys/vm/dirty_ratio',
                   '/proc/sys/vm/dirty_background_ratio')
    saved_values = [utils.read_one_line(p) for p in ratio_paths]
    try:
        self.run_the_test(iterations)
    finally:
        # Restore in the same order the values were read.
        for path, value in zip(ratio_paths, saved_values):
            utils.write_one_line(path, value)
def initialize(self, hugetlbfs_dir=None, pages_requested=20):
    """Prepare the host for a hugepage test.

    Installs required packages, checks toolchain/kernel prerequisites,
    reserves huge pages, and mounts hugetlbfs if it is not mounted yet.

    :param hugetlbfs_dir: mount point to use for hugetlbfs; a directory
                          under self.tmpdir is created when None.
    :param pages_requested: number of huge pages to reserve.
    :raise error.TestNAError: kernel has no hugepage support.
    :raise error.TestError: fewer pages available than requested.
    """
    self.install_required_pkgs()
    self.hugetlbfs_dir = None

    # check if basic utilities are present
    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")
    os_dep.library('libpthread.a')

    # Check huge page number
    nr_hugepages_proc = '/proc/sys/vm/nr_hugepages'
    if not os.path.exists(nr_hugepages_proc):
        raise error.TestNAError('Kernel does not support hugepages')
    utils.write_one_line(nr_hugepages_proc, str(pages_requested))
    pages_available = int(utils.read_one_line(nr_hugepages_proc))
    if pages_available < pages_requested:
        raise error.TestError('%d pages available, < %d pages requested'
                              % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not hugetlbfs_dir:
            hugetlbfs_dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(hugetlbfs_dir)
        utils.system('mount -t hugetlbfs none %s' % hugetlbfs_dir)
        # Remember the mount point only when we created it ourselves,
        # so cleanup can unmount exactly what this test mounted.
        self.hugetlbfs_dir = hugetlbfs_dir
def initialize(self, hugetlbfs_dir=None, pages_requested=20):
    """Set up hugepage prerequisites for the test.

    Verifies toolchain and kernel support, reserves huge pages, and
    mounts hugetlbfs when no mount is present.

    :param hugetlbfs_dir: where to mount hugetlbfs; defaults to a
                          directory created under self.tmpdir.
    :param pages_requested: number of huge pages to reserve.
    :raise error.TestNAError: kernel has no hugepage support.
    :raise error.TestError: fewer pages available than requested.
    """
    self.hugetlbfs_dir = None

    # check if basic utilities are present
    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")
    os_dep.library('libpthread.a')

    # Check huge page number
    pages_available = 0
    if os.path.exists('/proc/sys/vm/nr_hugepages'):
        utils.write_one_line('/proc/sys/vm/nr_hugepages',
                             str(pages_requested))
        pages_available = int(
            utils.read_one_line('/proc/sys/vm/nr_hugepages'))
    else:
        raise error.TestNAError('Kernel does not support hugepages')

    if pages_available < pages_requested:
        raise error.TestError('%d pages available, < %d pages requested'
                              % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not hugetlbfs_dir:
            hugetlbfs_dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(hugetlbfs_dir)
        utils.system('mount -t hugetlbfs none %s' % hugetlbfs_dir)
        # Only record the dir when this test performed the mount.
        self.hugetlbfs_dir = hugetlbfs_dir
def get_mem_nodes(container_name):
    """Return the set of memory nodes available to a container.

    Covers both exclusive and shared nodes. Returns an empty set when
    the container's mems file does not exist.
    """
    mems_file = mems_path(container_name)
    if not os.path.exists(mems_file):
        return set()
    return rangelist_to_set(utils.read_one_line(mems_file))
def initialize(self, dir=None, pages_requested=20):
    """Reserve huge pages and mount hugetlbfs if needed.

    NOTE(review): the ``dir`` parameter shadows the builtin, but it is
    part of the public signature so it is kept for caller compatibility.

    :param dir: hugetlbfs mount point; created under self.tmpdir if None.
    :param pages_requested: number of huge pages to reserve.
    :raise error.TestNAError: kernel has no hugepage support.
    :raise error.TestError: fewer pages available than requested.
    """
    self.dir = None

    self.job.require_gcc()
    utils.check_kernel_ver("2.6.16")

    # Check huge page number
    nr_path = '/proc/sys/vm/nr_hugepages'
    if not os.path.exists(nr_path):
        raise error.TestNAError('Kernel does not support hugepages')
    utils.write_one_line(nr_path, str(pages_requested))
    pages_available = int(utils.read_one_line(nr_path))
    if pages_available < pages_requested:
        raise error.TestError(
            '%d huge pages available, < %d pages requested'
            % (pages_available, pages_requested))

    # Check if hugetlbfs has been mounted
    if not utils.file_contains_pattern('/proc/mounts', 'hugetlbfs'):
        if not dir:
            dir = os.path.join(self.tmpdir, 'hugetlbfs')
            os.makedirs(dir)
        utils.system('mount -t hugetlbfs none %s' % dir)
        # Remember the mount point so cleanup can unmount it.
        self.dir = dir
def get_boot_numa():
    """Return the boot-time numa=fake=xyz option for the current boot.

    e.g. numa=fake=nnn or numa=fake=nnnM; '' when the option is absent.
    """
    prefix = 'numa=fake='
    for token in utils.read_one_line('/proc/cmdline').split():
        if token.startswith(prefix):
            return token[len(prefix):]
    return ''
def get_provisioning_mode(device, host_id):
    """
    Get disk provisioning mode, value usually is 'writesame_16', depends
    on params for scsi_debug module.
    """
    device_name = os.path.basename(device)
    sysfs_path = ("/sys/block/%s/device/scsi_disk" % device_name
                  + "/%s/provisioning_mode" % host_id)
    return utils.read_one_line(sysfs_path).strip()
def create_container_via_memcg(name, parent, bytes, cpus):
    """Create a container via direct memcg cgroup writes.

    NOTE(review): ``bytes`` shadows the builtin but is part of the
    public signature, so the name is kept.

    :param name: container path to create.
    :param parent: parent container whose mem nodes are inherited.
    :param bytes: memory limit for the new container, in bytes.
    :param cpus: iterable of cpu numbers to assign.
    """
    os.mkdir(full_path(name))
    # inherit parent's nodes
    parent_nodes = utils.read_one_line(mems_path(parent))
    utils.write_one_line(mems_path(name), parent_nodes)
    utils.write_one_line(memory_path(name) + '.limit_in_bytes', str(bytes))
    utils.write_one_line(cpus_path(name),
                         ','.join(str(cpu) for cpu in cpus))
    logging.debug('Created container %s directly via memcg,'
                  ' has %d cpus and %s bytes',
                  name, len(cpus),
                  utils.human_format(container_bytes(name)))
def do_nodedev_dumpxml(dev_name, dev_opt="", **dargs):
    """
    Do dumpxml and check the result.

    (1).execute nodedev-dumpxml command.
    (2).compare info in xml with info in sysfs.

    :param dev_name: name of device.
    :param dev_opt: command extra options
    :param dargs: extra dict args
    :raise TestFail: if execute command failed or check result failed.
    """
    result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
    if result.exit_status:
        raise error.TestError("Dumpxml node device %s failed.\n"
                              "Detail:%s." % (dev_name, result.stderr))
    logging.debug('Executing "virsh nodedev-dumpxml %s" finished.', dev_name)
    # compare info in xml with info in sysfs.
    nodedevxml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
    if not nodedevxml.validates:
        raise error.TestError("nodedvxml of %s is not validated." % dev_name)
    # Get the dict of key to value in xml.
    # key2value_dict_xml contain the all keys and values in xml need checking.
    key2value_dict_xml = nodedevxml.get_key2value_dict()
    # Get the dict of key to path in sysfs.
    # key2syspath_dict contain the all keys and the path of file which contain
    # information for each key.
    key2syspath_dict = nodedevxml.get_key2syspath_dict()
    # Get the values contained in files.
    # key2value_dict_sys contain the all keys and values in sysfs.
    key2value_dict_sys = {}
    for key, filepath in key2syspath_dict.items():
        key2value_dict_sys[key] = utils.read_one_line(filepath)
    # Compare the value in xml and in syspath.
    for key, value_xml in key2value_dict_xml.items():
        value_sys = key2value_dict_sys.get(key)
        # was `if not value_xml == value_sys` with a redundant
        # `else: continue` branch
        if value_xml != value_sys:
            if (key == 'numa_node' and
                    not libvirt_version.version_compare(1, 2, 5)):
                # numa_node was only exposed from libvirt 1.2.5 on.
                # Lazy %-args instead of eager string formatting.
                logging.warning("key: %s in xml is not supported yet", key)
            else:
                # Added the missing space before "but" in the message.
                raise error.TestError("key: %s in xml is %s, "
                                      "but in sysfs is %s."
                                      % (key, value_xml, value_sys))
    # Added the missing space between "finished" and "for" that the
    # implicit literal concatenation dropped.
    logging.debug("Compare info in xml and info in sysfs finished "
                  "for device %s.", dev_name)
def get_allocation_bitmap():
    """
    get block allocation bitmap

    Reads the scsi_debug driver's map file from sysfs.

    :return: the bitmap line with whitespace stripped, or "" when the
             map file cannot be read.
    """
    path = "/sys/bus/pseudo/drivers/scsi_debug/map"
    try:
        return utils.read_one_line(path).strip()
    except IOError:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("could not get bitmap info, path '%s' is "
                        "not exist", path)
        return ""
def container_bytes(name):
    """Return the effective memory size in bytes for container `name`.

    Under fake numa, this is derived from the container's mem nodes.
    Otherwise the cgroup hierarchy is walked upward until an explicit
    memcg limit (below NO_LIMIT) is found, falling back to the root
    container's size at SUPER_ROOT.
    """
    if fake_numa_containers:
        return nodes_avail_mbytes(get_mem_nodes(name)) << 20
    while True:
        # renamed local: `file` shadowed the builtin
        limit_file = memory_path(name) + '.limit_in_bytes'
        limit = int(utils.read_one_line(limit_file))
        if limit < NO_LIMIT:
            return limit
        if name == SUPER_ROOT:
            return root_container_bytes
        name = os.path.dirname(name)
def do_nodedev_dumpxml(dev_name, dev_opt="", **dargs):
    """
    Do dumpxml and check the result.

    (1).execute nodedev-dumpxml command.
    (2).compare info in xml with info in sysfs.

    :param dev_name: name of device.
    :param dev_opt: command extra options
    :param dargs: extra dict args
    :raise TestFail: if execute command failed or check result failed.
    """
    result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
    if result.exit_status:
        raise error.TestError("Dumpxml node device %s failed.\n"
                              "Detail:%s." % (dev_name, result.stderr))
    logging.debug('Executing "virsh nodedev-dumpxml %s" finished.', dev_name)
    # compare info in xml with info in sysfs.
    nodedevxml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
    if not nodedevxml.validates:
        raise error.TestError("nodedvxml of %s is not validated." % dev_name)
    # Get the dict of key to value in xml.
    # key2value_dict_xml contain the all keys and values in xml need checking.
    key2value_dict_xml = nodedevxml.get_key2value_dict()
    # Get the dict of key to path in sysfs.
    # key2syspath_dict contain the all keys and the path of file which contain
    # information for each key.
    key2syspath_dict = nodedevxml.get_key2syspath_dict()
    # Get the values contained in files.
    # key2value_dict_sys contain the all keys and values in sysfs.
    key2value_dict_sys = {}
    for key, filepath in key2syspath_dict.items():
        key2value_dict_sys[key] = utils.read_one_line(filepath)
    # Compare the value in xml and in syspath.
    for key, value_xml in key2value_dict_xml.items():
        value_sys = key2value_dict_sys.get(key)
        # was `if not value_xml == value_sys` plus a no-op
        # `else: continue`
        if value_xml != value_sys:
            if (key == 'numa_node' and
                    not libvirt_version.version_compare(1, 2, 5)):
                # numa_node is only reported by libvirt >= 1.2.5.
                # Use lazy %-args rather than eager formatting.
                logging.warning("key: %s in xml is not supported yet", key)
            else:
                # Added the missing space before "but" in the message.
                raise error.TestError("key: %s in xml is %s, "
                                      "but in sysfs is %s."
                                      % (key, value_xml, value_sys))
    # Added the missing space that the adjacent string literals dropped
    # ("finishedfor device" -> "finished for device").
    logging.debug("Compare info in xml and info in sysfs finished "
                  "for device %s.", dev_name)
def _init_cmdline(self, extra_copy_cmdline):
    """
    Initialize default cmdline for booted kernels in this job.

    Copies console= (and any extra requested parameters) from the
    current boot's /proc/cmdline into boot.default_args so every
    kernel installed by this job boots with them.

    :param extra_copy_cmdline: extra parameter names to carry over,
                               or None.
    """
    params_to_copy = set(["console"])
    if extra_copy_cmdline is not None:
        params_to_copy.update(extra_copy_cmdline)

    # extract console= and other args from cmdline and add them into the
    # base args that we use for all kernels we install
    kept_args = []
    for karg in utils.read_one_line("/proc/cmdline").split():
        for param in params_to_copy:
            # Match either the bare parameter or "param=value".
            exact_or_assignment = (len(param) == len(karg)
                                   or karg[len(param)] == "=")
            if karg.startswith(param) and exact_or_assignment:
                kept_args.append(karg)
    self.config_set("boot.default_args", " ".join(kept_args))
def end_reboot_and_verify(self, expected_when, expected_id, subdir,
                          type="src", patches=None):
    """
    Check the passed kernel identifier against the command line and the
    running kernel, abort the job on missmatch.

    :param expected_when: IDENT mark expected on the kernel command line.
    :param expected_id: expected kernel identity string.
    :param subdir: results subdirectory for the reboot record.
    :param type: 'src' or 'rpm' identity comparison mode (name kept for
                 caller compatibility although it shadows the builtin).
    :param patches: patch list forwarded to end_reboot; defaults to [].
    :raise error.JobError: when the booted kernel does not match.
    """
    # Fix: `patches=[]` was a shared mutable default argument.
    if patches is None:
        patches = []
    logging.info(
        "POST BOOT: checking booted kernel "
        "mark=%d identity='%s' type='%s'", expected_when, expected_id, type
    )
    running_id = utils.running_os_ident()
    cmdline = utils.read_one_line("/proc/cmdline")
    find_sum = re.compile(r".*IDENT=(\d+)")
    m = find_sum.match(cmdline)
    cmdline_when = -1
    if m:
        cmdline_when = int(m.groups()[0])
    # We have all the facts, see if they indicate we
    # booted the requested kernel or not.
    bad = False
    if (
        type == "src" and expected_id != running_id
        or type == "rpm" and not running_id.startswith(expected_id + "::")
    ):
        logging.error("Kernel identifier mismatch")
        bad = True
    if expected_when != cmdline_when:
        logging.error("Kernel command line mismatch")
        bad = True
    if bad:
        logging.error("   Expected Ident: " + expected_id)
        logging.error("    Running Ident: " + running_id)
        logging.error("    Expected Mark: %d", expected_when)
        logging.error("Command Line Mark: %d", cmdline_when)
        logging.error("     Command Line: " + cmdline)
        self._record_reboot_failure(subdir, "reboot.verify",
                                    "boot failure", running_id=running_id)
        raise error.JobError("Reboot returned with the wrong kernel")
    self.record("GOOD", subdir, "reboot.verify",
                utils.running_os_full_version())
    self.end_reboot(subdir, expected_id, patches, running_id=running_id)
def end_reboot_and_verify(self, expected_when, expected_id, subdir,
                          type='src', patches=None):
    """
    Check the passed kernel identifier against the command line and the
    running kernel, abort the job on missmatch.

    :param expected_when: IDENT mark expected on the kernel command line.
    :param expected_id: expected kernel identity string.
    :param subdir: results subdirectory for the reboot record.
    :param type: 'src' or 'rpm' identity comparison mode (name kept for
                 caller compatibility although it shadows the builtin).
    :param patches: patch list forwarded to end_reboot; defaults to [].
    :raise error.JobError: when the booted kernel does not match.
    """
    # Fix: `patches=[]` was a shared mutable default argument.
    if patches is None:
        patches = []
    logging.info("POST BOOT: checking booted kernel "
                 "mark=%d identity='%s' type='%s'",
                 expected_when, expected_id, type)
    running_id = utils.running_os_ident()
    cmdline = utils.read_one_line("/proc/cmdline")
    find_sum = re.compile(r'.*IDENT=(\d+)')
    m = find_sum.match(cmdline)
    cmdline_when = -1
    if m:
        cmdline_when = int(m.groups()[0])
    # We have all the facts, see if they indicate we
    # booted the requested kernel or not.
    bad = False
    if (type == 'src' and expected_id != running_id or
            type == 'rpm' and
            not running_id.startswith(expected_id + '::')):
        logging.error("Kernel identifier mismatch")
        bad = True
    if expected_when != cmdline_when:
        logging.error("Kernel command line mismatch")
        bad = True
    if bad:
        logging.error("   Expected Ident: " + expected_id)
        logging.error("    Running Ident: " + running_id)
        logging.error("    Expected Mark: %d", expected_when)
        logging.error("Command Line Mark: %d", cmdline_when)
        logging.error("     Command Line: " + cmdline)
        self._record_reboot_failure(subdir, "reboot.verify",
                                    "boot failure", running_id=running_id)
        raise error.JobError("Reboot returned with the wrong kernel")
    self.record('GOOD', subdir, 'reboot.verify',
                utils.running_os_full_version())
    self.end_reboot(subdir, expected_id, patches, running_id=running_id)
def _init_cmdline(self, extra_copy_cmdline):
    """
    Initialize default cmdline for booted kernels in this job.

    Pulls console= (plus any caller-requested parameters) out of the
    running kernel's /proc/cmdline and stores them as the default boot
    arguments for kernels installed by this job.

    :param extra_copy_cmdline: additional parameter names to copy, or
                               None for just "console".
    """
    copy_cmdline = {'console'}
    if extra_copy_cmdline is not None:
        copy_cmdline.update(extra_copy_cmdline)

    # extract console= and other args from cmdline and add them into the
    # base args that we use for all kernels we install
    kernel_args = []
    for karg in utils.read_one_line('/proc/cmdline').split():
        for param in copy_cmdline:
            if not karg.startswith(param):
                continue
            # Accept the bare parameter or a "param=value" form only.
            if len(param) == len(karg) or karg[len(param)] == '=':
                kernel_args.append(karg)
    self.config_set('boot.default_args', ' '.join(kernel_args))
def my_container_name():
    """Return the current process's container name.

    The name is the inherited or self-built container within
    /dev/cpuset or /dev/cgroup; '' for the root container.
    """
    cpuset_line = utils.read_one_line('/proc/%i/cpuset' % os.getpid())
    # strip leading /
    return cpuset_line[1:]
def get_cpus(container_name):
    """Return the set of cpus assigned to a container.

    An empty set is returned when the container's cpus file is absent.
    """
    cpus_file = cpus_path(container_name)
    if not os.path.exists(cpus_file):
        return set()
    return rangelist_to_set(utils.read_one_line(cpus_file))
def readline(self, logdir):
    """Return the single line logged in self.logf under `logdir`.

    :param logdir: directory containing the log file.
    :return: the first line of the log file, or "" when it is missing.
    """
    log_path = os.path.join(logdir, self.logf)
    if not os.path.exists(log_path):
        return ""
    return utils.read_one_line(log_path)