def run_test_dommemstat(case):
    """
    Test virsh command dommemstat related cases

    :param case: test case
    """
    if case == 'disk_caches':
        # Verify that dommemstat shows the right disk cache for a RHEL8 guest
        # Update memballoon device
        balloon_dict = {
            k: v for k, v in params.items() if k.startswith('membal_')
        }

        libvirt.update_memballoon_xml(vmxml, balloon_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        vm.start()
        session = vm.wait_for_login()

        # Get info from virsh dommemstat command
        dommemstat_output = virsh.dommemstat(
            vm_name, debug=True).stdout_text.strip()
        dommemstat = {}
        for line in dommemstat_output.splitlines():
            k, v = line.strip().split(' ')
            dommemstat[k] = v

        # Get info from vm
        meminfo_keys = ['Buffers', 'Cached', 'SwapCached']
        meminfo = {
            k: utils_misc.get_mem_info(session, k) for k in meminfo_keys
        }

        # From kernel commit: Buffers + Cached + SwapCached = disk_caches
        tmp_sum = meminfo['Buffers'] + meminfo['Cached'] + meminfo['SwapCached']
        logging.info('Buffers %d + Cached %d + SwapCached %d = %d kb',
                     meminfo['Buffers'], meminfo['Cached'],
                     meminfo['SwapCached'], tmp_sum)

        # Compare and make sure the error is within the allowable range
        logging.info('disk_caches is %s', dommemstat['disk_caches'])
        allow_error = int(params.get('allow_error', 15))
        actual_error = (tmp_sum - int(dommemstat['disk_caches'])) / tmp_sum * 100
        logging.debug('Actual error: %.2f%%', actual_error)
        if actual_error > allow_error:
            test.fail('Buffers + Cached + SwapCached (%d) '
                      'should be close to disk_caches (%s). '
                      'Allowable error: %.2f%%'
                      % (tmp_sum, dommemstat['disk_caches'], allow_error))

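# A minimal, self-contained sketch of the disk_caches comparison performed
# above; _sketch_disk_caches_check and all sample values are hypothetical and
# stand in for live 'virsh dommemstat' output and guest /proc/meminfo reads.
def _sketch_disk_caches_check():
    sample_output = ('actual 1048576\n'
                     'disk_caches 48000\n'
                     'rss 331776')
    dommemstat = {}
    for line in sample_output.splitlines():
        k, v = line.strip().split(' ')
        dommemstat[k] = v

    # Hypothetical guest /proc/meminfo values, in kB
    meminfo = {'Buffers': 2144, 'Cached': 48212, 'SwapCached': 0}
    tmp_sum = meminfo['Buffers'] + meminfo['Cached'] + meminfo['SwapCached']

    allow_error = 15
    actual_error = (tmp_sum - int(dommemstat['disk_caches'])) / tmp_sum * 100
    print('actual error: %.2f%% (allowed: %d%%)' % (actual_error, allow_error))
    assert actual_error <= allow_error    # 4.68% here, within range
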
def set_vm_for_dump(test, params):
    """
    Update vm mem and image size params so that the dump can be
    generated and analysed.

    :param test: kvm test object
    :param params: Params object
    """
    host_free_mem = utils_misc.get_mem_info(attr='MemFree')
    host_avail_disk = int(process.getoutput(params["get_avail_disk"]))
    sys_image_size = int(
        float(utils_misc.normalize_data_size(params["image_size"], "G")))
    if host_avail_disk < (host_free_mem // 1024**2) * 1.2 + sys_image_size:
        # Shrink guest memory to fit the dump on disk; keep it a string
        # since it is concatenated with "M" below
        params["mem"] = str(int(
            (host_avail_disk - sys_image_size) * 0.8 // 2.4 * 1024))

    image_size_stg = int(
        float(utils_misc.normalize_data_size(params["mem"] + "M", "G")) * 1.4)
    params["image_size_stg"] = str(image_size_stg) + "G"
    params["force_create_image_stg"] = "yes"
    image_params = params.object_params("stg")
    env_process.preprocess_image(test, image_params, "stg")

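# A worked example of the sizing arithmetic in set_vm_for_dump, with assumed
# host numbers (32 GB free RAM, 50 GB free disk, 20 GB system image);
# _sketch_dump_sizing and all values are hypothetical.
def _sketch_dump_sizing():
    host_free_mem = 32 * 1024 ** 2   # MemFree in kB, i.e. 32 GB
    host_avail_disk = 50             # GB
    sys_image_size = 20              # GB

    # 32 GB * 1.2 + 20 GB = 58.4 GB needed, but only 50 GB are available,
    # so the guest memory is shrunk to fit the dump on disk
    if host_avail_disk < (host_free_mem // 1024 ** 2) * 1.2 + sys_image_size:
        mem = str(int((host_avail_disk - sys_image_size) * 0.8 // 2.4 * 1024))
        print('guest mem shrunk to %s MB' % mem)        # 10240
        image_size_stg = int(int(mem) / 1024 * 1.4)     # dump image, in GB
        print('image_size_stg: %dG' % image_size_stg)   # 14G
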
def run(test, params, env):
    """
    Bind guest node0 and node1 to 2 host nodes, do migration test

    1. Boot src guest with 2 numa nodes, each bound to a host numa node
    2. Migration
    3. Check the numa memory size in guest, linux guest only
    4. Check the numa memory policy in dest host

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_nodes_size(size_type='MemTotal', session=None):
        """
        Get the size of each node in host/guest, sorted descending by size

        :param size_type: the type of the node size
        :param session: ShellSession object
        :return: a list of (node id, node size(M)) tuples
        :rtype: list
        """
        numa_info = NumaInfo(session=session)
        nodes_size = {}
        numa_nodes = numa_info.online_nodes
        for node in numa_nodes:
            node_size = numa_info.online_nodes_meminfo[node][size_type]
            nodes_size[node] = float(normalize_data_size('%s KB' % node_size))
        nodes_size = sorted(nodes_size.items(), key=lambda item: item[1],
                            reverse=True)
        return nodes_size

    host_nodes_size = get_nodes_size(size_type='MemFree')
    mem_devs = params.objects('mem_devs')
    if len(host_nodes_size) < len(mem_devs):
        test.cancel("Host does not have enough nodes for testing!")
    for mem_dev in mem_devs:
        size_mem = params.object_params(mem_dev).get('size_mem')
        size_mem = float(normalize_data_size(size_mem))
        if host_nodes_size[0][1] >= size_mem:
            params['host-nodes_mem_%s' % mem_dev] = str(host_nodes_size[0][0])
            del host_nodes_size[0]
        else:
            test.cancel("Host nodes do not have enough memory for testing!")

    params['start_vm'] = 'yes'
    env_process.preprocess(test, params, env)
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()

    # Do migration
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    vm.migrate(mig_timeout, mig_protocol, env=env)
    session = vm.wait_for_login()

    os_type = params["os_type"]
    if os_type == 'linux':
        error_context.context("Check the numa memory size in guest",
                              logging.info)
        # Use 30 plus the gap between 'MemTotal' in the OS and '-m' on the
        # cli as the threshold
        mem_total = get_mem_info(session, 'MemTotal')
        mem_total = float(normalize_data_size('%s KB' % mem_total))
        error_context.context("MemTotal in guest os is %s MB" % mem_total,
                              logging.info)
        threshold = float(params.get_numeric("mem") - mem_total) + 30
        error_context.context("The acceptable threshold is: %s" % threshold,
                              logging.info)
        guest_nodes_size = get_nodes_size(size_type='MemTotal',
                                          session=session)
        guest_nodes_size = dict(guest_nodes_size)
        for nodenr, node in enumerate(params.objects('guest_numa_nodes')):
            mdev = params.get("numa_memdev_node%d" % nodenr)
            if mdev:
                mdev = mdev.split('-')[1]
                size = float(
                    normalize_data_size(params.get("size_mem_%s" % mdev)))
                if abs(size - guest_nodes_size[nodenr]) > threshold:
                    test.fail("[Guest]Wrong size of numa node %d: %f. "
                              "Expected: %s"
                              % (nodenr, guest_nodes_size[nodenr], size))

    error_context.context("Check the numa memory policy in dest host",
                          logging.info)
    qemu_pid = vm.get_pid()
    for mem_dev in mem_devs:
        memdev_params = params.object_params(mem_dev)
        size_mem = memdev_params.get('size_mem')
        size_mem = int(float(normalize_data_size(size_mem, 'K')))
        smaps = process.getoutput("grep -E -B1 '^Size: *%d' /proc/%d/smaps"
                                  % (size_mem, qemu_pid))
        mem_start_pattern = r'(\w+)-\w+\s+\w+-\w+\s+\w+\s+\w+:\w+\s\w+\s+\n' \
                            r'Size:\s+%d' % size_mem
        match = re.search(mem_start_pattern, smaps)
        if not match:
            test.error("Failed to get the mem start address in smaps: %s"
                       % smaps)
        mem_start = match.groups()[0]
        numa_maps = process.getoutput("grep %s /proc/%d/numa_maps"
                                      % (mem_start, qemu_pid))
        node_match = re.search(r'bind:(\d+)', numa_maps)
        if not node_match:
            test.fail("Failed to get the bind node in numa_maps: %s"
                      % numa_maps)
        bind_node = node_match.groups()[0]
        expected_node = memdev_params.get('host-nodes_mem')
        if bind_node != expected_node:
            test.fail("Host node for memdev %s in numa_maps is %s, while "
                      "the expected is: %s"
                      % (mem_dev, bind_node, expected_node))

def run(test, params, env):
    """
    Simple test to check if NUMA options are being parsed properly

    1) Boot vm with different numa nodes
    2) With qemu monitor, check if size and cpus for every node match the cli
    3) In guest os, check if size and cpus for every node match the cli

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def numa_info_guest():
        """
        The numa info in guest os, linux only

        :return: An array of (ram, cpus) tuples, where ram is the RAM size
                 in MB and cpus is a set of CPU numbers
        """
        numa_info_guest = NumaInfo(session=session)
        numa_guest = []
        nodes_guest = numa_info_guest.online_nodes
        for node in nodes_guest:
            node_size = numa_info_guest.online_nodes_meminfo[node]['MemTotal']
            node_size = float(normalize_data_size('%s KB' % node_size))
            node_cpus = numa_info_guest.online_nodes_cpus[node]
            node_cpus = set([int(v) for v in node_cpus.split()])
            numa_guest.append((node_size, node_cpus))

        # It is a known WONTFIX issue on x86 and ARM: when the vm has
        # 2 nodes, the node info of node0 and node1 is swapped in the
        # guest os
        if (vm_arch in ("x86_64", "i686", "aarch64")
                and len(numa_guest) == 2):
            numa_guest.reverse()
        return numa_guest

    vm = env.get_vm(params["main_vm"])
    os_type = params["os_type"]
    vm_arch = params["vm_arch_name"]
    session = vm.wait_for_login()

    error_context.context("starting numa_opts test...", logging.info)

    # Get numa info from monitor
    numa_monitor = vm.monitors[0].info_numa()
    error_context.context("numa info in monitor: %r" % numa_monitor,
                          logging.info)
    monitor_expect_nodes = params.get_numeric("monitor_expect_nodes")
    if len(numa_monitor) != monitor_expect_nodes:
        test.fail("[Monitor]Wrong number of numa nodes: %d. Expected: %d"
                  % (len(numa_monitor), monitor_expect_nodes))

    if os_type == 'linux':
        # Get numa info in guest os, only for Linux
        numa_guest = numa_info_guest()
        error_context.context("numa info in guest: %r" % numa_guest,
                              logging.info)
        guest_expect_nodes = int(params.get("guest_expect_nodes",
                                            monitor_expect_nodes))
        if len(numa_guest) != guest_expect_nodes:
            test.fail("[Guest]Wrong number of numa nodes: %d. Expected: %d"
                      % (len(numa_guest), guest_expect_nodes))
        # Use 30 plus the gap between 'MemTotal' in the OS and '-m' on the
        # cli as the threshold
        MemTotal = get_mem_info(session, 'MemTotal')
        MemTotal = float(normalize_data_size('%s KB' % MemTotal))
        error_context.context("MemTotal in guest os is %s MB" % MemTotal,
                              logging.info)
        threshold = float(params.get_numeric("mem") - MemTotal) + 30
        error_context.context("The acceptable threshold is: %s" % threshold,
                              logging.info)
    else:
        numa_guest = numa_monitor
    session.close()

    for nodenr, node in enumerate(numa_guest):
        mdev = params.get("numa_memdev_node%d" % (nodenr))
        if mdev:
            mdev = mdev.split('-')[1]
            size = float(normalize_data_size(params.get("size_%s" % mdev)))
        else:
            size = params.get_numeric("mem")

        cpus = params.get("numa_cpus_node%d" % (nodenr))
        if cpus is not None:
            cpus = set([int(v) for v in cpus.split(",")])
        else:
            cpus = set(range(params.get_numeric('smp')))

        if len(numa_monitor) != 0:
            if size != numa_monitor[nodenr][0]:
                test.fail("[Monitor]Wrong size of numa node %d: %f. "
                          "Expected: %f"
                          % (nodenr, numa_monitor[nodenr][0], size))
            if cpus != numa_monitor[nodenr][1]:
                test.fail("[Monitor]Wrong CPU set on numa node %d: %s. "
                          "Expected: %s"
                          % (nodenr, numa_monitor[nodenr][1], cpus))

        if os_type == 'linux':
            if size - numa_guest[nodenr][0] > threshold:
                test.fail("[Guest]Wrong size of numa node %d: %f. "
                          "Expected: %f"
                          % (nodenr, numa_guest[nodenr][0], size))
            if cpus != numa_guest[nodenr][1]:
                test.fail("[Guest]Wrong CPU set on numa node %d: %s. "
                          "Expected: %s"
                          % (nodenr, numa_guest[nodenr][1], cpus))