def cpu_affinity_check(test, vm_name, cpuset=None, node=None):
    """
    Verify that 'virsh vcpuinfo' reports the expected CPU affinity.

    :param test: test object
    :param vm_name: the vm name
    :param cpuset: cpuset list (takes precedence over node)
    :param node: node number list; the cpus of each node form the expectation
    :raises: test.fail if cpu affinity is not expected
    """
    # The affinity string sits on the last line of the vcpuinfo output.
    vcpuinfo_result = virsh.vcpuinfo(vm_name, debug=True)
    affinity_line = vcpuinfo_result.stdout.strip().splitlines()[-1]
    actual_affinity = affinity_line.split(":")[-1].strip()
    if node:
        # Gather every cpu belonging to the listed numa nodes.
        gathered = []
        for num in node:
            numa_obj = utils_misc.NumaNode(i=num + 1)
            logging.debug("node %s cpu list is %s" % (num, numa_obj.cpus))
            gathered.extend(numa_obj.cpus)
        cpu_list = [int(c) for c in gathered]
    if cpuset:
        cpu_list = cpuset
    # NOTE(review): assumes at least one of cpuset/node is supplied,
    # otherwise cpu_list is unbound (same contract as the original).
    expected_affinity = format_affinity_str(cpu_list)
    logging.debug("expect cpu affinity is %s", expected_affinity)
    if actual_affinity != expected_affinity:
        test.fail("vcpuinfo cpu affinity not expected")
def _pin_vm_threads(vm, node):
    """
    Pin vm vcpu/vhost threads onto the cpus of the given numa node.

    :param vm: VM object whose threads are pinned
    :param node: numa node index (int/str) or a NumaNode instance; falsy
                 values make this a no-op
    :return: the NumaNode used, or the original falsy value
    """
    if not node:
        return node
    numa_obj = node
    if not isinstance(numa_obj, utils_misc.NumaNode):
        numa_obj = utils_misc.NumaNode(int(numa_obj))
    utils_test.qemu.pin_vm_threads(vm, numa_obj)
    return numa_obj
def _pin_vm_threads(vm, node):
    """
    Pin guest to certain numa node.

    :param vm: a vm object
    :param node: a numa node to pin to; an int/str node index or an
                 already constructed utils_misc.NumaNode.  Falsy values
                 skip pinning entirely.
    :return: the NumaNode actually used, or the original falsy value when
             pinning was skipped
    """
    # Fixes vs. the previous version (and brings this helper in line with
    # its sibling implementations):
    #  - guard against falsy node values instead of crashing inside
    #    NumaNode(None)
    #  - convert string/int indexes explicitly with int()
    #  - avoid re-wrapping a value that is already a NumaNode
    if node:
        if not isinstance(node, utils_misc.NumaNode):
            node = utils_misc.NumaNode(int(node))
        utils_test.qemu.pin_vm_threads(vm, node)
    return node
def _pin_vm_threads(node):
    """
    Pin guest vcpu and vhost threads to cpus of a numa node respectively.

    :param node: which numa node to pin; falsy values are a no-op
    """
    if not node:
        return
    if not isinstance(node, utils_misc.NumaNode):
        node = utils_misc.NumaNode(int(node))
    # 'vm' is resolved from the enclosing scope, not passed as a parameter.
    utils_test.qemu.pin_vm_threads(vm, node)
def pin_vm_threads(vm, node):
    """
    Pin vm threads to assigned node.

    :param vm: VM object to pin
    :param node: numa node index or NumaNode instance; falsy skips pinning
    :return: the (possibly wrapped) node value
    """
    if not node:
        return node
    wrapped = node if isinstance(node, utils_misc.NumaNode) \
        else utils_misc.NumaNode(int(node))
    utils_test.qemu.pin_vm_threads(vm, wrapped)
    return wrapped
def setUp(self):
    """Stub out shell access and point NumaNode at fixture files."""
    # Replace process.run so NumaNode never shells out during the test.
    self.god = mock.mock_god(ut=self)
    self.god.stub_with(process, 'run', utils_run)

    def dump_to_tempfile(contents):
        # Persist fixture data to a named temp file (delete=False keeps it
        # on disk so NumaNode can re-open it by path) and return that path.
        handle = tempfile.NamedTemporaryFile(delete=False)
        handle.write(contents)
        handle.close()
        return handle.name

    self.all_nodes_path = dump_to_tempfile(all_nodes_contents)
    self.online_nodes_path = dump_to_tempfile(online_nodes_contents)
    self.numa_node = utils_misc.NumaNode(-1, self.all_nodes_path,
                                         self.online_nodes_path)
def run(test, params, env):
    """
    Test numa tuning with memory.

    Start libvirtd in a logging session, apply <numatune>/vcpu placement
    settings to the domain, boot the guest, then verify cpu affinity,
    memory placement and (for 'auto' placement) the numad advice captured
    from the libvirtd log.

    :param test: test object
    :param params: test parameters dict
    :param env: test environment object
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            test.fail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string -- one char per host cpu,
                 'y' when the cpu is in cpu_list, '-' otherwise
        """
        cmd = "lscpu | grep '^CPU(s):'"
        ret = process.run(cmd, shell=True)
        cpu_num = int(ret.stdout_text.split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list (takes precedence over node)
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        # The affinity string is on the last line of vcpuinfo output.
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num + 1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            test.fail("vcpuinfo cpu affinity not expected")

    def numa_mode_check(mode_nodeset):
        """
        When the mode is 'preferred' or 'interleave', it is better to
        check numa_maps of the qemu process.

        :param mode_nodeset: expected "<mode>:<nodeset>" policy string
        """
        vm_pid = vm.get_pid()
        numa_map = '/proc/%s/numa_maps' % vm_pid
        # Every mapping line must carry the expected policy field.
        with open(numa_map) as file:
            for line in file.readlines():
                if line.split()[1] != mode_nodeset:
                    test.fail("numa node and nodeset %s is "
                              "not expected" % mode_nodeset)

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list (only nodes that actually have memory).
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # On ppc64 node numbering may be sparse: remap the requested nodeset
    # indices onto the host's actual node ids.
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(node_lst[nnode])]) + ','
            # Drop the trailing comma.
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    # Prepare libvirtd session with log level as 1
    config_path = os.path.join(data_dir.get_tmp_dir(), "virt-test.conf")
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)

    try:
        libvirtd.start(arg_str=arg_str)

        # As libvirtd start as session use root, need stop virtlogd service
        # and start it as daemon to fix selinux denial
        try:
            path.find_command('virtlogd')
            process.run("service virtlogd stop", ignore_status=True)
            process.run("virtlogd -d")
        except path.CmdNotFoundError:
            pass

        # Allow for more times to libvirtd restarted successfully.
        ret = utils_misc.wait_for(lambda: libvirtd.is_working(),
                                  timeout=240,
                                  step=1)
        if not ret:
            test.fail("Libvirtd hang after restarted")

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        # Shrink the requested nodeset onto the nodes this
                        # host really has.
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            # NOTE(review): this stores a *list* while the
                            # refactored variant of this test joins it into a
                            # comma-separated string -- confirm downstream
                            # consumers accept a list here.
                            numa_memory['nodeset'] = node_list[:nodes_size]

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose the random cpus from the list of available CPUs on the system and make sure no cpu is
                    # added twice or the list of selected CPUs is not long enough
                    for i in range(len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join([str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        # Drop any pre-existing <cputune> so it cannot interfere with the
        # placement settings under test.
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        # Expected numad invocation: "-w <vcpus>:<memory in MiB>".
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem // 1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config.  'static' placement is implicit, so libvirt
            # drops it from the live XML -- compare without it.
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)

        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [node_list.index(i) for i in node_list
                             if i not in used_node]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(used_node, left_node)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get('nodeset')
                numa_mode_check(mode_nodeset)

        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            # numad must have been consulted; its advice (second log line)
            # is the expected placement for both memory and vcpus.
            if not numad_log:
                test.fail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                logging.warning('numa log:\n%s\n' %
                                numad_log[0].split("numad ")[-1])
                logging.warning('numa cmd opt:\n%s\n' % numad_cmd_opt)
                test.fail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = cpu.cpus_parser(numad_ret)
            left_node = [node_list.index(i) for i in node_list
                         if i not in numad_node]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                if numa_memory.get('mode') == 'strict':
                    mem_compare(numad_node_seq, left_node)
                elif numa_memory.get('mode') == 'preferred':
                    mode_nodeset = 'prefer:' + numad_ret
                    numa_mode_check(mode_nodeset)
                else:
                    mode_nodeset = numa_memory.get('mode') + ':' + numad_ret
                    numa_mode_check(mode_nodeset)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        test.fail("cpu usage in node %s is not expected" % i)
                cpu_affinity_check(node=numad_node)

    finally:
        # Restore virtlogd as a systemd service, tear down the session
        # libvirtd, remove the temporary config, kill the guest if still
        # running, and restore the original domain XML.
        try:
            path.find_command('virtlogd')
            process.run('pkill virtlogd', ignore_status=True)
            process.run('systemctl restart virtlogd.socket',
                        ignore_status=True)
        except path.CmdNotFoundError:
            pass
        libvirtd.exit()
        if config_path:
            config.restore()
            if os.path.exists(config_path):
                os.remove(config_path)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
def run(test, params, env):
    """
    Test numa tuning with memory.

    NOTE(review): legacy (autotest / Python 2) variant of this test --
    it uses the `error`/`utils` modules and `except ..., e` syntax.
    The outer ``try`` block's cleanup clause is not visible in this
    chunk; confirm the matching ``finally`` (config restore, libvirtd
    exit, guest teardown) survives in the full file.

    :param test: test object
    :param params: test parameters dict
    :param env: test environment object
    """
    numad_log = []
    memory_status = []

    def _logger(line):
        """
        Callback function to log libvirtd output.
        """
        numad_log.append(line)

    def mem_compare(used_node, left_node):
        """
        Memory in used nodes should greater than left nodes

        :param used_node: used node list
        :param left_node: left node list
        """
        used_mem_total = 0
        left_node_mem_total = 0
        for i in used_node:
            used_mem_total += int(memory_status[i])
        for i in left_node:
            left_node_mem_total += int(memory_status[i])
        if left_node_mem_total > used_mem_total:
            raise error.TestFail("nodes memory usage not expected.")

    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string -- one char per host cpu,
                 'y' when the cpu is in cpu_list, '-' otherwise
        """
        cmd = "lscpu | grep '^CPU(s):'"
        cpu_num = int(utils.run(cmd).stdout.strip().split(':')[1].strip())
        cpu_affinity_str = ""
        for i in range(cpu_num):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str

    def cpu_affinity_check(cpuset=None, node=None):
        """
        Check vcpuinfo cpu affinity

        :param cpuset: cpuset list (takes precedence over node)
        :param node: node number list
        """
        result = virsh.vcpuinfo(vm_name, debug=True)
        # The affinity string is on the last line of vcpuinfo output.
        output = result.stdout.strip().splitlines()[-1]
        cpu_affinity = output.split(":")[-1].strip()
        if node:
            tmp_list = []
            for node_num in node:
                host_node = utils_misc.NumaNode(i=node_num+1)
                logging.debug("node %s cpu list is %s" %
                              (node_num, host_node.cpus))
                tmp_list += host_node.cpus
            cpu_list = [int(i) for i in tmp_list]
        if cpuset:
            cpu_list = cpuset
        ret = format_affinity_str(cpu_list)
        logging.debug("expect cpu affinity is %s", ret)
        if cpu_affinity != ret:
            raise error.TestFail("vcpuinfo cpu affinity not expected")

    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("vms")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # Prepare libvirtd session with log level as 1
    config_path = "/var/tmp/virt-test.conf"
    open(config_path, 'a').close()
    config = utils_config.LibvirtdConfig(config_path)
    config.log_level = 1
    arg_str = "--config %s" % config_path
    numad_reg = ".*numad"
    libvirtd = utils_libvirtd.LibvirtdSession(logging_handler=_logger,
                                              logging_pattern=numad_reg)
    try:
        libvirtd.start(arg_str=arg_str)

        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)

        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num+1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    raise error.TestNAError("nodeset %s out of range" %
                                            numa_memory['nodeset'])

        if vcpu_cpuset:
            pre_cpuset = utils_test.libvirt.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                raise error.TestNAError("cpuset %s out of range" %
                                        vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vcpu_num = vmxml.vcpu
        max_mem = vmxml.max_mem
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        # Expected numad invocation: "-w <vcpus>:<memory in MiB>".
        numad_cmd_opt = "-w %s:%s" % (vcpu_num, max_mem/1024)

        try:
            vm.start()
            vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config.  'static' placement is implicit, so libvirt
            # drops it from the live XML -- compare without it.
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                raise error.TestFail("memory config %s not expected after "
                                     "domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    raise error.TestFail("cpuset not found in domain xml.")

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    raise error.TestFail("cpu %s is not expected" % i)
            cpu_affinity_check(cpuset=pre_cpuset)

        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            left_node = [node_list.index(i) for i in node_list
                         if i not in used_node]
            used_node = [node_list.index(i) for i in used_node]
            mem_compare(used_node, left_node)

        logging.debug("numad log list is %s", numad_log)
        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            # numad must have been consulted; its advice (second log line)
            # is the expected placement for both memory and vcpus.
            if not numad_log:
                raise error.TestFail("numad usage not found in libvirtd log")
            if numad_log[0].split("numad ")[-1] != numad_cmd_opt:
                raise error.TestFail("numad command not expected in log")
            numad_ret = numad_log[1].split("numad: ")[-1]
            numad_node = utils_test.libvirt.cpus_parser(numad_ret)
            left_node = [node_list.index(i) for i in node_list
                         if i not in numad_node]
            numad_node_seq = [node_list.index(i) for i in numad_node]
            logging.debug("numad nodes are %s", numad_node)
            if numa_memory.get('placement') == 'auto':
                mem_compare(numad_node_seq, left_node)
            if vcpu_placement == 'auto':
                for i in left_node:
                    if qemu_cpu[i]:
                        raise error.TestFail("cpu usage in node %s is not "
                                             "expected" % i)
                cpu_affinity_check(node=numad_node)
def run_ntttcp(test, params, env):
    """
    Run NTttcp on Windows guest

    1) Install NTttcp in server/client side by Autoit
    2) Start NTttcp in server/client side
    3) Get test results

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    timeout = int(params.get("timeout"))
    results_path = os.path.join(test.resultsdir,
                                'raw_output_%s' % test.iteration)
    # NOTE: local name 'platform' shadows the stdlib module of the same
    # name within this function.
    platform = "x86"
    if "64" in params["vm_arch_name"]:
        platform = "x64"
    buffers = params.get("buffers").split()
    buf_num = params.get("buf_num", 200000)
    session_num = params.get("session_num")

    vm_sender = env.get_vm(params["main_vm"])
    vm_sender.verify_alive()
    vm_receiver = None
    receiver_addr = params.get("receiver_address")

    logging.debug(utils.system("numactl --hardware", ignore_status=True))
    logging.debug(utils.system("numactl --show", ignore_status=True))
    # pin guest vcpus/memory/vhost threads to last numa node of host by
    # default
    if params.get('numa_node'):
        numa_node = int(params.get('numa_node'))
        node = utils_misc.NumaNode(numa_node)
        utils_test.qemu.pin_vm_threads(vm_sender, node)

    # No explicit receiver address: use a second guest as the receiver.
    if not receiver_addr:
        vm_receiver = env.get_vm("vm2")
        vm_receiver.verify_alive()
        try:
            sess = None
            sess = vm_receiver.wait_for_login(timeout=login_timeout)
            receiver_addr = vm_receiver.get_address()
            if not receiver_addr:
                raise error.TestError("Can't get receiver(%s) ip address" %
                                      vm_sender.name)
            if params.get('numa_node'):
                utils_test.qemu.pin_vm_threads(vm_receiver, node)
        finally:
            if sess:
                sess.close()

    @error.context_aware
    def install_ntttcp(session):
        """ Install ntttcp through a remote session """
        logging.info("Installing NTttcp ...")
        try:
            # Don't install ntttcp if it's already installed
            error.context("NTttcp directory already exists")
            session.cmd(params.get("check_ntttcp_cmd"))
        except aexpect.ShellCmdError:
            ntttcp_install_cmd = params.get("ntttcp_install_cmd")
            error.context("Installing NTttcp on guest")
            session.cmd(ntttcp_install_cmd % (platform, platform),
                        timeout=200)

    def receiver():
        """ Receive side """
        logging.info("Starting receiver process on %s", receiver_addr)
        if vm_receiver:
            session = vm_receiver.wait_for_login(timeout=login_timeout)
        else:
            # External receiver host: open a remote shell session to it.
            username = params.get("username", "")
            password = params.get("password", "")
            prompt = params.get("shell_prompt", "[\#\$]")
            linesep = eval("'%s'" % params.get("shell_linesep", r"\n"))
            client = params.get("shell_client")
            port = int(params.get("shell_port"))
            log_filename = ("session-%s-%s.log" %
                            (receiver_addr,
                             utils_misc.generate_random_string(4)))
            session = remote.remote_login(client, receiver_addr, port,
                                          username, password, prompt,
                                          linesep, log_filename, timeout)
            session.set_status_test_command("echo %errorlevel%")
        install_ntttcp(session)
        ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd")
        # _receiver_ready is a module-level flag shared with sender()/_wait().
        global _receiver_ready
        f = open(results_path + ".receiver", 'a')
        for b in buffers:
            # Wait until the sender has consumed the previous round.
            utils_misc.wait_for(lambda: not _wait(), timeout)
            _receiver_ready = True
            rbuf = params.get("fixed_rbuf", b)
            cmd = ntttcp_receiver_cmd % (
                session_num, receiver_addr, rbuf, buf_num)
            r = session.cmd_output(cmd, timeout=timeout,
                                   print_func=logging.debug)
            f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r))
        f.close()
        session.close()

    def _wait():
        """ Check if receiver is ready """
        global _receiver_ready
        if _receiver_ready:
            return _receiver_ready
        return False

    def sender():
        """ Send side """
        logging.info("Sarting sender process ...")
        session = vm_sender.wait_for_login(timeout=login_timeout)
        install_ntttcp(session)
        ntttcp_sender_cmd = params.get("ntttcp_sender_cmd")
        f = open(results_path + ".sender", 'a')
        try:
            global _receiver_ready
            for b in buffers:
                cmd = ntttcp_sender_cmd % (
                    session_num, receiver_addr, b, buf_num)
                # Wait until receiver ready
                utils_misc.wait_for(_wait, timeout)
                r = session.cmd_output(cmd, timeout=timeout,
                                       print_func=logging.debug)
                _receiver_ready = False
                f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r))
        finally:
            f.close()
        session.close()

    def parse_file(resultfile):
        """ Parse raw result files and generate files with standard format """
        fileobj = open(resultfile, "r")
        lst = []
        found = False
        for line in fileobj.readlines():
            # Remember the buffer size of the current result section.
            o = re.findall("Send buffer size: (\d+)", line)
            if o:
                bfr = o[0]
            if "Total Throughput(Mbit/s)" in line:
                found = True
            if found:
                fields = line.split()
                if len(fields) == 0:
                    continue
                try:
                    # Keep only all-numeric rows; last field is throughput.
                    [float(i) for i in fields]
                    lst.append([bfr, fields[-1]])
                except ValueError:
                    continue
                found = False
        return lst

    try:
        # Receiver runs in a background thread while sender drives the test.
        bg = utils.InterruptedThread(receiver, ())
        bg.start()
        if bg.isAlive():
            sender()
            bg.join(suppress_exception=True)
        else:
            raise error.TestError("Can't start backgroud receiver thread")
    finally:
        # Condense raw receiver output into the standard .RHS report.
        for i in glob.glob("%s.receiver" % results_path):
            f = open("%s.RHS" % results_path, "w")
            raw = "  buf(k)| throughput(Mbit/s)"
            logging.info(raw)
            f.write("#ver# %s\n#ver# host kernel: %s\n" %
                    (commands.getoutput("rpm -q qemu-kvm"), os.uname()[2]))
            desc = """#desc# The tests are sessions of "NTttcp", send buf number is %s. 'throughput' was taken from ntttcp's report.
#desc# How to read the results:
#desc# - The Throughput is measured in Mbit/sec.
#desc#
""" % (buf_num)
            f.write(desc)
            f.write(raw + "\n")
            for j in parse_file(i):
                raw = "%8s| %8s" % (j[0], j[1])
                logging.info(raw)
                f.write(raw + "\n")
            f.close()
def run_netperf(test, params, env):
    """
    Network stress test with netperf.

    1) Boot up VM(s), setup SSH authorization between host
       and guest(s)/external host
    2) Prepare the test environment in server/client/host
    3) Execute netperf tests, collect and analyze the results

    @param test: KVM test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    if params.get("rh_perf_envsetup_script"):
        utils_test.service_setup(vm, session, test.virtdir)
    # First nic is the data path, second nic is the control path.
    server = vm.get_address()
    server_ctl = vm.get_address(1)
    session.close()

    logging.debug(commands.getoutput("numactl --hardware"))
    logging.debug(commands.getoutput("numactl --show"))
    # pin guest vcpus/memory/vhost threads to last numa node of host by
    # default
    if params.get('numa_node'):
        numa_node = int(params.get('numa_node'))
        node = utils_misc.NumaNode(numa_node)
        utils_test.pin_vm_threads(vm, node)

    # Optional second guest acts as the netperf client.
    if "vm2" in params["vms"]:
        vm2 = env.get_vm("vm2")
        vm2.verify_alive()
        session2 = vm2.wait_for_login(timeout=login_timeout)
        if params.get("rh_perf_envsetup_script"):
            utils_test.service_setup(vm2, session2, test.virtdir)
        client = vm2.get_address()
        session2.close()
        if params.get('numa_node'):
            utils_test.pin_vm_threads(vm2, node)

    # Explicit params override guest-derived client/host addresses.
    if params.get("client"):
        client = params["client"]
    if params.get("host"):
        host = params["host"]
    else:
        # Derive the host address from the bridge/netdst interface.
        cmd = "ifconfig %s|awk 'NR==2 {print $2}'|awk -F: '{print $2}'"
        host = commands.getoutput(cmd % params["netdst"])

    shell_port = int(params["shell_port"])
    password = params["password"]
    username = params["username"]

    def env_setup(ip):
        # Install ssh keys, relax firewall/arp settings and deploy the
        # netperf sources/setup script onto the given machine.
        logging.debug("Setup env for %s" % ip)
        ssh_key.setup_ssh_key(hostname=ip, user=username, port=shell_port,
                              password=password)
        ssh_cmd(ip, "service iptables stop")
        ssh_cmd(ip, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
        netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
        for i in params.get("netperf_files").split():
            remote.scp_to_remote(ip, shell_port, username, password,
                                 "%s/%s" % (netperf_dir, i), "/tmp/")
        ssh_cmd(ip, params.get("setup_cmd"))

    logging.info("Prepare env of server/client/host")

    env_setup(server_ctl)
    env_setup(client)
    env_setup(host)
    logging.info("Start netperf testing ...")
    start_test(server, server_ctl, host, client, test.resultsdir,
               l=int(params.get('l')),
               sessions_rr=params.get('sessions_rr'),
               sessions=params.get('sessions'),
               sizes_rr=params.get('sizes_rr'),
               sizes=params.get('sizes'),
               protocols=params.get('protocols'),
               ver_cmd=params.get('ver_cmd', "rpm -q qemu-kvm"),
               netserver_port=params.get('netserver_port', "12865"),
               test=test)
def run(test, params, env):
    """
    Test numa tuning with memory.

    Refactored variant: the affinity/memory/numad checks are delegated to
    module-level helpers (cpu_affinity_check, mem_compare, numa_mode_check,
    verify_numa_for_auto_replacement).

    :param test: test object
    :param params: test parameters dict
    :param env: test environment object
    """
    memory_status = []
    vcpu_placement = params.get("vcpu_placement")
    vcpu_cpuset = params.get("vcpu_cpuset")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Get host numa node list (only nodes that actually have memory).
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes_withmem
    logging.debug("host node list is %s", " ".join(map(str, node_list)))

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    # On ppc64 node numbering may be sparse: remap the requested nodeset
    # indices onto the host's actual node ids.
    arch = platform.machine()
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = numa_memory["nodeset"]
            if '-' in nodes:
                for nnode in range(int(nodes.split('-')[0]),
                                   int(nodes.split('-')[1]) + 1):
                    ppc_memory_nodeset += str(node_list[nnode]) + ','
            else:
                node_lst = nodes.split(',')
                for nnode in range(len(node_lst)):
                    ppc_memory_nodeset += str(node_list[int(node_lst[nnode])]) + ','
            # Drop the trailing comma.
            numa_memory["nodeset"] = ppc_memory_nodeset[:-1]
        except (KeyError, IndexError):
            pass

    try:
        # Get host cpu list
        tmp_list = []
        for node_num in node_list:
            host_node = utils_misc.NumaNode(i=node_num + 1)
            logging.debug("node %s cpu list is %s" %
                          (node_num, host_node.cpus))
            tmp_list += host_node.cpus
        cpu_list = [int(i) for i in tmp_list]

        dynamic_parameters = params.get('can_be_dynamic', 'no') == 'yes'

        if numa_memory.get('nodeset'):
            used_node = cpu.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                if not set(used_node).issubset(node_list):
                    if not dynamic_parameters:
                        test.cancel("nodeset %s out of range" %
                                    numa_memory['nodeset'])
                    else:
                        # Shrink the requested nodeset onto the nodes this
                        # host really has.
                        if '-' in numa_memory['nodeset']:
                            nodes_size = len(numa_memory['nodeset'].split('-'))
                        else:
                            nodes_size = len(numa_memory['nodeset'].split(','))
                        if nodes_size > len(node_list):
                            test.cancel("nodeset %s out of range" %
                                        numa_memory['nodeset'])
                        else:
                            numa_memory['nodeset'] = ','.join(
                                map(str, node_list[:nodes_size]))

        if vcpu_cpuset:
            pre_cpuset = cpu.cpus_parser(vcpu_cpuset)
            logging.debug("Parsed cpuset list is %s", pre_cpuset)
            if not set(pre_cpuset).issubset(cpu_list):
                if not dynamic_parameters:
                    test.cancel("cpuset %s out of range" % vcpu_cpuset)
                else:
                    random_cpus = []
                    # Choose the random cpus from the list of available CPUs on the system and make sure no cpu is
                    # added twice or the list of selected CPUs is not long enough
                    for i in range(len([int(i) for i in vcpu_cpuset.split(',')])):
                        rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        while rand_cpu in random_cpus:
                            rand_cpu = random.randint(min(cpu_list), max(cpu_list))
                        random_cpus.append(rand_cpu)
                    random_cpus.sort()
                    vcpu_cpuset = (','.join([str(cpu_num) for cpu_num in random_cpus]))
                    pre_cpuset = cpu.cpus_parser(vcpu_cpuset)

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        # Drop any pre-existing <cputune> so it cannot interfere with the
        # placement settings under test.
        if vmxml.xmltreefile.find('cputune'):
            vmxml.xmltreefile.remove_by_xpath('/cputune')
        else:
            logging.debug('No vcpupin found')
        if vcpu_placement:
            vmxml.placement = vcpu_placement
        if vcpu_cpuset:
            vmxml.cpuset = vcpu_cpuset
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            vm.wait_for_login().close()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            numa_memory_new = vmxml_new.numa_memory
            logging.debug("Current memory config dict is %s" % numa_memory_new)

            # Check xml config.  'static' placement is implicit, so libvirt
            # drops it from the live XML -- compare without it.
            if numa_memory.get('placement') == 'static':
                pre_numa_memory = numa_memory.copy()
                del pre_numa_memory['placement']
            else:
                pre_numa_memory = numa_memory

            if pre_numa_memory != numa_memory_new:
                test.fail("memory config %s not expected "
                          "after domain start" % numa_memory_new)

            pos_vcpu_placement = vmxml_new.placement
            logging.debug("vcpu placement after domain start is %s",
                          pos_vcpu_placement)
            try:
                pos_cpuset = vmxml_new.cpuset
                logging.debug("vcpu cpuset after vm start is %s", pos_cpuset)
            except libvirt_xml.xcepts.LibvirtXMLNotFoundError:
                if vcpu_cpuset and vcpu_placement != 'auto':
                    test.fail("cpuset not found in domain xml.")

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n "
                          "error: %s\n%s" % (e, bug_url))

        # Check qemu process numa memory usage
        memory_status, qemu_cpu = utils_test.qemu.get_numa_status(
            host_numa_node, vm.get_pid())
        logging.debug("The memory status is %s", memory_status)
        logging.debug("The cpu usage is %s", qemu_cpu)

        if vcpu_cpuset:
            total_cpu = []
            for node_cpu in qemu_cpu:
                total_cpu += node_cpu
            for i in total_cpu:
                if int(i) not in pre_cpuset:
                    test.fail("cpu %s is not expected" % i)
            cpu_affinity_check(test, vm_name, cpuset=pre_cpuset)

        if numa_memory.get('nodeset'):
            # If there are inconsistent node numbers on host,
            # convert it into sequence number so that it can be used
            # in mem_compare
            if numa_memory.get('mode') == 'strict':
                left_node = [node_list.index(i) for i in node_list
                             if i not in used_node]
                used_node = [node_list.index(i) for i in used_node]
                mem_compare(test, used_node, left_node,
                            memory_status=memory_status)
            elif numa_memory.get('mode') == 'preferred':
                mode_nodeset = 'prefer:' + numa_memory.get('nodeset')
                numa_mode_check(test, vm, mode_nodeset)
            else:
                mode_nodeset = numa_memory.get('mode') + ':' + numa_memory.get('nodeset')
                numa_mode_check(test, vm, mode_nodeset)

        if vcpu_placement == 'auto' or numa_memory.get('placement') == 'auto':
            # Delegate the numad-advice verification to the shared helper.
            verify_numa_for_auto_replacement(test, params, vmxml,
                                             node_list, qemu_cpu, vm,
                                             memory_status=memory_status)

    finally:
        # Kill the guest if still running and restore the original XML.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()