def run_test_memorybacking(case):
    """
    Exercise <memoryBacking> scenarios on the test domain.

    :param case: name of the scenario to run ('no_numa' or 'mem_lock')
    """
    if case == 'no_numa':
        # Verify <access mode='shared'/> is ignored
        # if no NUMA nodes are configured
        unsupported = (libvirt_version.version_compare(7, 0, 0)
                       or not libvirt_version.version_compare(5, 0, 0))
        if unsupported:
            test.cancel('This case is not supported by current libvirt.')

        # Build a <memoryBacking> element carrying the access mode plus an
        # empty <hugepages/> child, and write it into the domain xml.
        backing = vm_xml.VMMemBackingXML()
        backing.access_mode = params.get('access_mode')
        backing.hugepages = vm_xml.VMHugepagesXML()
        vmxml.mb = backing
        logging.debug('membacking xml is: %s', backing)
        vmxml.xmltreefile.write()

        # Define the updated xml and compare the outcome with expectation.
        define_result = virsh.define(vmxml.xml, debug=True)
        check_result(define_result, status_error, error_msg)

    if case == 'mem_lock':
        # Allow use mlock without hard limit
        limit_value = params.get('hard_limit')
        limit_unit = params.get('hard_limit_unit', 'KiB')

        backing = vm_xml.VMMemBackingXML()
        backing.locked = True
        vmxml.mb = backing

        if limit_value:
            tune = vm_xml.VMMemTuneXML()
            tune.hard_limit = int(limit_value)
            tune.hard_limit_unit = limit_unit
            vmxml.memtune = tune

        vmxml.sync()
        vm.start()

        # Inspect the qemu process resource limits for the expected entry.
        output = process.run('prlimit -p `pidof qemu-kvm`',
                             shell=True, verbose=True).stdout_text
        if not re.search(expect_msg, output):
            test.fail('Not found expected content "%s" in output.' % expect_msg)
def modify_domain_xml(vmxml):
    """
    Set the domain memory to 1024000 KiB and enable hugepage backing.

    :param vmxml: VMXML instance to edit in place
    """
    vmxml.memory = 1024000
    backing = vm_xml.VMMemBackingXML()
    backing.hugepages = vm_xml.VMHugepagesXML()
    vmxml.mb = backing
    logging.debug(vmxml)
def modify_domain_xml():
    """
    Modify domain xml and define it.

    Applies (from closed-over test params): runtime-resizable max memory,
    vcpu count/placement, NUMA memnode tuning, NUMA cell topology and
    hugepage backing. Ends with vmxml.sync() which defines the domain.
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    mem_unit = params.get("mem_unit", "KiB")
    vcpu = params.get("vcpu", "4")
    # Runtime-resizable max memory (<maxMemory>)
    if max_mem_rt:
        vmxml.max_mem_rt = int(max_mem_rt)
        vmxml.max_mem_rt_slots = max_mem_slots
        vmxml.max_mem_rt_unit = mem_unit
    if vcpu:
        vmxml.vcpu = int(vcpu)
        vcpu_placement = params.get("vcpu_placement", "static")
        vmxml.placement = vcpu_placement
    if numa_memnode:
        vmxml.numa_memory = {}
        vmxml.numa_memnode = numa_memnode
    else:
        try:
            del vmxml.numa_memory
            del vmxml.numa_memnode
        except Exception:
            # Tags not present in the xml; nothing to delete.
            # (Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.)
            pass
    if numa_cells:
        cells = [ast.literal_eval(x) for x in numa_cells]
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        cpu_mode = params.get("cpu_mode")
        model_fallback = params.get("model_fallback")
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if model_fallback:
            cpu_xml.fallback = model_fallback
        cpu_xml.numa_cell = cells
        vmxml.cpu = cpu_xml
        # Delete memory and currentMemory tag,
        # libvirt will fill it automatically
        del vmxml.max_mem
        del vmxml.current_mem
    # hugepages setting
    if huge_pages:
        membacking = vm_xml.VMMemBackingXML()
        hugepages = vm_xml.VMHugepagesXML()
        pagexml_list = []
        # Iterate the page dicts directly instead of indexing by range()
        for page_params in huge_pages:
            pagexml = hugepages.PageXML()
            pagexml.update(page_params)
            pagexml_list.append(pagexml)
        hugepages.pages = pagexml_list
        membacking.hugepages = hugepages
        vmxml.mb = membacking
    logging.debug("vm xml: %s", vmxml)
    vmxml.sync()
def create_mbxml(mb_params):
    """
    Create memoryBacking xml

    :param mb_params: dict containing memory backing attributes
    :return memoryBacking xml
    """
    mb_xml = vm_xml.VMMemBackingXML()
    for key, value in mb_params.items():
        setattr(mb_xml, key, value)
    logging.debug(mb_xml)
    return mb_xml.copy()
def create_mbxml():
    """
    Create memoryBacking xml for test

    Collects every 'mbxml_'-prefixed test param and sets it as an attribute
    on a fresh VMMemBackingXML; values containing ':' are parsed as Python
    literals (dicts), everything else is kept as a plain string.

    :return: a copy of the built memoryBacking xml object
    """
    mb_params = {k: v for k, v in params.items() if k.startswith('mbxml_')}
    logging.debug(mb_params)
    mb_xml = vm_xml.VMMemBackingXML()
    mb_xml.xml = "<memoryBacking></memoryBacking>"
    for attr_key in mb_params:
        val = mb_params[attr_key]
        logging.debug('Set mb params')
        # ast.literal_eval replaces the previous eval(): params come from the
        # test configuration, and literal_eval safely parses the dict-style
        # values the ':' heuristic targets without executing arbitrary code.
        setattr(mb_xml, attr_key.replace('mbxml_', ''),
                ast.literal_eval(val) if ':' in val else val)
    logging.debug(mb_xml)
    return mb_xml.copy()
def modify_domain_xml():
    """
    Modify domain xml and define it.
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    mem_unit = params.get("mem_unit", "KiB")
    vcpu = params.get("vcpu", "4")

    # Memory sizing knobs, each optional.
    if max_mem_rt:
        vmxml.max_mem_rt = int(max_mem_rt)
        vmxml.max_mem_rt_slots = max_mem_slots
        vmxml.max_mem_rt_unit = mem_unit
    if max_mem:
        vmxml.max_mem = int(max_mem)
    if cur_mem:
        vmxml.current_mem = int(cur_mem)
    if memory_val:
        vmxml.memory = int(memory_val)

    if vcpu:
        vmxml.vcpu = int(vcpu)
        vcpu_placement = params.get("vcpu_placement", "static")
        vmxml.placement = vcpu_placement

    if numa_memnode:
        vmxml.numa_memory = {}
        vmxml.numa_memnode = numa_memnode
    else:
        try:
            del vmxml.numa_memory
            del vmxml.numa_memnode
        except Exception:
            # Not exists
            pass

    if numa_cells:
        cells = [ast.literal_eval(entry) for entry in numa_cells]
        # Rounding the numa memory values
        if align_mem_values:
            for idx, cell in enumerate(cells):
                aligned = str(utils_numeric.align_value(cell["memory"],
                                                        align_to_value))
                cells[idx]["memory"] = aligned
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
        cpu_mode = params.get("cpu_mode")
        model_fallback = params.get("model_fallback")
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if model_fallback:
            cpu_xml.fallback = model_fallback
        cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
        vmxml.cpu = cpu_xml
        # Delete memory and currentMemory tag,
        # libvirt will fill it automatically
        del vmxml.max_mem
        del vmxml.current_mem

    # hugepages setting
    if huge_pages or discard or cold_plug_discard:
        backing = vm_xml.VMMemBackingXML()
        backing.discard = True
        backing.source = ''
        backing.source_type = 'file'
        if huge_pages:
            hp_xml = vm_xml.VMHugepagesXML()
            page_list = []
            for page_attrs in huge_pages:
                page = hp_xml.PageXML()
                page.update(page_attrs)
                page_list.append(page)
            hp_xml.pages = page_list
            backing.hugepages = hp_xml
        vmxml.mb = backing

    logging.debug("vm xml: %s", vmxml)
    vmxml.sync()
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof"
                    " on host")

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and
                    'dev' in source and
                    source['dev'] not in net_ifs):
                logging.warning("Source device %s is not a interface"
                                " of host, reset to %s",
                                source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])
            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152
            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Define VM succeed, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {"csum": "tx-checksumming",
                    "tso4": "tcp-segmentation-offload",
                    "tso6": "tx-tcp6-segmentation",
                    "ecn": "tx-tcp-ecn-segmentation",
                    "ufo": "udp-fragmentation-offload"}
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = process.run("ethtool -k %s | head -18" % if_name,
                              shell=True)
            ret, output = out.exit_status, out.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in list(driver_options.keys()):
            if offload in offloads:
                if (output.count(offloads[offload]) and
                        not output.count("%s: %s" % (
                            offloads[offload], driver_options[offload]))):
                    test.fail("offloads option %s: %s isn't"
                              " correct in ethtool output" %
                              (offloads[offload], driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Find %s=%s in vm xml" %
                             (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target or
                    not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if (target_dev.startswith("vnet") or
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan")))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    # Fixed: fail() takes a single message argument; the
                    # original passed target_dev as a bogus second positional.
                    test.fail("Failed to set target dev to %s" % target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line

        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")
        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error("Don't know which device driver to expect on qemu cmdline"
                       " for iface_model %s" % iface_model)
        iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
                                   (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))
        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)
        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    # 'iothread' txmode shows up as tx=bh on the cmdline
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))
        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt or
                    not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for IP address is ready
        utils_misc.wait_for(
            lambda: utils_net.get_guest_ip_addr(session, mac), 10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            logging.debug("Duplicated IP address on guest. Check bug: "
                          "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            if not vm_ip or vm_ip != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s",
                              vm_ip, expect_ip)
                test.fail("Found wrong IP address"
                          " on guest")
        # Check gateway address
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # Check dns server address
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        :param session: vm session
        :param add_session: additional vm session
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session,
                         additional_vm.name: add_session}
        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address"
                      " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip"
                          " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)
        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping"
                       " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr, "192.168.122.1 %s" %
                ' '.join(list(vms_ip_dict.values()))))
        # Run a backgroup job waiting for connection of client
        bgjob = utils_misc.AsyncJob(cmd)
        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"], vms_sess_dict[vms]):
                test.error("Failed to install omping"
                           " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                    not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the backgroup job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Get iface_model. On s390x use default model 'virtio' if non-virtio given

        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model
        """
        if 's390x' == host_arch and 'virtio' not in iface_model:
            return "virtio"
        else:
            return iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check the vhostuser interface in guests

        param session1: Session of original guest
        param session2: Session of original additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])
        utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip)
        utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip)
        ping_status, ping_output = utils_net.ping(dest=guest2_ip, count='3',
                                                  timeout=5, session=session1)
        logging.info("output:%s" % ping_output)
        if ping_status != 0:
            if ping_expect_fail:
                logging.info("Can not ping guest2 as expected")
            else:
                test.fail("Can not ping guest2 from guest1")
        else:
            if ping_expect_fail:
                test.fail("Ping guest2 successfully not expected")
            else:
                logging.info("Can ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format in dict

        param ovs: openvswitch instance
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list", "interface"]).stdout_text.strip()
        ovs_iface_list = re.findall('name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n',
                                    ovs_iface_info, re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                # NOTE(review): eval() on ovs-vsctl output (un-quotes the
                # interface name). Input is local command output, but
                # ast.literal_eval would be the safer choice here.
                if iface_name == eval(ovs_iface[0]):
                    format_statis = dict(re.findall(r'(\S*?)=(\d*?),',
                                                    ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    # Fixed: the comparison literal had been corrupted to "******", which
    # made this flag (and test_iface_user below) permanently False.
    serial_login = "yes" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "yes" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgement params for vhostuer test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported"
                        " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Reserve selinux status
        selinux_mode = utils_selinux.get_status()
        # Reserve orig page size
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir,
                                            ovs_br_name, vhostuser_names,
                                            queue_size)

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach a interface when vm is shutoff
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking
                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell])
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml
                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name, add_vm_name,
                                                True, timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()
                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"),
                                      r"[\#\$]\s*$", 60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach a interface when vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Need sleep here for attachment take effect
                time.sleep(5)

            # Update a interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(
                        ifname_guest, ast.literal_eval(iface_driver_host),
                        session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(
                        ifname_host, ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a"
                              " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(r"RX:\s*%s" %
                                     driver_dict['rx_queue_size'], outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(r"TX:\s*%s" %
                                     driver_dict['tx_queue_size'], outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                with open(libvirtd_log_path) as f:
                    lines = "".join(f.readlines())
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s",
                              vhost_name, domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                # ovs TX maps to guest RX and vice versa
                ovs_cmp_dict = {'tx_bytes': ovs_statis_dict['rx_bytes'],
                                'tx_drop': ovs_statis_dict['rx_dropped'],
                                'tx_errs': ovs_statis_dict['rx_errors'],
                                'tx_packets': ovs_statis_dict['rx_packets'],
                                'rx_bytes': ovs_statis_dict['tx_bytes'],
                                'rx_drop': ovs_statis_dict['tx_dropped']}
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail("Find ovs %s result (%s) different with "
                                  "domifstate result (%s)"
                                  % (dict_key, ovs_cmp_dict[dict_key],
                                     domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s is same with domifstate",
                                     dict_key, domif_stat_dict[dict_key])

            # Check multi_queue
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            "Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if (int(pre_comb) != queue_size or
                                int(cur_comb) != int(comb_size)):
                            test.fail("Fail to check the combined size: "
                                      "setting: %s,"
                                      "Pre-set: %s, Current-set: %s, "
                                      "queue_size: %s"
                                      % (comb_size, pre_comb, cur_comb,
                                         queue_size))
                        else:
                            logging.info("Getting correct Pre-set and "
                                         "Current set value")
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="", ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server process on host
            process.system("pidof omping && killall omping",
                           ignore_status=True, shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size, clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
HP_page_list = enable_hugepage(vm_name, no_of_HPs, hp_unit=default_hp_unit, hp_node=hp_pin_nodes, pin=True, node_list=memnode_list, host_hp_size=host_hp_size, numa_pin=True) else: HP_page_list = enable_hugepage(vm_name, no_of_HPs, hp_unit=default_hp_unit, hp_node=hp_pin_nodes, host_hp_size=host_hp_size, pin=True) vmxml_mem = vm_xml.VMMemBackingXML() vmxml_hp = vm_xml.VMHugepagesXML() pagexml_list = [] for page in range(len(HP_page_list)): pagexml = vmxml_hp.PageXML() pagexml.update(HP_page_list[page]) pagexml_list.append(pagexml) vmxml_hp.pages = pagexml_list vmxml_mem.hugepages = vmxml_hp vmxml.mb = vmxml_mem vmxml.sync() # Hugepage enabled guest without pinning to node if enable_HP: if enable_numa_pin: # HP with Numa pin
def run(test, params, env):
    """
    Test memory management of nvdimm.

    Builds the guest XML (cpu/NUMA cells, memoryBacking, optional dimm
    memory devices) entirely from cartesian params, boots the guest on
    hugepages, then verifies the qemu command line and/or dimm
    hot-plug / hot-unplug behaviour.

    :param test: avocado test object used for fail()/error() reporting
    :param params: cartesian config parameters
    :param env: test environment holding the VM objects
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    qemu_checks = params.get('qemu_checks', '')

    def mount_hugepages(page_size):
        """
        (Re)mount hugetlbfs on /dev/hugepages with the given page size.

        :param page_size: unit is kB, it can be 4,2048,1048576,etc;
                          4 means the default page size (no pagesize= option)
        """
        perm = "" if page_size == 4 else "pagesize=%dK" % page_size
        # Unmount first so a stale mount with different options does not
        # shadow the requested pagesize.
        if utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs"):
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=4000):
        """
        Mount hugepages, reserve them, and point qemu.conf at the mount.

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages to reserve
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        Recover hugepages setup: default mount and original qemu.conf.

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def create_mbxml():
        """
        Build a <memoryBacking> element from all 'mbxml_*' params.

        :return: a VMMemBackingXML copy ready to assign to vmxml.mb
        """
        mb_params = {k: v for k, v in params.items()
                     if k.startswith('mbxml_')}
        logging.debug(mb_params)
        mb_xml = vm_xml.VMMemBackingXML()
        mb_xml.xml = "<memoryBacking></memoryBacking>"
        for attr_key in mb_params:
            val = mb_params[attr_key]
            logging.debug('Set mb params')
            # NOTE: values containing ':' are eval'd as python literals
            # (dicts). eval on config input is trusted here because
            # params come from the test's own cartesian config.
            setattr(mb_xml, attr_key.replace('mbxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(mb_xml)
        return mb_xml.copy()

    def create_cpuxml():
        """
        Build a <cpu> element with NUMA cells from all 'cpuxml_*' params.

        :return: a VMCPUXML copy ready to assign to vmxml.cpu
        """
        cpu_params = {k: v for k, v in params.items()
                      if k.startswith('cpuxml_')}
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        if 'cpuxml_numa_cell' in cpu_params:
            cpu_params['cpuxml_numa_cell'] = cpu_xml.dicts_to_cells(
                eval(cpu_params['cpuxml_numa_cell']))
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_dimm_xml(**mem_param):
        """
        Create xml of a dimm memory device.

        :param mem_param: keys source_pagesize, target_size,
                          target_size_unit, target_node
        :return: a memory-device XML copy
        """
        mem_xml = utils_hotplug.create_mem_xml(
            pg_size=int(mem_param['source_pagesize']),
            tg_size=mem_param['target_size'],
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_model="dimm")
        logging.debug(mem_xml)
        return mem_xml.copy()

    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    config = utils_config.LibvirtQemuConfig()
    page_size = params.get("page_size")
    discard = params.get("discard")
    # page_size is assumed to be provided by the cartesian config;
    # int(None) would raise here otherwise — TODO confirm all variants set it.
    setup_hugepages(int(page_size))
    # Snapshot the inactive XML so the finally clause can restore it.
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Fetch the VM before the try block so the finally clause can always
    # reference it (previously a failure before the assignment would have
    # raised NameError in cleanup).
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Set memoryBacking according to params
        mb_xml = create_mbxml()
        vmxml.mb = mb_xml

        # Update other vcpu, memory info according to 'setvm_*' params
        update_vm_args = {k: params[k] for k in params
                          if k.startswith('setvm_')}
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for page_attrs in huge_pages:
                pagexml = hugepages.PageXML()
                pagexml.update(page_attrs)
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking
            logging.debug(virsh.dumpxml(vm_name))

        if check in ('mem_dev', 'hot_plug'):
            # Add dimm mem device to vm xml
            dimm_params = {
                k.replace('dimmxml_', ''): v
                for k, v in params.items() if k.startswith('dimmxml_')
            }
            dimm_xml = create_dimm_xml(**dimm_params)
            if params.get('dimmxml_mem_access'):
                dimm_xml.mem_access = dimm_params['mem_access']
            vmxml.add_device(dimm_xml)

        logging.debug(virsh.dumpxml(vm_name))
        test_vm = env.get_vm(vm_name)
        vmxml.sync()
        if test_vm.is_alive():
            test_vm.destroy()
        virsh.start(vm_name, debug=True, ignore_status=False)
        test_vm.wait_for_login()

        if check in ('numa_cell', 'mem_dev'):
            # Check qemu command line one by one
            logging.debug("enter check")
            if discard == 'yes':
                libvirt.check_qemu_cmd_line(qemu_checks)
            elif libvirt.check_qemu_cmd_line(qemu_checks, True):
                test.fail("The unexpected [%s] exist in qemu cmd"
                          % qemu_checks)

        if check == 'hot_plug':
            # Build the dimm device to hot-plug from 'dimmxml2_*' params
            dimm_params2 = {
                k.replace('dimmxml2_', ''): v
                for k, v in params.items() if k.startswith('dimmxml2_')
            }
            dimm_xml2 = create_dimm_xml(**dimm_params2)
            if params.get('dimmxml2_mem_access'):
                dimm_xml2.mem_access = dimm_params2['mem_access']

            # Snapshot the device count BEFORE attaching. (Fix: the
            # device used to be attached twice — once before this
            # snapshot, with its result discarded — leaving an extra
            # dimm plugged in.)
            ori_devices = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('Starts with %d memory devices',
                          len(ori_devices))
            result = virsh.attach_device(vm_name, dimm_xml2.xml,
                                         debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be a memory device added
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            dimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(dimm_detach)

            # Hot-unplug dimm device
            result = virsh.detach_device(vm_name, dimm_detach.xml,
                                         debug=True)
            libvirt.check_exit_status(result)
            left_devices = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug(left_devices)
            if len(left_devices) != len(ori_devices):
                # Memory unplug completes asynchronously in the guest;
                # wait and re-check once before declaring failure (the
                # old code slept 60s and then failed unconditionally).
                time.sleep(60)
                left_devices = vm_xml.VMXML.new_from_dumpxml(
                    vm_name).get_devices('memory')
                if len(left_devices) != len(ori_devices):
                    test.fail(
                        'Number of memory devices after detach is %d, should be %d'
                        % (len(left_devices), len(ori_devices)))
    except virt_vm.VMStartError as e:
        # Fix: the two adjacent literals used to concatenate without a space.
        test.fail("VM failed to start. Error: %s" % str(e))
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        # Fix: restore the hugepage mount and qemu.conf changes made by
        # setup_hugepages(); restore_hugepages() existed but was never
        # called, leaking the modified config across tests.
        restore_hugepages()