def restore_hugepages(page_size=4):
    """
    To recover hugepages
    :param page_size: unit is kB, it can be 4,2048,1048576,etc
    """
    mount_hugepages(page_size)
    utils_libvirtd.libvirtd_restart()
def prepare_ssl_env():
    """
    Do prepare for ssl spice connection
    """
    # Modify qemu.conf: remove any existing spice TLS settings first
    with open(qemu_conf, "r") as f_obj:
        cont = f_obj.read()
    left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
    left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '', left_cont)
    # Write back the stripped content plus the wanted TLS settings
    with open(qemu_conf, "w") as f_obj:
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
    # Make the modification take effect
    utils_libvirtd.libvirtd_restart()
    # Generate CA cert
    utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                               "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                               "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                               passwd)
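
# A minimal, self-contained sketch of the "strip old value, append new value"
# qemu.conf editing pattern used by prepare_ssl_env() above. The helper name
# set_conf_value() and the default conf path are illustrative assumptions,
# not part of the test suite's API.
def set_conf_value(key, value, conf_path="/etc/libvirt/qemu.conf"):
    """Remove any existing 'key = ...' line and append 'key = value'."""
    with open(conf_path, "r") as conf_file:
        content = conf_file.read()
    # Drop every existing assignment of the key, then append the new one
    content = re.sub(r'\s*%s\s*=.*' % re.escape(key), '', content)
    with open(conf_path, "w") as conf_file:
        conf_file.write(content)
        conf_file.write('\n%s = %s\n' % (key, value))


# Example calls (mirroring the two settings written by prepare_ssl_env):
# set_conf_value("spice_tls", "1")
# set_conf_value("spice_tls_x509_cert_dir", '"/etc/pki/libvirt-spice"')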
def unix_transport_recover():
    """
    Recover the libvirtd on local.
    """
    if os.path.exists(libvirtd_conf_bak_path):
        shutil.copy(libvirtd_conf_bak_path, libvirtd_conf_path)
        utils_libvirtd.libvirtd_restart()
def unix_transport_setup():
    """
    Setup a unix connect to local libvirtd.
    """
    # Back up the original config before touching it
    shutil.copy(libvirtd_conf_path, libvirtd_conf_bak_path)

    with open(libvirtd_conf_path, 'r') as libvirtdconf_file:
        line_list = libvirtdconf_file.readlines()
    conf_dict = {r'auth_unix_rw\s*=': 'auth_unix_rw="none"\n', }
    for key in conf_dict:
        pattern = key
        conf_line = conf_dict[key]
        flag = False
        # Replace the existing setting if present, otherwise append it
        for index in range(len(line_list)):
            line = line_list[index]
            if not re.search(pattern, line):
                continue
            else:
                line_list[index] = conf_line
                flag = True
                break
        if not flag:
            line_list.append(conf_line)

    with open(libvirtd_conf_path, 'w') as libvirtdconf_file:
        libvirtdconf_file.writelines(line_list)

    # restart libvirtd service
    utils_libvirtd.libvirtd_restart()
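
# A hedged usage sketch: unix_transport_setup() and unix_transport_recover()
# (both defined above) are typically paired around the test body so that the
# modified libvirtd.conf is always restored, even when the check fails. The
# do_unix_connect_test() call is a hypothetical placeholder for the actual
# connection verification, not a real helper in this suite.
def example_unix_transport_flow():
    unix_transport_setup()
    try:
        do_unix_connect_test()  # e.g. connect via qemu:///system and verify
    finally:
        unix_transport_recover()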
def do_operation():
    """
    Do operation in guest os with vf and check the os behavior after
    operation.
    """
    if operation == "resume_suspend":
        try:
            virsh.suspend(vm.name, debug=True, ignore_status=False)
            virsh.resume(vm.name, debug=True, ignore_status=False)
            get_ip_by_mac(mac_addr, timeout=120)
        except process.CmdError as detail:
            err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
            test.fail(err_msg)
    if operation == "reboot":
        try:
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            virsh.reboot(vm.name, ignore_status=False)
            get_ip_by_mac(mac_addr, timeout=120)
        except process.CmdError as detail:
            err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
            test.fail(err_msg)
    if operation == "save":
        result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=True)
    if operation == "restart_libvirtd":
        detach_interface()
        utils_libvirtd.libvirtd_restart()
        interface = attach_interface()
def unix_transport_setup():
    """
    Setup a unix connect to local libvirtd.
    """
    shutil.copy(libvirtd_conf_path, libvirtd_conf_bak_path)

    with open(libvirtd_conf_path, 'r') as libvirtdconf_file:
        line_list = libvirtdconf_file.readlines()
    conf_dict = {r'auth_unix_rw\s*=': 'auth_unix_rw="none"\n', }
    for key in conf_dict:
        pattern = key
        conf_line = conf_dict[key]
        flag = False
        for index in range(len(line_list)):
            line = line_list[index]
            if not re.search(pattern, line):
                continue
            else:
                line_list[index] = conf_line
                flag = True
                break
        if not flag:
            line_list.append(conf_line)

    with open(libvirtd_conf_path, 'w') as libvirtdconf_file:
        libvirtdconf_file.writelines(line_list)

    # restart libvirtd service
    utils_libvirtd.libvirtd_restart()
def restore_hugepages(page_size=4):
    """
    To recover hugepages
    :param page_size: unit is kB, it can be 4,2048,1048576,etc
    """
    mount_hugepages(page_size)
    config.restore()
    utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    vm_name = params.get("main_vm")
    status_error = ("yes" == params.get("status_error", "no"))
    start_ga = ("yes" == params.get("start_ga", "yes"))
    prepare_channel = ("yes" == params.get("prepare_channel", "yes"))
    src_path = params.get("src_path")
    tgt_name = params.get("tgt_name", "org.qemu.guest_agent.0")
    restart_libvirtd = ("yes" == params.get("restart_libvirtd"))
    suspend_resume_guest = ("yes" == params.get("suspend_resume_guest"))
    hotunplug_ga = ("yes" == params.get("hotunplug_ga"))
    label = params.get("con_label")
    vm = env.get_vm(vm_name)

    if src_path:
        socket_file_dir = os.path.dirname(src_path)
        if not os.path.exists(socket_file_dir):
            os.mkdir(socket_file_dir)
        shutil.chown(socket_file_dir, "qemu", "qemu")
        utils_selinux.set_context_of_file(filename=socket_file_dir,
                                          context=label)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_agent_channels()
    vmxml.sync()

    try:
        if prepare_channel:
            vm.prepare_guest_agent(start=start_ga, channel=True,
                                   source_path=src_path)
        if restart_libvirtd:
            utils_libvirtd.libvirtd_restart()
        if suspend_resume_guest:
            virsh.suspend(vm_name, debug=True)
            virsh.resume(vm_name, debug=True)
        if hotunplug_ga:
            ga_xml = get_ga_xml(vm, vm_name)
            result = virsh.detach_device(vm_name, ga_xml)
            if result.exit_status:
                test.fail("hotunplug guest agent device failed, %s" % result)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if vmxml.get_agent_channels():
                test.fail("hotunplug guest agent device failed as "
                          "guest agent xml still exists")
        else:
            if start_ga != check_ga_state(vm, vm_name):
                test.fail("guest agent device is not in correct state")
        check_ga_function(vm_name, status_error, hotunplug_ga)
    finally:
        vm.destroy()
        backup_xml.sync()
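
# For context on the tgt_name/src_path params in the guest-agent test above:
# the channel that vm.prepare_guest_agent() adds to the domain XML looks
# roughly like the snippet below. The source path is illustrative; the target
# name matches the 'org.qemu.guest_agent.0' default used in this test.
GA_CHANNEL_EXAMPLE = """
<channel type='unix'>
  <source mode='bind' path='/var/lib/libvirt/qemu/channel/target/guest.agent'/>
  <target type='virtio' name='org.qemu.guest_agent.0'/>
</channel>
"""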
def setup_hugepages(page_size=2048, shp_num=1000):
    """
    To setup hugepages
    :param page_size: unit is kB, it can be 4,2048,1048576,etc
    :param shp_num: number of hugepage, string type
    """
    mount_hugepages(page_size)
    utils_memory.set_num_huge_pages(shp_num)
    utils_libvirtd.libvirtd_restart()
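
# A minimal sketch of how setup_hugepages()/restore_hugepages() (defined above)
# are usually paired: allocate static hugepages for the test and always give
# them back afterwards. The page size and count below are illustrative values.
def example_hugepage_lifecycle():
    setup_hugepages(page_size=2048, shp_num=1000)
    try:
        pass  # start the guest and run the hugepage-backed checks here
    finally:
        restore_hugepages(page_size=4)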
def do_extra_operations(operations="daemon-reload"):
    """
    Do some extra operations after setting cgroup value

    :param operations: The operations to be executed
    """
    if "daemon-reload" in operations:
        process.run("systemctl daemon-reload", ignore_status=False, shell=True)
        logging.debug("daemons reloaded after setting cgroup")
    if "restart-libvirtd" in operations:
        utils_libvirtd.libvirtd_restart()
        logging.debug("libvirtd restarted after setting cgroup")
    # Sleep 2 seconds to make sure daemons are reloaded or restarted
    time.sleep(2)
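
# Usage sketch for do_extra_operations() above: the operations string may name
# one or both actions; anything else is silently ignored. The values shown are
# illustrative.
# do_extra_operations("daemon-reload")
# do_extra_operations("daemon-reload restart-libvirtd")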
def validate_blockcommit_after_libvirtd_restart():
    """Validate blockcommit after libvirtd restart"""
    logging.debug("phase three blockcommit .....")
    counts = 1
    phase_three_blockcommit_options = " --active"
    libvirt_disk.do_blockcommit_repeatedly(vm, 'vda',
                                           phase_three_blockcommit_options,
                                           counts)
    time.sleep(3)
    # Before restart libvirtd
    mirror_content_before_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
    logging.debug(mirror_content_before_restart)
    utils_libvirtd.libvirtd_restart()
    # After restart libvirtd
    mirror_content_after_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target)
    logging.debug(mirror_content_after_restart)
    # Check whether mirror content is identical with previous one
    if mirror_content_before_restart != mirror_content_after_restart:
        test.fail("The mirror part content changed after libvirtd restarted")
    virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True)
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") new_pool_name = params.get("new_pool_name", "") build_option = params.get("build_option", "") iscsi_initiator = params.get("iscsi_initiator", "") same_source_test = "yes" == params.get("same_source_test", "no") customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no") # The file for dumped pool xml poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("volume_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") if not libvirt_version.version_compare(4, 7, 0): if pool_type == "iscsi-direct": test.cancel("iSCSI-direct pool is not supported in current" "libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: test.fail("Expect pool '%s' doesn't exist." 
% pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: test.fail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: test.fail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). 
multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) kwargs = { 'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal, 'emulated_image': "emulated-image", 'pool_target': pool_target, 'iscsi_initiator': iscsi_initiator } params.update(kwargs) try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(**params) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=poolxml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Update pool name if new_pool_name: if "/" in new_pool_name: new_pool_name = new_pool_name.replace("/", "\/") logging.debug(new_pool_name) p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) p_xml.name = new_pool_name del p_xml.uuid poolxml = p_xml.xml logging.debug("XML after update pool name:\n%s" % p_xml) # Update host name if same_source_test: s_xml = p_xml.get_source() s_xml.host_name = "192.168.1.1" p_xml.set_source(s_xml) poolxml = p_xml.xml logging.debug("XML after update host name:\n%s" % p_xml) if customize_initiator_iqn: initiator_iqn = params.get("initiator_iqn", "iqn.2018-07.com.virttest:pool.target") p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) s_node = p_xml.xmltreefile.find('/source') i_node = ET.SubElement(s_node, 'initiator') ET.SubElement(i_node, 'iqn', {'name': initiator_iqn}) p_xml.xmltreefile.write() poolxml = p_xml.xml logging.debug('XML after add Multi-IQN:\n%s' % p_xml) # Step (4) # Undefine pool if not same_source_test: result = virsh.pool_undefine(pool_name) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(poolxml, debug=True) # Give error msg when exit status is not expected if "/" in new_pool_name and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if "." in new_pool_name and result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if same_source_test and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 " error_msg += "is helpful for tracing this bug." 
logging.error(error_msg) utlv.check_exit_status(result, status_error) if not result.exit_status: # Step (6) # Buid pool # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if new_pool_name: pool_name = new_pool_name if pool_type != "logical": result = virsh.pool_build(pool_name, build_option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, debug=True, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. 
if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_items = ["Capacity", "Allocation", "Available"] for i in check_items: check_pool_info(pool_info, i, new_info[i]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): test.fail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(**params) utlv.setup_or_cleanup_iscsi(False) except exceptions.TestFail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(poolxml): os.remove(poolxml)
def check_ipt_rules(check_ipv4=True, check_ipv6=False):
    """
    Check iptables for network/interface
    """
    br_name = ast.literal_eval(net_bridge)["name"]
    net_forward = ast.literal_eval(params.get("net_forward", "{}"))
    net_ipv4 = params.get("net_ipv4")
    net_ipv6 = params.get("net_ipv6")
    net_dev_in = ""
    net_dev_out = ""
    if "dev" in net_forward:
        net_dev_in = " -i %s" % net_forward["dev"]
        net_dev_out = " -o %s" % net_forward["dev"]
    # libvirt >= 5.1.0 uses its own chains instead of the built-in ones
    if libvirt_version.version_compare(5, 1, 0):
        input_chain = "LIBVIRT_INP"
        output_chain = "LIBVIRT_OUT"
        postrouting_chain = "LIBVIRT_PRT"
        forward_filter = "LIBVIRT_FWX"
        forward_in = "LIBVIRT_FWI"
        forward_out = "LIBVIRT_FWO"
    else:
        input_chain = "INPUT"
        output_chain = "OUTPUT"
        postrouting_chain = "POSTROUTING"
        forward_filter = "FORWARD"
        forward_in = "FORWARD"
        forward_out = "FORWARD"
    ipt_rules = (
        "%s -i %s -p udp -m udp --dport 53 -j ACCEPT" % (input_chain, br_name),
        "%s -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % (input_chain, br_name),
        "{0} -i {1} -o {1} -j ACCEPT".format(forward_filter, br_name),
        "%s -o %s -j REJECT --reject-with icmp" % (forward_in, br_name),
        "%s -i %s -j REJECT --reject-with icmp" % (forward_out, br_name))
    if check_ipv4:
        ipv4_rules = list(ipt_rules)
        ipv4_rules.extend(
            ["%s -i %s -p udp -m udp --dport 67 -j ACCEPT" % (input_chain, br_name),
             "%s -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % (input_chain, br_name),
             "%s -o %s -p udp -m udp --dport 68 -j ACCEPT" % (output_chain, br_name),
             "%s -o %s -p udp -m udp --dport 68 "
             "-j CHECKSUM --checksum-fill" % (postrouting_chain, br_name)])
        ctr_rule = ""
        nat_rules = []
        if "mode" in net_forward and net_forward["mode"] == "nat":
            nat_port = ast.literal_eval(params.get("nat_port"))
            p_start = nat_port["start"]
            p_end = nat_port["end"]
            ctr_rule = " -m .* RELATED,ESTABLISHED"
            nat_rules = [("{0} -s {1} ! -d {1} -p tcp -j MASQUERADE"
                          " --to-ports {2}-{3}".format(postrouting_chain, net_ipv4,
                                                       p_start, p_end)),
                         ("{0} -s {1} ! -d {1} -p udp -j MASQUERADE"
                          " --to-ports {2}-{3}".format(postrouting_chain, net_ipv4,
                                                       p_start, p_end)),
                         ("{0} -s {1} ! -d {1}"
                          " -j MASQUERADE".format(postrouting_chain, net_ipv4))]
        if nat_rules:
            ipv4_rules.extend(nat_rules)
        if (net_ipv4 and "mode" in net_forward
                and net_forward["mode"] in ["nat", "route"]):
            rules = [("%s -d %s%s -o %s%s -j ACCEPT"
                      % (forward_in, net_ipv4, net_dev_in, br_name, ctr_rule)),
                     ("%s -s %s -i %s%s -j ACCEPT"
                      % (forward_out, net_ipv4, br_name, net_dev_out))]
            ipv4_rules.extend(rules)
        output = process.run('iptables-save', shell=True).stdout_text
        logging.debug("iptables: %s", output)
        if "mode" in net_forward and net_forward["mode"] == "open":
            if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M):
                test.fail("Find iptable rule for open mode")
            utils_libvirtd.libvirtd_restart()
            output_again = process.run('iptables-save', shell=True).stdout_text
            if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M):
                test.fail("Find iptable rule for open mode after restart "
                          "libvirtd")
            else:
                logging.info("Can't find iptable rule for open mode as expected")
        else:
            for ipt in ipv4_rules:
                if not re.search(r"%s" % ipt, output, re.M):
                    test.fail("Can't find iptable rule:\n%s" % ipt)
        return ipv4_rules
    if check_ipv6:
        ipv6_rules = list(ipt_rules)
        ipv6_rules.extend([
            ("INPUT -i %s -p udp -m udp --dport 547 -j ACCEPT" % br_name)])
        if (net_ipv6 and "mode" in net_forward
                and net_forward["mode"] in ["nat", "route"]):
            rules = [("%s -d %s%s -o %s -j ACCEPT"
                      % (forward_in, net_ipv6, net_dev_in, br_name)),
                     ("%s -s %s -i %s%s -j ACCEPT"
                      % (forward_out, net_ipv6, br_name, net_dev_out))]
            ipv6_rules.extend(rules)
        output = process.run("ip6tables-save", shell=True).stdout_text
        logging.debug("ip6tables: %s", output)
        if "mode" in net_forward and net_forward["mode"] == "open":
            if re.search(r"%s|%s" % (net_ipv6, br_name), output, re.M):
                test.fail("Find ip6table rule for open mode")
            utils_libvirtd.libvirtd_restart()
            output_again = process.run('ip6tables-save', shell=True).stdout_text
            if re.search(r"%s|%s" % (net_ipv6, br_name), output_again, re.M):
                test.fail("Find ip6table rule for open mode after restart "
                          "libvirtd")
        else:
            for ipt in ipv6_rules:
                if not re.search(r"%s" % ipt, output, re.M):
                    test.fail("Can't find ip6table rule:\n%s" % ipt)
        return ipv6_rules
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [ vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num ] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpus_list = utils.cpu_online_map() logging.info("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) # Do not apply S3/S4 on power if 'power' not in cpu_util.get_cpu_arch(): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: 
%s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu if vcpu_unplug: # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option)
def run_virsh_numatune(test, params, env): """ Test numa tuning 1) Positive testing 1.1) get the current numa parameters for a running/shutoff guest 1.2) set the current numa parameters for a running/shutoff guest 1.2.1) set valid 'mode' parameters 1.2.2) set valid 'nodeset' parameters 2) Negative testing 2.1) get numa parameters 2.1.1) invalid options 2.1.2) stop cgroup service 2.2) set numa parameters 2.2.1) invalid 'mode' parameters 2.2.2) invalid 'nodeset' parameters 2.2.3) change 'mode' for a running guest and 'mode' is not 'strict' 2.2.4) change 'nodeset' for running guest with mode of 'interleave' 'interleave' or 'preferred' numa mode 2.2.5) stop cgroup service """ # Run test case vm_name = params.get("vms") vm = env.get_vm(vm_name) original_vm_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) cgconfig_service = utils_cgroup.CgconfigService() status_error = params.get("status_error", "no") libvirtd = params.get("libvirtd", "on") cgconfig = params.get("cgconfig", "on") start_vm = params.get("start_vm", "no") change_parameters = params.get("change_parameters", "no") # Make sure vm is down if start not requested if start_vm == "no" and vm.is_alive(): vm.destroy() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_numa_parameter(params) else: set_numa_parameter(params) if cgconfig == "off": # Need to shutdown a running guest before stopping cgconfig service # and will start the guest after restarting libvirtd service if vm.is_alive(): vm.destroy() if cgconfig_service.cgconfig_is_running(): cgconfig_service.cgconfig_stop() # Refresh libvirtd service to get latest cgconfig service change if libvirtd == "restart": utils_libvirtd.libvirtd_restart() # Recover previous running guest if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive() and start_vm == "yes"): vm.start() if status_error == "yes": if change_parameters == "no": get_numa_parameter(params) else: set_numa_parameter(params) # Recover cgconfig and libvirtd service if not cgconfig_service.cgconfig_is_running(): cgconfig_service.cgconfig_start() utils_libvirtd.libvirtd_restart() finally: vm.destroy() # Restore guest, first remove existing original_vm_xml.undefine() # Recover based on original XML original_vm_xml.define()
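
# An illustrative sketch (not the test's own helper) of what set_numa_parameter()
# in the numatune test above ultimately drives: a 'virsh numatune' call built
# from the 'mode' and 'nodeset' params. The values are example placeholders.
def build_numatune_cmd(vm_name, mode="strict", nodeset="0", live=True):
    options = " --live" if live else " --config"
    return ("virsh numatune %s --mode %s --nodeset %s%s"
            % (vm_name, mode, nodeset, options))


# build_numatune_cmd("guest1", mode="strict", nodeset="0-1")
# -> 'virsh numatune guest1 --mode strict --nodeset 0-1 --live'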
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") new_pool_name = params.get("new_pool_name", "") build_option = params.get("build_option", "") same_source_test = "yes" == params.get("same_source_test", "no") customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no") # The file for dumped pool xml poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: test.fail("Expect pool '%s' doesn't exist." 
% pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if vol_name in item[0]: found = True break if found: logging.debug( "Find volume '%s' in pool '%s'.", vol_name, pool_name) else: test.fail( "Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: test.fail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). 
multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal} try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=poolxml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Update pool name if new_pool_name: if "/" in new_pool_name: new_pool_name = new_pool_name.replace("/", "\/") logging.debug(new_pool_name) p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) p_xml.name = new_pool_name del p_xml.uuid poolxml = p_xml.xml logging.debug("XML after update pool name:\n%s" % p_xml) # Update host name if same_source_test: s_xml = p_xml.get_source() s_xml.host_name = "192.168.1.1" p_xml.set_source(s_xml) poolxml = p_xml.xml logging.debug("XML after update host name:\n%s" % p_xml) if customize_initiator_iqn: initiator_iqn = params.get("initiator_iqn", "iqn.2018-07.com.virttest:pool.target") p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) s_node = p_xml.xmltreefile.find('/source') i_node = ET.SubElement(s_node, 'initiator') ET.SubElement(i_node, 'iqn', {'name': initiator_iqn}) p_xml.xmltreefile.write() poolxml = p_xml.xml logging.debug('XML after add Multi-IQN:\n%s' % p_xml) # Step (4) # Undefine pool if not same_source_test: result = virsh.pool_undefine(pool_name) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(poolxml, debug=True) # Give error msg when exit status is not expected if "/" in new_pool_name and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if "." in new_pool_name and result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if same_source_test and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 " error_msg += "is helpful for tracing this bug." 
logging.error(error_msg) utlv.check_exit_status(result, status_error) if not result.exit_status: # Step (6) # Buid pool # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if new_pool_name: pool_name = new_pool_name if pool_type != "logical": result = virsh.pool_build(pool_name, build_option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, debug=True, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. 
if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_items = ["Capacity", "Allocation", "Available"] for i in check_items: check_pool_info(pool_info, i, new_info[i]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): test.fail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) utlv.setup_or_cleanup_iscsi(False) except exceptions.TestFail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(poolxml): os.remove(poolxml)
def run(test, params, env): """ Test interface devices update """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) network_name = params.get('network_name', 'default') new_network_name = params.get("net_name") expect_error = "yes" == params.get("status_error", "no") expect_err_msg = params.get("expect_err_msg") iface_driver = params.get("iface_driver") iface_driver_host = params.get("iface_driver_host") iface_driver_guest = params.get("iface_driver_guest") iface_model = params.get("iface_model") iface_mtu = params.get("iface_mtu") iface_rom = params.get("iface_rom") iface_filter = params.get("iface_filter") iface_boot = params.get('iface_boot') iface_coalesce = params.get('iface_coalesce') new_iface_driver = params.get("new_iface_driver") new_iface_driver_host = params.get("new_iface_driver_host") new_iface_driver_guest = params.get("new_iface_driver_guest") new_iface_model = params.get("new_iface_model") new_iface_rom = params.get("new_iface_rom") new_iface_inbound = params.get("new_iface_inbound") new_iface_outbound = params.get("new_iface_outbound") new_iface_link = params.get("new_iface_link") new_iface_source = params.get("new_iface_source") new_iface_target = params.get("new_iface_target") new_iface_addr = params.get("new_iface_addr") new_iface_filter = params.get("new_iface_filter") new_iface_mtu = params.get("new_iface_mtu") new_iface_type = params.get("new_iface_type") create_new_net = "yes" == params.get("create_new_net") new_iface_alias = params.get("new_iface_alias") new_iface_coalesce = params.get('new_iface_coalesce') cold_update = "yes" == params.get("cold_update", "no") del_addr = "yes" == params.get("del_address") del_rom = "yes" == params.get("del_rom") del_filter = "yes" == params.get("del_filter") check_libvirtd = "yes" == params.get("check_libvirtd") new_iface_filter_parameters = eval( params.get("new_iface_filter_parameters", "{}")) rules = eval(params.get("rules", "{}")) del_mac = "yes" == params.get("del_mac", "no") del_coalesce = 'yes' == params.get('del_coalesce', 'no') del_net_bandwidth = 'yes' == params.get('del_net_bandwidth', 'no') # Backup the vm xml for recover at last vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) netxml_backup = network_xml.NetworkXML.new_from_net_dumpxml(network_name) try: # Prepare network netxml = network_xml.NetworkXML.new_from_net_dumpxml(network_name) logging.debug('Network xml before update:\n%s', netxml) if del_net_bandwidth: netxml.del_element('/bandwidth') logging.debug('Network xml after update:\n%s', netxml) # According to the different os find different file for rom if (iface_rom and "file" in eval(iface_rom) and "%s" in eval(iface_rom)['file']): if rpm.RpmBackend().check_installed('ipxe-roms-qemu', '20200823'): logging.debug("Update the file path since " "ipxe-20200823-5:") iface_rom_new = iface_rom.replace('qemu-kvm', 'ipxe/qemu') iface_rom = iface_rom_new if os.path.exists(eval(iface_rom)['file'] % "pxe"): iface_rom = iface_rom % "pxe" elif os.path.exists(eval(iface_rom)['file'] % "efi"): iface_rom = iface_rom % "efi" else: logging.error("Can not find suitable rom file") iface_dict_bef = {} iface_dict_aft = {} names = locals() # Collect need update items in 2 dicts for both start vm before and after update_list_bef = [ "driver", 'driver_host', 'driver_guest', "model", "mtu", "rom", "filter", 'boot', 'coalesce' ] for update_item_bef in update_list_bef: if names['iface_' + update_item_bef]: iface_dict_bef.update( {update_item_bef: names['iface_' + update_item_bef]}) update_list_aft = [ "driver", 
"driver_host", "driver_guest", "model", "rom", "inbound", "outbound", "link", "source", "target", "addr", "filter", "mtu", "type", "alias", "filter_parameters", "coalesce" ] for update_item_aft in update_list_aft: if names["new_iface_" + update_item_aft]: iface_dict_aft.update( {update_item_aft: names["new_iface_" + update_item_aft]}) logging.info("iface_dict_bef is %s, iface_dict_aft is %s", iface_dict_bef, iface_dict_aft) del_list = [ "del_addr", "del_rom", "del_filter", "del_mac", "del_coalesce" ] for del_item in del_list: if names[del_item]: iface_dict_aft.update({del_item: names[del_item]}) # Operations before updating vm's iface xml if iface_boot: disk_boot = params.get('disk_book', 1) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Remove os boot config vm_os = vmxml.os vm_os.del_boots() vmxml.os = vm_os # Add boot config to disk disk = vmxml.get_devices('disk')[0] target_dev = disk.target.get('dev', '') logging.debug('Will set boot order %s to device %s', disk_boot, target_dev) vmxml.set_boot_order_by_target_dev(target_dev, disk_boot) vmxml.sync() # Update vm interface with items in iface_dict_bef and start it if iface_dict_bef: libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict_bef) logging.info("vm xml is %s", vm.get_xml()) if not cold_update: vm.start() if iface_mtu: # Do check for mtu size after start vm target_dev = libvirt.get_interface_details(vm_name)[0]['interface'] cmd = "ip link show %s | grep 'mtu %s'" % (target_dev, eval(iface_mtu)['size']) def check_mtu(): """ Check the mtu setting take effect for interface """ ret = process.run(cmd, ignore_status=True, shell=True) if ret.exit_status: test.fail("Can not find mtu setting in cmd result") check_mtu() utils_libvirtd.libvirtd_restart() check_mtu() # Create new network if need if create_new_net: new_net_xml = libvirt.create_net_xml(new_network_name, params) new_net_xml.sync() # Do update for iface_driver logging.info('Creating new iface xml.') new_iface_xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict_aft) bef_pid = process.getoutput("pidof -s libvirtd") ret = virsh.update_device(vm_name, new_iface_xml, ignore_status=True, debug=True) libvirt.check_exit_status(ret, expect_error) if check_libvirtd: aft_pid = process.getoutput("pidof -s libvirtd") if aft_pid != bef_pid: test.fail("libvirtd crash after update-device!") else: logging.info("libvirtd do not crash after update-device!") if expect_error: real_err_msg = ret.stderr.strip() if not re.search(expect_err_msg, real_err_msg, re.IGNORECASE): test.fail("The real error msg:'%s' does not match expect one:" '%s' % (real_err_msg, expect_err_msg)) else: logging.info("Get expect result: %s", real_err_msg) else: if new_iface_inbound: iface_bandwidth = {} iface_bandwidth = vm_xml.VMXML.get_iftune_params(vm_name) for bound_para in ["inbound", "outbound"]: for tune_para in ["average", "peak", "burst"]: get_value = iface_bandwidth.get(bound_para).get( tune_para) expect_value = eval(names["new_iface_" + bound_para]).get(tune_para) logging.info("Get value for %s:%s is %s, expect is %s", bound_para, tune_para, get_value, expect_value) if get_value != expect_value: test.fail("Get value is not equal to expect") vmxml_aft = vm_xml.VMXML.new_from_dumpxml(vm_name) iface_aft = list(vmxml_aft.get_iface_all().values())[0] if new_iface_link: iface_link_value = iface_aft.find('link').get('state') if iface_link_value == new_iface_link: logging.info("Find link state is %s in xml", new_iface_link) # Checking the statue in guest mac_addr = iface_aft.find('mac').get('address') 
state_map = "%s.*\n.*%s" % (iface_link_value.upper(), mac_addr) session = vm.wait_for_serial_login() logging.info("ip link output:%s", session.cmd_output("ip link")) if_name = utils_net.get_net_if(runner=session.cmd_output, state=state_map)[0] if not check_iface_link(session, mac_addr, new_iface_link): test.fail('iface link check inside vm failed.') session.close() if if_name: logging.info("Find iface state %s for %s", iface_link_value, mac_addr) else: test.fail( "Can not find iface with mac %s and state %s" % (mac_addr, iface_link_value)) else: test.fail( "Check fail to get link state, expect %s, but get %s" % (iface_link_value, new_iface_link)) if create_new_net and new_iface_source: iface_source_value = iface_aft.find('source').get('network') if iface_source_value == eval(new_iface_source)['network']: logging.info("Get %s in xml as set", iface_source_value) else: test.fail("Get source %s is not equal to set %s" % (iface_source_value, new_iface_source)) if new_iface_filter: iface_filter_value = iface_aft.find('filterref').get('filter') if iface_filter_value == new_iface_filter: logging.info("Get %s in xml as set", iface_filter_value) else: test.fail("Get filter %s is not equal to set %s" % (iface_filter_value, new_iface_filter)) if new_iface_filter_parameters: ebtables_outputs = process.run("ebtables -t nat -L", shell=True).stdout_text for rule in rules: if rule not in ebtables_outputs: test.fail( "Can not find the corresponding rule after update filter with parameters!" ) if del_filter: # if the filter is deleted, it should not exists in the xml and the rules should be deleted as well iface_filter_value = iface_aft.find('filterref') if iface_filter_value is not None: test.fail("After delete, the filter still exists: %s" % iface_filter_value) ebtables_outputs = process.run("ebtables -t nat -L", shell=True).stdout_text logging.debug("after nwfilter deleted, ebtables rules are %s" % ebtables_outputs) time.sleep(5) entries_num = re.findall(r'entries:\s+(\d)', ebtables_outputs) for i in entries_num: if i != '0': test.fail("After delete, the rules are still exists!") if new_iface_alias: iface_alias_value = iface_aft.find('alias').get('name') if iface_alias_value == eval(new_iface_alias)['name']: logging.info("Get %s in xml as set", iface_alias_value) else: test.fail("Get alias %s is not equal to set %s" % (iface_alias_value, new_iface_alias)) if 'update_coalesce' in params['name'] or new_iface_coalesce: iface_coalesce_val = iface_aft.find('coalesce').find( 'rx').find('frames').get('max') if iface_coalesce_val == str(eval(new_iface_coalesce)['max']): logging.info('coalesce update check PASS.') else: test.fail('coalesce value not updated.') if del_coalesce: if iface_aft.find('coalesce') is None: logging.info('coalesce delete check PASS.') else: test.fail('coalesce not deleted.') finally: vmxml_backup.sync() netxml_backup.sync() if create_new_net: new_net_xml.undefine()
def run(test, params, env): """ Test blkio tuning 1) Positive testing 1.1) get the current blkio parameters for a running/shutoff guest 1.2) set the current blkio parameters for a running/shutoff guest 2) Negative testing 2.1) get blkio parameters for a running/shutoff guest 2.2) set blkio parameters running/shutoff guest """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) cg = utils_cgroup.CgconfigService() cgconfig = params.get("cgconfig", "on") libvirtd = params.get("libvirtd", "on") start_vm = params.get("start_vm", "yes") status_error = params.get("status_error", "no") change_parameters = params.get("change_parameters", "no") original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Make sure vm is down if start not requested if start_vm == "no" and vm and vm.is_alive(): vm.destroy() cmd = "cat /sys/block/sda/queue/scheduler" iosche = results_stdout_52lts(process.run(cmd, shell=True)) logging.debug("iosche value is:%s", iosche) oldmode = re.findall("\[(.*?)\]", iosche)[0] with open('/sys/block/sda/queue/scheduler', 'w') as scf: if 'cfq' in iosche: scf.write('cfq') elif 'bfq' in iosche: scf.write('bfq') else: test.fail('Unknown scheduler in "/sys/block/sda/queue/scheduler"') test_dict = dict(params) test_dict['vm'] = vm # positive and negative testing cgstop = False try: if start_vm == "yes" and not vm.is_alive(): vm.start() vm.wait_for_login() if status_error == "no": if change_parameters == "no": get_blkio_parameter(test, test_dict, cgstop) else: set_blkio_parameter(test, test_dict, cgstop) if cgconfig == "off": # If running, then need to shutdown a running guest before # stopping cgconfig service and will start the guest after # restarting libvirtd service if cg.cgconfig_is_running(): if vm.is_alive(): vm.destroy() cg.cgconfig_stop() cgstop = True # If we stopped cg, then refresh libvirtd service # to get latest cgconfig service change; otherwise, # if no cg change restart of libvirtd is pointless if cgstop and libvirtd == "restart": try: utils_libvirtd.libvirtd_restart() finally: # Not running is not a good thing, but it does happen # and it will affect other tests if not utils_libvirtd.libvirtd_is_running(): test.fail("libvirt service is not running!") # Recover previous running guest if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive() and start_vm == "yes"): vm.start() if status_error == "yes": if change_parameters == "no": get_blkio_parameter(test, test_dict, cgstop) else: set_blkio_parameter(test, test_dict, cgstop) finally: # Restore guest original_vm_xml.sync() with open('/sys/block/sda/queue/scheduler', 'w') as scf: scf.write(oldmode) # If we stopped cg, then recover and refresh libvirtd to recognize if cgstop: cg.cgconfig_start() utils_libvirtd.libvirtd_restart()
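# Small illustrative helpers (not part of the original test): the blkio case above
# reads /sys/block/sda/queue/scheduler and picks the active scheduler out of the
# bracketed entry, e.g. "noop deadline [cfq]", before forcing cfq or bfq.  A
# standalone sketch of those two steps, with the device name as a hypothetical
# parameter:
import re


def get_active_scheduler(device="sda"):
    """Return the currently selected I/O scheduler for a block device."""
    path = "/sys/block/%s/queue/scheduler" % device
    with open(path) as sched_file:
        content = sched_file.read()
    # The kernel marks the active elevator with square brackets.
    match = re.findall(r"\[(.*?)\]", content)
    return match[0] if match else ""


def set_scheduler(device, name):
    """Echo a scheduler name into the sysfs node, as the test does for cfq/bfq."""
    with open("/sys/block/%s/queue/scheduler" % device, "w") as sched_file:
        sched_file.write(name)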
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: test.cancel("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': test.cancel("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError as e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e test.fail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError) as e: error_msg = "Test failed in positive case.\n error: %s\n" % e test.fail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): test.cancel("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM") if test_type == "contrast": # wait for vm finish starting completely time.sleep(delay) if not (mb_enable and not tlbfs_enable): logging.debug("starting analyzing the hugepage usage...") pid = vms[-1].get_pid() started_free = utils_memory.get_num_huge_pages_free() # Get the thp usage from /proc/pid/smaps started_anon = utils_memory.get_num_anon_huge_pages(pid) static_used = non_started_free - started_free hugepage_used = static_used * page_size if test_type == "contrast": # get qemu-kvm memory consumption by top cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid rate = process.run(cmd, ignore_status=False, verbose=True, shell=True).stdout_text.strip() qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100 logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d", rate, qemu_kvm_used, hugepage_used) if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1): test.fail("Error for hugepage usage") if test_type == "stress": if non_started_free <= started_free: logging.debug("hugepage usage:%d -> %d", non_started_free, started_free) test.fail("Error for hugepage usage with stress") if mb_enable is not True: if static_used > 0: test.fail("VM use static hugepage without" " memoryBacking element") if thp_enable is not True and started_anon > 0: test.fail("VM use transparent hugepage, while" " it's disabled") else: if tlbfs_enable is not True: if static_used > 0: test.fail("VM use static hugepage without tlbfs" " mounted") if thp_enable and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage") else: if shp_num > 0: if static_used <= 0: test.fail("VM doesn't use static" " hugepage") else: if static_used > 0: test.fail("VM use static hugepage," " while it's set to zero") if thp_enable is not True: if started_anon > 0: test.fail("VM use transparent hugepage," " while it's disabled") else: if shp_num == 0 and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage, while static" " hugepage is disabled") 
    finally:
        # Close all guest sessions opened during the test
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        # Revert the memoryBacking tag change made during setup
        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        # Restore the original hugetlbfs mount state
        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
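# Illustrative sketch (not from the original test): the hugepage checks above boil
# down to simple accounting.  Static hugepage consumption is the drop in free huge
# pages after the guest starts, converted to kB with the page size; for the
# "contrast" case that figure is compared against qemu-kvm's resident memory within
# a configurable error range.  The helper below just restates that arithmetic.
def hugepage_usage_ok(free_before, free_after, page_size_kb,
                      qemu_kvm_used_kb, err_range=1.25):
    """
    Return (used_kb, ok): used_kb is the static hugepage memory the guest
    consumed, ok tells whether qemu-kvm's reported usage falls inside the
    allowed error range, mirroring the check in the "contrast" test type.
    """
    static_used_pages = free_before - free_after
    used_kb = static_used_pages * page_size_kb
    ok = abs(qemu_kvm_used_kb - used_kb) <= used_kb * (err_range - 1)
    return used_kb, ok


# Example with made-up numbers: 512 x 2048 kB pages consumed, ~1 GiB reported.
# print(hugepage_usage_ok(1024, 512, 2048, 1050000))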
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml, vcpupin and inside domain. 3. Plug vcpu for the domain. 4. Repeat step 2 to check again. 5. Control domain(save, managedsave, s3, s4, etc.). 6. Repeat step 2 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 2 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 2 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave related actions). 12. Repeat step 2 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 2 to check again. 15. Recover test environment. """ def manipulate_domain(vm_name, vm_operation, recover=False): """ Operate domain to given state or recover it. :params vm_name: Name of the VM domain :params vm_operation: Operation to be performed on VM domain like save, managedsave, suspend :params recover: flag to inform whether to set or reset vm_operation """ save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") if not recover: if vm_operation == "save": save_option = "" result = virsh.save(vm_name, save_file, save_option, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "managedsave": managedsave_option = "" result = virsh.managedsave(vm_name, managedsave_option, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s3": suspend_target = "mem" result = virsh.dompmsuspend(vm_name, suspend_target, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s4": suspend_target = "disk" result = virsh.dompmsuspend(vm_name, suspend_target, ignore_status=True, debug=True) libvirt.check_exit_status(result) # Wait domain state change: 'in shutdown' -> 'shut off' utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5) elif vm_operation == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "reboot": vm.reboot() vm_uptime_init = vm.uptime() else: logging.debug("No operation for the domain") else: if vm_operation == "save": if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) libvirt.check_exit_status(result) os.remove(save_file) else: test.error("No save file for domain restore") elif vm_operation in ["managedsave", "s4"]: result = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s3": suspend_target = "mem" result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "suspend": result = virsh.resume(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "reboot": pass else: logging.debug("No need recover the domain") def online_new_vcpu(vm, vcpu_plug_num): """ For Fedora/RHEL7 guests, udev can not online hot-added CPUs automatically, (refer to BZ#968811 for details) so enable them manually. 
:params vm: VM object :params vcpu_plug_num: Hotplugged vcpu count """ cpu_is_online = [] session = vm.wait_for_login() for i in range(1, int(vcpu_plug_num)): cpu_is_online.append(False) cpu = "/sys/devices/system/cpu/cpu%s/online" % i cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu) logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o) if cmd_s != 0: logging.error("Can not find cpu %s in domain", i) else: if cmd_o.strip() == "0": if session.cmd_status("echo 1 > %s" % cpu) == 0: cpu_is_online[i-1] = True else: logging.error("Fail to enable cpu %s online", i) else: cpu_is_online[i-1] = True session.close() return False not in cpu_is_online def check_setvcpus_result(cmd_result, expect_error): """ Check command result. For setvcpus, pass unsupported commands(plug or unplug vcpus) by checking command stderr. :params cmd_result: Command result :params expect_error: Whether to expect error True or False """ if cmd_result.exit_status != 0: if expect_error: logging.debug("Expect fail: %s", cmd_result.stderr) return # setvcpu/hotplug is only available as of qemu 1.5 and it's still # evolving. In general the addition of vcpu's may use the QMP # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands. # The removal of vcpu's may work in qemu 1.5 due to how cpu_set # can set vcpus online or offline; however, there doesn't appear # to be a complementary cpu-del feature yet, so we can add, but # not delete in 1.6. # A 1.6 qemu will not allow the cpu-add command to be run on # a configuration using <os> machine property 1.4 or earlier. # That is the XML <os> element with the <type> property having # an attribute 'machine' which is a tuple of 3 elements separated # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5". if re.search("unable to execute QEMU command 'cpu-add'", cmd_result.stderr): test.cancel("guest <os> machine property may be too" " old to allow hotplug") # A qemu older than 1.5 or an unplug for 1.6 will result in # the following failure. In general, any time libvirt determines # it cannot support adding or removing a vCPU... 
if re.search("cannot change vcpu count of this domain", cmd_result.stderr): test.cancel("Unsupport virsh setvcpu hotplug") # Maybe QEMU doesn't support unplug vcpu if re.search("Operation not supported: qemu didn't unplug the vCPUs", cmd_result.stderr): test.cancel("Your qemu unsupport unplug vcpu") # Qemu guest agent version could be too low if re.search("The command guest-get-vcpus has not been found", cmd_result.stderr): err_msg = "Your agent version is too low: %s" % cmd_result.stderr logging.warning(err_msg) test.cancel(err_msg) # Attempting to enable more vCPUs in the guest than is currently # enabled in the guest but less than the maximum count for the VM if re.search("requested vcpu count is greater than the count of " "enabled vcpus in the domain", cmd_result.stderr): logging.debug("Expect fail: %s", cmd_result.stderr) return # Otherwise, it seems we have a real error test.fail("Run failed with right command: %s" % cmd_result.stderr) else: if expect_error: test.fail("Expect fail but run successfully") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_uptime_init = 0 vm_operation = params.get("vm_operation", "null") vcpu_max_num = int(params.get("vcpu_max_num")) vcpu_current_num = int(params.get("vcpu_current_num")) vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = int(params.get("vcpu_plug_num")) vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = int(params.get("vcpu_unplug_num")) vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480")) setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") with_stress = "yes" == params.get("run_stress", "no") iterations = int(params.get("test_itr", 1)) topology_correction = "yes" == params.get("topology_correction", "no") # Init expect vcpu count values expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num, 'cur_config': vcpu_current_num, 'cur_live': vcpu_current_num, 'guest_live': vcpu_current_num} if check_after_plug_fail: expect_vcpu_num_bk = expect_vcpu_num.copy() # Init expect vcpu pin values expect_vcpupin = {} result_failed = 0 # Init cpu-list for vcpupin host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF') if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): test.cancel("We need more cpus on host in this case for the cpu-list" "=%s. But current number of cpu on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpus_list = cpu_util.cpu_online_list() logging.debug("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num, topology_correction=topology_correction) # Do not apply S3/S4 on power cpu_arch = platform.machine() if cpu_arch in ('x86_64', 'i386', 'i686'): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() vm_uptime_init = vm.uptime() if with_stress: bt = utils_test.run_avocado_bg(vm, params, test) if not bt: test.cancel("guest stress failed to start") # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test for _ in range(iterations): if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num): logging.error("Expected vcpu check failed") result_failed += 1 # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num['cur_config'] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num['guest_live'] = vcpu_plug_num else: expect_vcpu_num['cur_live'] = vcpu_plug_num expect_vcpu_num['guest_live'] = vcpu_plug_num if not status_error: if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm), vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num): test.fail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, 
setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception as e: test.warn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num['cur_live'] = vcpu_plug_num expect_vcpu_num['guest_live'] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num['guest_live'] = vcpu_plug_num else: expect_vcpu_num['cur_live'] = vcpu_current_num expect_vcpu_num['guest_live'] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 # Unplug vcpu # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable # when VM started , and it required that vcpu 0(id=1) is always # present and non-hotpluggable, which means we can't hotunplug these # vcpus directly. So we can either hotplug more vcpus before we do # hotunplug, or modify the 'hotpluggable' attribute to 'yes' of the # vcpus except vcpu 0, to make sure libvirt can find appropriate # hotpluggable vcpus to reach the desired target vcpu count. For # simple prepare step, here we choose to hotplug more vcpus. if vcpu_unplug: if setvcpu_option == "--live": logging.info("Hotplug vcpu to the maximum count to make" "sure all these new plugged vcpus are " "hotunpluggable") result = virsh.setvcpus(vm_name, vcpu_max_num, '--live', debug=True) libvirt.check_exit_status(result) # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set # expect_vcpupin to empty expect_vcpupin = {} # Operation of setvcpus is asynchronization, even if it return, # may not mean it is complete, a poll checking of guest vcpu numbers # need to be executed. # So for case of unpluging vcpus from max vcpu number to 1, when # setvcpus return, need continue to obverse if vcpu number is # continually to be unplugged to 1 gradually. 
result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str() if unsupport_str and (unsupport_str in result.stderr): test.cancel("Vcpu hotunplug is not supported in this host:" "\n%s" % result.stderr) try: session = vm.wait_for_login() cmd = "lscpu | grep \"^CPU(s):\"" operation = "setvcpus" prev_output = -1 while True: ret, output = session.cmd_status_output(cmd) if ret: test.error("Run lscpu failed, output: %s" % output) output = output.split(":")[-1].strip() if int(prev_output) == int(output): break prev_output = output time.sleep(5) logging.debug("CPUs available from inside guest after %s - %s", operation, output) if int(output) != vcpu_unplug_num: test.fail("CPU %s failed as cpus are not " "reflected from inside guest" % operation) finally: if session: session.close() check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num['cur_config'] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num['guest_live'] = vcpu_unplug_num else: expect_vcpu_num['cur_live'] = vcpu_unplug_num expect_vcpu_num['guest_live'] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time # (QEMU bug), here we wait for 10 mins then skip the # remaining part of tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception as e: test.warn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num['cur_live'] = vcpu_unplug_num expect_vcpu_num['guest_live'] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num['guest_live'] = vcpu_unplug_num else: expect_vcpu_num['cur_live'] = vcpu_current_num expect_vcpu_num['guest_live'] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands if not utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option): logging.error("Expected vcpu check failed") result_failed += 1 if vm.uptime() < vm_uptime_init: test.fail("Unexpected VM reboot detected in between test") # Recover env finally: if need_mkswap: vm.cleanup_swap() if with_stress: bt.join(ignore_status=True) vm.destroy() backup_xml.sync() if not status_error: if result_failed > 0: test.fail("Test Failed")
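# Illustrative sketch (an assumption, not the module's helper): the plug/unplug
# logic above keeps the expect_vcpu_num dict in sync with the setvcpus flag that
# was used.  "--config" only moves the persistent count, "--guest" only the
# agent-reported count, and a live hotplug moves both the live and guest counts.
# Restated as a small function over the same dict keys:
def apply_setvcpus_expectation(expect, option, count):
    """Return updated expected vcpu counters after a successful virsh setvcpus."""
    expect = dict(expect)
    if option == "--config":
        expect['cur_config'] = count
    elif option == "--guest":
        expect['guest_live'] = count
    else:
        expect['cur_live'] = count
        expect['guest_live'] = count
    return expect


# e.g. apply_setvcpus_expectation({'max_config': 4, 'max_live': 4,
#                                  'cur_config': 1, 'cur_live': 1,
#                                  'guest_live': 1}, "--live", 3)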
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpu_max = int(host_cpu_count) - 1 if pin_cpu_list == "x": pin_cpu_list = str(cpu_max) if pin_cpu_list == "x-y": pin_cpu_list = "0-%s" % cpu_max elif pin_cpu_list == "x,y": pin_cpu_list = "0,%s" % cpu_max elif pin_cpu_list == "x-y,^z": pin_cpu_list = "0-%s,^%s" % (cpu_max, cpu_max) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check 
vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu if vcpu_unplug: # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option)
def run(test, params, env): """ Test numa tuning 1) Positive testing 1.1) get the current numa parameters for a running/shutoff guest 1.2) set the current numa parameters for a running/shutoff guest 1.2.1) set valid 'mode' parameters 1.2.2) set valid 'nodeset' parameters 2) Negative testing 2.1) get numa parameters 2.1.1) invalid options 2.1.2) stop cgroup service 2.2) set numa parameters 2.2.1) invalid 'mode' parameters 2.2.2) invalid 'nodeset' parameters 2.2.3) change 'mode' for a running guest and 'mode' is not 'strict' 2.2.4) change 'nodeset' for running guest with mode of 'interleave' 'interleave' or 'preferred' numa mode 2.2.5) stop cgroup service """ try: utils_misc.find_command("numactl") except ValueError: raise error.TestNAError("Command 'numactl' is missing. You must " "install it.") # Run test case vm_name = params.get("vms") vm = env.get_vm(vm_name) original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) cg = utils_cgroup.CgconfigService() status_error = params.get("status_error", "no") libvirtd = params.get("libvirtd", "on") cgconfig = params.get("cgconfig", "on") start_vm = params.get("start_vm", "no") change_parameters = params.get("change_parameters", "no") # Make sure vm is down if start not requested if start_vm == "no" and vm.is_alive(): vm.destroy() # positive and negative testing ######### cgstop = False try: if status_error == "no": if change_parameters == "no": get_numa_parameter(params, cgstop) else: set_numa_parameter(params, cgstop) if cgconfig == "off": # If running, then need to shutdown a running guest before # stopping cgconfig service and will start the guest after # restarting libvirtd service if cg.cgconfig_is_running(): if vm.is_alive(): vm.destroy() cg.cgconfig_stop() cgstop = True # If we stopped cg, then refresh libvirtd service # to get latest cgconfig service change; otherwise, # if no cg change restart of libvirtd is pointless if cgstop and libvirtd == "restart": try: utils_libvirtd.libvirtd_restart() finally: # Not running is not a good thing, but it does happen # and it will affect other tests if not utils_libvirtd.libvirtd_is_running(): raise error.TestNAError("libvirt service is not running!") # Recover previous running guest if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive() and start_vm == "yes"): vm.start() if status_error == "yes": if change_parameters == "no": get_numa_parameter(params, cgstop) else: set_numa_parameter(params, cgstop) finally: # Restore guest original_vm_xml.sync() # If we stopped cg, then recover and refresh libvirtd to recognize if cgstop: cg.cgconfig_start() utils_libvirtd.libvirtd_restart()
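# Illustrative sketch (an assumption, not part of the original test): the numatune
# case above only checks that the numactl binary exists before running.  When a
# test additionally needs the number of NUMA nodes the host exposes, the usual
# source is "numactl --hardware", whose first line reads "available: N nodes (...)".
import re

from avocado.utils import process


def host_numa_node_count():
    """Return the number of NUMA nodes reported by numactl, or 0 on failure."""
    result = process.run("numactl --hardware", ignore_status=True, shell=True)
    if result.exit_status != 0:
        return 0
    match = re.search(r"available:\s*(\d+)\s+nodes", result.stdout_text)
    return int(match.group(1)) if match else 0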
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: raise error.TestNAError("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': raise error.TestNAError("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError, e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e raise error.TestFail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError), e: error_msg = "Test failed in positive case.\n error: %s\n" % e raise error.TestFail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): raise error.TestNAError("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM")
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get("ip_protocal", "ipv4") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.") def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." 
% pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = { "image_size": "1G", "pre_disk_vol": ["1M"], "source_name": source_name, "source_path": source_path, "source_format": source_format, "persistent": True, "ip_protocal": ip_protocal, } try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." 
% pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type != "scsi" and (pool_type != "dir" or libvirt_version.version_compare(1, 2, 15)): result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_pool_info(pool_info, "Capacity", new_info["Capacity"]) check_pool_info(pool_info, "Allocation", new_info["Allocation"]) check_pool_info(pool_info, "Available", new_info["Available"]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) except error.TestFail, detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(pool_xml): os.remove(pool_xml)
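# Condensed usage sketch (illustrative only) of the pool lifecycle the test above
# walks through, using the same virsh wrappers it already calls.  Error handling,
# the XML round-trip and the type-specific branches are omitted; "demo-pool" and
# the XML path are placeholders.
from virttest import virsh


def pool_lifecycle_demo(pool_xml_path="/tmp/demo-pool.xml", pool_name="demo-pool"):
    """Define, build, start, autostart, then tear down a storage pool."""
    virsh.pool_define(pool_xml_path)              # (5) define from a dumped XML
    virsh.pool_build(pool_name, "")               # (6) build (skipped for disk/logical)
    virsh.pool_start(pool_name)                   # (7) activate the pool
    virsh.pool_autostart(pool_name)               # (9) mark it autostart
    virsh.pool_destroy(pool_name)                 # (12) deactivate
    virsh.pool_autostart(pool_name, "--disable")  # (13) clear autostart
    virsh.pool_undefine(pool_name)                # (24) remove the definition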
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpus_list = utils.cpu_online_map() logging.info("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) # Do not apply S3/S4 on power if 'power' not in cpu_util.get_cpu_arch(): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: 
%s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable # when VM started , and it required that vcpu 0(id=1) is always # present and non-hotpluggable, which means we can't hotunplug these # vcpus directly. So we can either hotplug more vcpus before we do # hotunplug, or modify the 'hotpluggable' attribute to 'yes' of the # vcpus except vcpu 0, to make sure libvirt can find appropriate # hotpluggable vcpus to reach the desired target vcpu count. For # simple prepare step, here we choose to hotplug more vcpus. if vcpu_unplug: if setvcpu_option == "--live": logging.info("Hotplug vcpu to the maximum count to make sure" " all these new plugged vcpus are hotunpluggable") result = virsh.setvcpus(vm_name, vcpu_max_num, '--live', debug=True) libvirt.check_exit_status(result) # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: 
            expect_vcpu_num[3] = vcpu_current_num
            expect_vcpu_num[4] = vcpu_current_num
        if vm_operation != "null":
            # Check vcpu number and related commands
            check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                              setvcpu_option)
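# A minimal sketch of the guest-side step that online_new_vcpu() above relies
# on: a vCPU added with "virsh setvcpus --live" appears in the guest but may
# stay offline until its sysfs 'online' node is written.  The helper name and
# the session parameter (an aexpect-style guest shell session) are assumptions
# for illustration, not the original test helper.
def online_guest_vcpu_sketch(session, cpu_id):
    """
    Bring a hot-plugged vCPU online inside the guest via sysfs.

    :param session: logged-in guest shell session
    :param cpu_id: integer id of the vCPU, e.g. 1 for cpu1
    :return: True if the vCPU reports online afterwards
    """
    online_node = "/sys/devices/system/cpu/cpu%d/online" % cpu_id
    # cpu0 normally has no 'online' node and is always online
    if session.cmd_status("test -f %s" % online_node) != 0:
        return cpu_id == 0
    session.cmd("echo 1 > %s" % online_node)
    return session.cmd_output("cat %s" % online_node).strip() == "1"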
def run(test, params, env): """ Test the tpm virtual devices 1. prepare a guest with different tpm devices 2. check whether the guest can be started 3. check the xml and qemu cmd line, even swtpm for vtpm 4. check tpm usage in guest os """ # Tpm passthrough supported since libvirt 1.0.5. if not libvirt_version.version_compare(1, 0, 5): test.cancel("Tpm device is not supported " "on current libvirt version.") # Tpm passthrough supported since qemu 2.12.0-49. if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False): test.cancel("Tpm device is not supported " "on current qemu version.") tpm_model = params.get("tpm_model") backend_type = params.get("backend_type") backend_version = params.get("backend_version") device_path = params.get("device_path") tpm_num = int(params.get("tpm_num", 1)) # After first start of vm with vtpm, do operations, check it still works vm_operate = params.get("vm_operate") # Sub-operation(e.g.domrename) under vm_operate(e.g.restart) vm_oprt = params.get("vm_oprt") secret_uuid = params.get("secret_uuid") secret_value = params.get("secret_value") # Change encryption state: from plain to encrypted, or reverse. encrypt_change = params.get("encrypt_change") secret_uuid = params.get("secret_uuid") prepare_secret = ("yes" == params.get("prepare_secret", "no")) remove_dev = ("yes" == params.get("remove_dev", "no")) multi_vms = ("yes" == params.get("multi_vms", "no")) # Remove swtpm state file rm_statefile = ("yes" == params.get("rm_statefile", "no")) test_suite = ("yes" == params.get("test_suite", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) no_backend = ("yes" == params.get("no_backend", "no")) status_error = ("yes" == params.get("status_error", "no")) err_msg = params.get("xml_errmsg", "") loader = params.get("loader", "") nvram = params.get("nvram", "") uefi_disk_url = params.get("uefi_disk_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2") # Check tpm chip on host for passthrough testing if backend_type == "passthrough": dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True) logging.debug("dmesg info about tpm:\n %s", dmesg_info) dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info) if dmesg_error: test.cancel(dmesg_error.group()) else: # Try to check host tpm chip version tpm_v = None if re.search("2.0 TPM", dmesg_info): tpm_v = "2.0" if not utils_package.package_install("tpm2-tools"): # package_install() return 'True' if succeed test.error("Failed to install tpm2-tools on host") else: if re.search("1.2 TPM", dmesg_info): tpm_v = "1.2" # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first if not utils_package.package_install("tpm-tools"): test.error("Failed to install tpm-tools on host") # Check host env for vtpm testing elif backend_type == "emulator": if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False): test.cancel("vtpm(emulator backend) is not supported " "on current qemu version.") # Install swtpm pkgs on host for vtpm emulation if not utils_package.package_install("swtpm*"): test.error("Failed to install swtpm swtpm-tools on host") def replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} 
vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk) vm_names = params.get("vms").split() vm_name = vm_names[0] vm = env.get_vm(vm_name) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() os_xml = getattr(vm_xml, "os") host_arch = platform.machine() if backend_type == "emulator" and host_arch == 'x86_64': if not utils_package.package_install("OVMF"): test.error("Failed to install OVMF or edk2-ovmf pkgs on host") if os_xml.xmltreefile.find('nvram') is None: replace_os_disk(vm_xml, vm_name, nvram) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy() vm2 = None if multi_vms: if len(vm_names) > 1: vm2_name = vm_names[1] vm2 = env.get_vm(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) vm2_xml_backup = vm2_xml.copy() else: # Clone additional vms if needed try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to install virt-install on host") vm2_name = "vm2_" + utils_misc.generate_random_string(5) ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360, debug=True) if ret_clone.exit_status: test.error("Need more than one domains, but error occured when virt-clone.") vm2 = vm.clone(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) if vm2.is_alive(): vm2.destroy() service_mgr = service.ServiceManager() def check_dumpxml(vm_name): """ Check whether the added devices are shown in the guest xml :param vm_name: current vm name """ logging.info("------Checking guest dumpxml------") if tpm_model: pattern = '<tpm model="%s">' % tpm_model else: # The default tpm model is "tpm-tis" pattern = '<tpm model="tpm-tis">' # Check tpm model xml_after_adding_device = VMXML.new_from_dumpxml(vm_name) logging.debug("xml after add tpm dev is %s", xml_after_adding_device) if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s tpm device xml " "in the guest xml file." % tpm_model) # Check backend type pattern = '<backend type="%s"' % backend_type if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend type xml for tpm dev " "in the guest xml file." % backend_type) # Check backend version if backend_version: check_ver = backend_version if backend_version != 'none' else '2.0' pattern = '"emulator" version="%s"' % check_ver if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend version xml for tpm dev " "in the guest xml file." % check_ver) # Check device path if backend_type == "passthrough": pattern = '<device path="/dev/tpm0"' if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s device path xml for tpm dev " "in the guest xml file." 
% device_path) # Check encryption secret if prepare_secret: pattern = '<encryption secret="%s" />' % encryption_uuid if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s secret uuid xml for tpm dev " "in the guest xml file." % encryption_uuid) logging.info('------PASS on guest dumpxml check------') def check_qemu_cmd_line(vm, vm_name, domid): """ Check whether the added devices are shown in the qemu cmd line :param vm: current vm :param vm_name: current vm name :param domid: domain id for checking vtpm socket file """ logging.info("------Checking qemu cmd line------") if not vm.get_pid(): test.fail('VM pid file missing.') with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Qemu cmd line info:\n %s", cmdline) # Check tpm model pattern_list = ["-device.%s" % tpm_model] # Check backend type if backend_type == "passthrough": dev_num = re.search(r"\d+", device_path).group() backend_segment = "id=tpm-tpm%s" % dev_num else: # emulator backend backend_segment = "id=tpm-tpm0,chardev=chrtpm" pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment)) # Check chardev socket for vtpm if backend_type == "emulator": pattern_list.append("-chardev.socket,id=chrtpm," "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)) for pattern in pattern_list: if not re.search(pattern, cmdline): if not remove_dev: test.fail("Can not find the %s for tpm device " "in qemu cmd line." % pattern) elif remove_dev: test.fail("%s still exists after remove vtpm and restart" % pattern) logging.info("------PASS on qemu cmd line check------") def check_swtpm(domid, domuuid, vm_name): """ Check swtpm cmdline and files for vtpm. :param domid: domain id for checking vtpm files :param domuuid: domain uuid for checking vtpm state file :param vm_name: current vm name """ logging.info("------Checking swtpm cmdline and files------") # Check swtpm cmdline swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name) if not swtpm_pid: if not remove_dev: test.fail('swtpm pid file missing.') else: return elif remove_dev: test.fail('swtpm pid file still exists after remove vtpm and restart') with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Swtpm cmd line info:\n %s", cmdline) pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"] if prepare_secret: pattern_list.extend(["--key", "--migration-key"]) for pattern in pattern_list: if not re.search(pattern, cmdline): test.fail("Can not find the %s for tpm device " "in swtpm cmd line." 
% pattern) # Check swtpm files file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)] file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid) file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name) file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name)) for swtpm_file in file_list: if not os.path.exists(swtpm_file): test.fail("Swtpm file: %s does not exist" % swtpm_file) logging.info("------PASS on Swtpm cmdline and files check------") def get_tpm2_tools_cmd(session=None): """ Get tpm2-tools pkg version and return corresponding getrandom cmd :session: guest console session :return: tpm2_getrandom cmd usage """ cmd = 'rpm -q tpm2-tools' get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text v_tools_list = get_v_tools.strip().split('-') if session: logging.debug("The tpm2-tools version is %s", v_tools_list[2]) v_tools = int(v_tools_list[2].split('.')[0]) return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex" def get_host_tpm_bef(tpm_v): """ Test host tpm function and identify its real version before passthrough Since sometimes dmesg info doesn't include tpm msg, need use tpm-tool or tpm2-tools to try the function. :param tpm_v: host tpm version get from dmesg info :return: host tpm version """ logging.info("------Checking host tpm device before passthrough------") # Try tcsd tool for suspected tpm1.2 chip on host tpm_real_v = tpm_v if tpm_v != "2.0": if not service_mgr.start('tcsd'): # service_mgr.start() return 'True' if succeed if tpm_v == "1.2": test.fail("Host tcsd.serivce start failed") else: # Means tpm_v got nothing from dmesg, log failure here and # go to next 'if' to try tpm2.0 tools. logging.info("Host tcsd.serivce start failed") else: tpm_real_v = "1.2" logging.info("Host tpm version info:") result = process.run("tpm_version", ignore_status=False) logging.debug("[host]# tpm_version\n %s", result.stdout) time.sleep(2) service_mgr.stop('tcsd') if tpm_v != "1.2": # Try tpm2.0 tools if not utils_package.package_install("tpm2-tools"): test.error("Failed to install tpm2-tools on host") tpm2_getrandom_cmd = get_tpm2_tools_cmd() if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.cancel("Both tcsd and tpm2-tools can not work, " "pls check your host tpm version and test env.") else: tpm_real_v = "2.0" logging.info("------PASS on host tpm device check------") return tpm_real_v def test_host_tpm_aft(tpm_real_v): """ Test host tpm function after passthrough :param tpm_real_v: host tpm real version indentified from testing """ logging.info("------Checking host tpm device after passthrough------") if tpm_real_v == "1.2": if service_mgr.start('tcsd'): time.sleep(2) service_mgr.stop('tcsd') test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") elif tpm_real_v == "2.0": tpm2_getrandom_cmd = get_tpm2_tools_cmd() if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") logging.info("------PASS on host tpm device check------") def test_guest_tpm(expect_version, session, expect_fail): """ Test tpm function in guest :param expect_version: guest tpm version, as host version, or emulator specified :param session: Guest session to be tested :param expect_fail: guest tpm is expectedly fail to work """ logging.info("------Checking 
guest tpm device work------") if expect_version == "1.2": # Install tpm-tools and test by tcsd method if not utils_package.package_install(["tpm-tools"], session, 360): test.error("Failed to install tpm-tools package in guest") else: status, output = session.cmd_status_output("systemctl start tcsd") logging.debug("Command output: %s", output) if status: if expect_fail: test.cancel("tpm-crb passthrough only works with host tpm2.0, " "but your host tpm version is 1.2") else: test.fail("Failed to start tcsd.service in guest") else: dev_output = session.cmd_output("ls /dev/|grep tpm") logging.debug("Command output: %s", dev_output) status, output = session.cmd_status_output("tpm_version") logging.debug("Command output: %s", output) if status: test.fail("Guest tpm can not work") else: # If expect_version is tpm2.0, install and test by tpm2-tools if not utils_package.package_install(["tpm2-tools"], session, 360): test.error("Failed to install tpm2-tools package in guest") else: tpm2_getrandom_cmd = get_tpm2_tools_cmd(session) status1, output1 = session.cmd_status_output("ls /dev/|grep tpm") logging.debug("Command output: %s", output1) status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd) logging.debug("Command output: %s", output2) if status1 or status2: if not expect_fail: test.fail("Guest tpm can not work") else: d_status, d_output = session.cmd_status_output("date") if d_status: test.fail("Guest OS doesn't work well") logging.debug("Command output: %s", d_output) elif expect_fail: test.fail("Expect fail but guest tpm still works") logging.info("------PASS on guest tpm device work check------") def run_test_suite_in_guest(session): """ Run kernel test suite for guest tpm. :param session: Guest session to be tested """ logging.info("------Checking kernel test suite for guest tpm------") boot_info = session.cmd('uname -r').strip().split('.') kernel_version = '.'.join(boot_info[:2]) # Download test suite per current guest kernel version parent_path = "https://cdn.kernel.org/pub/linux/kernel" if float(kernel_version) < 5.3: major_version = "5" file_version = "5.3" else: major_version = boot_info[0] file_version = kernel_version src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version) download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz") output = session.cmd_output(download_cmd, timeout=480) logging.debug("Command output: %s", output) # Install neccessary pkgs to build test suite if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360): test.fail("Failed to install specified pkgs in guest OS.") # Unzip the downloaded test suite status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root") if status: test.fail("Uzip failed: %s" % output) # Specify using python2 to run the test suite per supporting test_path = "/root/linux-%s/tools/testing/selftests" % file_version sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path output = session.cmd_output(sed_cmd) logging.debug("Command output: %s", output) # Build and and run the .sh files of test suite status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360) logging.debug("Command output: %s", output) if status: test.fail("Failed to run test suite in guest OS.") for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): test.fail("test suite check failed.") 
logging.info("------PASS on kernel test suite check------") def reuse_by_vm2(tpm_dev): """ Try to add same tpm to a second guest, when it's being used by one guest. :param tpm_dev: tpm device to be added into guest xml """ logging.info("------Trying to add same tpm to a second domain------") vm2_xml.remove_all_device_by_type('tpm') vm2_xml.add_device(tpm_dev) vm2_xml.sync() ret = virsh.start(vm2_name, ignore_status=True, debug=True) if backend_type == "passthrough": if ret.exit_status: logging.info("Expected failure when try to passthrough a tpm" " that being used by another guest") return test.fail("Reuse a passthroughed tpm should not succeed.") elif ret.exit_status: # emulator backend test.fail("Vtpm for each guest should not interfere with each other") try: tpm_real_v = None sec_uuids = [] new_name = "" virsh_dargs = {"debug": True, "ignore_status": False} vm_xml.remove_all_device_by_type('tpm') tpm_dev = Tpm() if tpm_model: tpm_dev.tpm_model = tpm_model if not no_backend: backend = tpm_dev.Backend() if backend_type != 'none': backend.backend_type = backend_type if backend_type == "passthrough": tpm_real_v = get_host_tpm_bef(tpm_v) logging.debug("The host tpm real version is %s", tpm_real_v) if device_path: backend.device_path = device_path if backend_type == "emulator": if backend_version != 'none': backend.backend_version = backend_version if prepare_secret: auth_sec_dict = {"sec_ephemeral": "no", "sec_private": "yes", "sec_desc": "sample vTPM secret", "sec_usage": "vtpm", "sec_name": "VTPM_example"} encryption_uuid = libvirt.create_secret(auth_sec_dict) if secret_value != 'none': virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(encryption_uuid) if encrypt_change != 'encrpt': # plain_to_encrypt will not add encryption on first start if secret_uuid == 'invalid': encryption_uuid = encryption_uuid[:-1] backend.encryption_secret = encryption_uuid if secret_uuid == "change": auth_sec_dict["sec_desc"] = "sample2 vTPM secret" auth_sec_dict["sec_name"] = "VTPM_example2" new_encryption_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(new_encryption_uuid) if secret_uuid == 'nonexist': backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tpm_dev.backend = backend logging.debug("tpm dev xml to add is:\n %s", tpm_dev) for num in range(tpm_num): vm_xml.add_device(tpm_dev, True) ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True) expected_match = "" if not err_msg: expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml) libvirt.check_result(ret, err_msg, "", False, expected_match) if err_msg: # Stop test when get expected failure return if vm_operate != "restart": check_dumpxml(vm_name) # For default model, no need start guest to test if tpm_model: expect_fail = False try: vm.start() except VMStartError as detail: if secret_value == 'none' or secret_uuid == 'nonexist': logging.debug("Expected failure: %s", detail) return else: test.fail(detail) domuuid = vm.get_uuid() if vm_operate or restart_libvirtd: # Make sure OS works before vm operate or restart libvirtd session = vm.wait_for_login() test_guest_tpm("2.0", session, False) session.close() if restart_libvirtd: utils_libvirtd.libvirtd_restart() swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid if vm_operate == "resume": virsh.suspend(vm_name, **virsh_dargs) time.sleep(3) virsh.resume(vm_name, **virsh_dargs) elif vm_operate == "snapshot": 
virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs) elif vm_operate in ["restart", "create"]: vm.destroy() if vm_operate == "create": virsh.undefine(vm_name, options="--nvram", **virsh_dargs) if os.path.exists(swtpm_statedir): test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir) virsh.create(vm_xml.xml, **virsh_dargs) else: if vm_oprt == "domrename": new_name = "vm_" + utils_misc.generate_random_string(5) virsh.domrename(vm_name, new_name, **virsh_dargs) new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) vm = new_vm vm_name = new_name elif secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) elif not restart_libvirtd: # remove_dev or do other vm operations during restart vm_xml.remove_all_device_by_type('tpm') if secret_uuid == "change" or encrypt_change: # Change secret uuid, or change encrytion state:from plain to encrypted, or on the contrary if encrypt_change == 'plain': # Change from encrypted state to plain:redefine a tpm dev without encryption tpm_dev = Tpm() tpm_dev.tpm_model = tpm_model backend = tpm_dev.Backend() backend.backend_type = backend_type backend.backend_version = backend_version else: # Use a new secret's uuid if secret_uuid == "change": encryption_uuid = new_encryption_uuid backend.encryption_secret = encryption_uuid tpm_dev.backend = backend logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev) vm_xml.add_device(tpm_dev, True) if encrypt_change in ['encrpt', 'plain']: # Avoid sync() undefine removing the state file vm_xml.define() else: vm_xml.sync() if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return if not remove_dev: check_dumpxml(vm_name) elif vm_operate == 'managedsave': virsh.managedsave(vm_name, **virsh_dargs) time.sleep(5) if secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return domid = vm.get_id() check_qemu_cmd_line(vm, vm_name, domid) if backend_type == "passthrough": if tpm_real_v == "1.2" and tpm_model == "tpm-crb": expect_fail = True expect_version = tpm_real_v test_host_tpm_aft(tpm_real_v) else: # emulator backend if remove_dev: expect_fail = True expect_version = backend_version check_swtpm(domid, domuuid, vm_name) session = vm.wait_for_login() if test_suite: run_test_suite_in_guest(session) else: test_guest_tpm(expect_version, session, expect_fail) session.close() if multi_vms: reuse_by_vm2(tpm_dev) if backend_type != "passthrough": #emulator backend check_dumpxml(vm2_name) domid = vm2.get_id() domuuid = vm2.get_uuid() check_qemu_cmd_line(vm2, vm2_name, domid) check_swtpm(domid, domuuid, vm2_name) session = vm2.wait_for_login() test_guest_tpm(backend_version, session, expect_fail) session.close() finally: # Remove renamed domain if it exists if new_name: virsh.remove_domain(new_name, 
"--nvram", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name) # Remove snapshot if exists if vm_operate == "snapshot": snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") if os.path.exists("/tmp/testvm_sp1"): os.remove("/tmp/testvm_sp1") # Clear guest os if test_suite: session = vm.wait_for_login() logging.info("Removing dir /root/linux-*") output = session.cmd_output("rm -rf /root/linux-*") logging.debug("Command output:\n %s", output) session.close() if vm_operate == "create": vm.define(vm_xml.xml) vm_xml_backup.sync(options="--nvram --managed-save") # Remove swtpm log file in case of impact on later runs if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name) for sec_uuid in set(sec_uuids): virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True) if vm2: if len(vm_names) > 1: vm2_xml_backup.sync(options="--nvram") else: virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
def run(test, params, env): """ Test scenarios: virsh blockcommit with relative path 1) Prepare test environment. 2) Create relative path backing chain 3) Do virsh blockcommit 4) Check result. 5) Recover the environments """ def check_chain_backing_files(disk_src_file, expect_backing_list): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_list: backing chain lists. """ # Validate source image doesn't have backing files after active blockcommit qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file) logging.debug("The actual qemu-img qemu_img_info_backing_chain:%s\n", qemu_img_info_backing_chain) logging.debug("The actual qemu-img expect_backing_list:%s\n", expect_backing_list) if qemu_img_info_backing_chain != expect_backing_list: test.fail("The backing files by qemu-img is not identical in expected backing list") def check_top_image_in_xml(expected_top_image): """ check top image in src file :param expected_top_image: expect top image """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] == disk_target: disk_xml = disk.xmltreefile break logging.debug("disk xml in top: %s\n", disk_xml) for attr in ['file', 'name', 'dev']: src_file = disk_xml.find('source').get(attr) if src_file: break if src_file not in expected_top_image: test.fail("Current top img %s is not the same with expected: %s" % (src_file, expected_top_image)) def check_blockcommit_with_bandwidth(chain_list): """ Check blockcommit with bandwidth param chain_list: list, expected backing chain list """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] == disk_target: disk_xml = disk break logging.debug("disk xml in check_blockcommit_with_bandwidth: %s\n", disk_xml.xmltreefile) backingstore_list = disk_xml.get_backingstore_list() parse_source_file_list = [elem.find('source').get('file') or elem.find('source').get('name') for elem in backingstore_list] logging.debug("expected backing chain list is %s", chain_list) logging.debug("parse source list is %s", parse_source_file_list) # Check whether relative path has been kept for i in range(0, len(chain_list)-1): if chain_list[i] not in parse_source_file_list[i]: test.fail("The relative path parsed from disk xml is different with pre-expected ones") def check_file_not_exists(root_dir, file_name, reverse=False): """ Check whether file exists in certain folder :param root_dir: preset root directory :param file_name: input file name :param reverse: whether reverse the condition """ files_path = [os.path.join(root_dir, f) for f in os.listdir(root_dir) if os.path.isfile(os.path.join(root_dir, f)) ] logging.debug("all files in folder: %s \n", files_path) if not files_path: test.fail("Failed to get snapshot files in preset folder") elif reverse: if file_name not in files_path: test.fail("snapshot file:%s can not be found" % file_name) else: if file_name in files_path: test.fail("snapshot file:%s can not be deleted" % file_name) def check_backing_chain_file_not_exists(disk_src_file, file_name, reverse=False): """ Check whether file exists in source file's backing chain :param disk_src_file: disk source with backing chain files :param file_name: input file name :param reverse: whether reverse this condition """ qemu_img_info_backing_chain = libvirt_disk.get_chain_backing_files(disk_src_file) if 
reverse: if file_name not in qemu_img_info_backing_chain: test.fail("%s can not be found in backing chain file" % file_name) else: if file_name in qemu_img_info_backing_chain: test.fail("%s should not be in backing chain file" % file_name) def fill_vm_with_contents(): """ Fill contents in VM """ logging.info("Filling VM contents...") try: session = vm.wait_for_login() status, output = session.cmd_status_output( "dd if=/dev/urandom of=/tmp/bigfile bs=1M count=200") logging.info("Fill contents in VM:\n%s", output) session.close() except Exception as e: logging.error(str(e)) def create_lvm_pool(): """ create lvm pool""" pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) pvt.pre_pool(**params) capacity = "5G" for i in range(1, 5): vol_name = 'vol%s' % i path = "%s/%s" % (pool_target, vol_name) virsh.vol_create_as(vol_name, pool_name, capacity, capacity, "qcow2", debug=True) cmd = "qemu-img create -f %s %s %s" % ("qcow2", path, capacity) process.run(cmd, ignore_status=False, shell=True) volume_path_list.append(path) capacity = "2G" def setup_iscsi_env(): """ Setup iscsi environment""" libvirt.setup_or_cleanup_iscsi(is_setup=False) emulated_size = params.get("image_size", "10G") iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size=emulated_size, portal_ip="127.0.0.1") cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" % ("127.0.0.1", "3260", iscsi_target, lun_num, emulated_size)) process.run(cmd, shell=True) blk_source_image_after_converted = "iscsi://%s:%s/%s/%s" % ("127.0.0.1", "3260", iscsi_target, lun_num) # Convert the image from qcow2 to raw convert_disk_cmd = ("qemu-img convert" " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted)) process.run(convert_disk_cmd, ignore_status=False, shell=True) replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files( vm, pre_set_root_dir, blk_source_image_after_converted, disk_format) params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_source_protocol': 'file'}) return replace_disk_image, blk_source_image_after_converted, backing_chain_list def setup_rbd_env(): """ Set up rbd environment""" params.update( {"virt_disk_device_target": disk_target, "ceph_image_file": first_src_file}) libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True) ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") blk_source_image_after_converted = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) replace_disk_image, backing_chain_list = libvirt_disk.make_relative_path_backing_files( vm, pre_set_root_dir, blk_source_image_after_converted, disk_format) params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_format': 'qcow2', 'disk_source_protocol': 'file'}) return replace_disk_image, blk_source_image_after_converted, backing_chain_list def setup_volume_pool_env(): """Setup volume pool environment""" params.update( {"virt_disk_device_target": disk_target}) create_lvm_pool() blk_source_image_after_converted = ("%s" % volume_path_list[0]) # Convert the image from qcow2 to volume convert_disk_cmd = ("qemu-img convert" " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted)) process.run(convert_disk_cmd, ignore_status=False, shell=True) params.update({'disk_source_name': blk_source_image_after_converted, 'disk_type': 'block', 'disk_format': 'qcow2', 'disk_source_protocol': 
'file'}) libvirt.set_vm_disk(vm, params, tmp_dir) vm.wait_for_login().close() vm.destroy(gracefully=False) replace_disk_image, backing_chain_list = libvirt_disk.make_syslink_path_backing_files( pre_set_root_dir, volume_path_list, disk_format) params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_format': 'qcow2', 'disk_source_protocol': 'file'}) blk_source_image_after_converted = os.path.join(pre_set_root_dir, syslink_top_img) skip_first_one = True return replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list def validate_blockcommit_after_libvirtd_restart(): """Validate blockcommit after libvirtd restart""" logging.debug("phase three blockcommit .....") counts = 1 phase_three_blockcommit_options = " --active" libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_three_blockcommit_options, counts) time.sleep(3) # Before restart libvirtd mirror_content_before_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target) logging.debug(mirror_content_before_restart) utils_libvirtd.libvirtd_restart() # After restart libvirtd mirror_content_after_restart = libvirt_disk.get_mirror_part_in_xml(vm, disk_target) logging.debug(mirror_content_after_restart) # Check whether mirror content is identical with previous one if mirror_content_before_restart != mirror_content_after_restart: test.fail("The mirror part content changed after libvirtd restarted") virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True) def prepare_case_scenarios(snap_del_disks, base_file): """ Prepare case scenarios :param snap_del_disks: snapshot list :param base_file: base file for snapshot """ index = len(snap_del_disks) - 1 option = "--top %s --base %s --delete --verbose --wait" scenarios = {} scenarios.update({"middle-to-middle": {'blkcomopt': option % (snap_del_disks[index - 1], snap_del_disks[index - 2]), 'top': snap_del_disks[index - 1], 'base': snap_del_disks[index - 2]}}) scenarios.update({"middle-to-base": {'blkcomopt': option % (snap_del_disks[index - 1], base_file), 'top': snap_del_disks[index - 1], 'base': base_file}}) scenarios.update({"top-to-middle": {'blkcomopt': option % (snap_del_disks[index], snap_del_disks[index - 2]) + " --active", 'top': snap_del_disks[index], 'base': snap_del_disks[index - 2]}}) scenarios.update({"top-to-base": {'blkcomopt': "--top %s --delete --verbose --wait --active --pivot" % (snap_del_disks[index]), "top": snap_del_disks[index], "base": snap_del_disks[index]}}) scenarios.update({"abort-top-job": {'blkcomopt': "--top %s --delete --verbose --wait --active --pivot --bandwidth 1" % (snap_del_disks[index]), "top": snap_del_disks[index], "base": snap_del_disks[index]}}) return scenarios def loop_case_in_scenarios(scenarios): """ Loop case scenarios :param scenarios: scenario list """ # loop each scenario for case, opt in list(scenarios.items()): logging.debug("Begin scenario: %s testing....................", case) reverse = False if vm.is_alive(): vm.destroy(gracefully=False) # Reset VM to initial state vmxml_backup.sync("--snapshots-metadata") vm.start() snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take) tmp_option = opt.get('blkcomopt') top_file = opt.get('top') base_file = opt.get('base') if 'abort' in case: fill_vm_with_contents() ignite_blockcommit_thread = threading.Thread(target=virsh.blockcommit, args=(vm_name, disk_target, tmp_option,), kwargs={'ignore_status': True, 'debug': True}) ignite_blockcommit_thread.start() ignite_blockcommit_thread.join(2) 
virsh.blockjob(vm_name, disk_target, " --abort", ignore_status=False) reverse = True else: libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', tmp_option, 1) # Need pivot to make effect if "--active" in tmp_option and "--pivot" not in tmp_option: virsh.blockjob(vm_name, disk_target, '--pivot', ignore_status=True) check_file_not_exists(pre_set_root_dir, top_file, reverse=reverse) if 'top' not in case: check_backing_chain_file_not_exists(snap_del_disks[len(snap_del_disks) - 1], top_file) libvirt_disk.cleanup_snapshots(vm, snap_del_disks) del snap_del_disks[:] vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") virsh_dargs = {'debug': True} status_error = ("yes" == params.get("status_error", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) validate_delete_option = ("yes" == params.get("validate_delete_option", "no")) tmp_dir = data_dir.get_data_dir() top_inactive = ("yes" == params.get("top_inactive")) base_option = params.get("base_option", "none") bandwidth = params.get("blockcommit_bandwidth", "") disk_target = params.get("disk_target", "vda") disk_format = params.get("disk_format", "qcow2") disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") pool_name = params.get("pool_name") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") syslink_top_img = params.get("syslink_top_img") snapshot_take = int(params.get("snapshot_take", "4")) snapshot_prefix = params.get("snapshot_prefix", "snapshot") first_src_file = libvirt_disk.get_first_disk_source(vm) blk_source_image = os.path.basename(first_src_file) pre_set_root_dir = os.path.dirname(first_src_file) snapshot_external_disks = [] skip_first_one = False snap_del_disks = [] volume_path_list = [] kkwargs = params.copy() pvt = libvirt.PoolVolumeTest(test, params) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) try: if vm.is_alive(): vm.destroy(gracefully=False) if disk_src_protocol == 'iscsi': replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_iscsi_env() if disk_src_protocol == "rbd": replace_disk_image, blk_source_image_after_converted, backing_chain_list = setup_rbd_env() if disk_src_protocol == "pool": replace_disk_image, blk_source_image_after_converted, skip_first_one, backing_chain_list = setup_volume_pool_env() libvirt.set_vm_disk(vm, params, tmp_dir) # get a vm session before snapshot session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) # Check backing files check_chain_backing_files(replace_disk_image, backing_chain_list) if vm_state == "paused": vm.pause() # Do phase one blockcommit phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative" counts = len(backing_chain_list) if bandwidth and base_option == "base": phase_one_blockcommit_options = "--top vda[1] --base vda[3] --keep-relative --bandwidth %s --active" % bandwidth if restart_libvirtd: utils_libvirtd.libvirtd_restart() if base_option == "shallow": libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts) elif base_option == "base": counts = 1 libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts) 
check_blockcommit_with_bandwidth(backing_chain_list[::-1]) virsh.blockjob(vm_name, disk_target, '--abort', ignore_status=True) # Pivot commits to bottom one of backing chain phase_one_blockcommit_options = "--active --verbose --shallow --pivot --keep-relative" counts = len(backing_chain_list) libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_one_blockcommit_options, counts) #Check top image after phase one block commit check_top_image_in_xml(blk_source_image_after_converted) # Do snapshots _, snapshot_external_disks = libvirt_disk.create_reuse_external_snapshots( vm, pre_set_root_dir, skip_first_one, disk_target) # Set blockcommit_options phase_two_blockcommit_options = "--verbose --keep-relative --shallow --active --pivot" # Run phase two blockcommit with snapshots counts = len(snapshot_external_disks) - 1 libvirt_disk.do_blockcommit_repeatedly(vm, 'vda', phase_two_blockcommit_options, counts) #Check top image after phase two block commit check_top_image_in_xml(snapshot_external_disks) # Run dependent restart_libvirtd case if restart_libvirtd: validate_blockcommit_after_libvirtd_restart() # Run dependent validate_delete_option case if validate_delete_option: # Run blockcommit with snapshots to validate delete option # Test scenarios can be referred from https://bugzilla.redhat.com/show_bug.cgi?id=1008350 logging.debug("Blockcommit with delete option .....") base_file = first_src_file # Get first attempt snapshot lists if vm.is_alive(): vm.destroy(gracefully=False) # Reset VM to initial state vmxml_backup.sync("--snapshots-metadata") vm.start() snap_del_disks = libvirt_disk.make_external_disk_snapshots(vm, disk_target, snapshot_prefix, snapshot_take) scenarios = prepare_case_scenarios(snap_del_disks, base_file) libvirt_disk.cleanup_snapshots(vm, snap_del_disks) del snap_del_disks[:] loop_case_in_scenarios(scenarios) finally: if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Delete reuse external disk if exists for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) # Delete snapshot disk libvirt_disk.cleanup_snapshots(vm, snap_del_disks) # Clean up created folders for folder in [chr(letter) for letter in range(ord('a'), ord('a') + 4)]: rm_cmd = "rm -rf %s" % os.path.join(pre_set_root_dir, folder) process.run(rm_cmd, shell=True) # Remove ceph config file if created if disk_src_protocol == "rbd": libvirt_ceph_utils.create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=False) elif disk_src_protocol == 'iscsi' or 'iscsi_target' in locals(): libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src_protocol == 'pool': pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) rm_cmd = "rm -rf %s" % pool_target process.run(rm_cmd, shell=True) # Recover images xattr if having some dirty_images = libvirt_disk.get_images_with_xattr(vm) if dirty_images: libvirt_disk.clean_images_with_xattr(dirty_images) test.error("VM's image(s) having xattr left")
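# A standalone sketch of the relative-path backing chain these scenarios rely
# on: every overlay records its backing file by bare file name, which is what
# "virsh blockcommit ... --keep-relative" is expected to preserve.  Image and
# directory names are illustrative; base_image is assumed to already exist
# inside root_dir as a qcow2 image.
import os
import subprocess

def make_relative_chain_sketch(root_dir, base_image, depth=3):
    """Create qcow2 overlays whose backing references are relative names and
    return the chain ordered base first."""
    chain = [os.path.join(root_dir, base_image)]
    backing_name = base_image
    for i in range(1, depth + 1):
        overlay_name = "overlay%d.qcow2" % i
        overlay_path = os.path.join(root_dir, overlay_name)
        # A relative backing_file is resolved against the overlay's own
        # directory, so keeping every image in root_dir keeps the chain valid.
        subprocess.check_call(
            ["qemu-img", "create", "-f", "qcow2",
             "-o", "backing_file=%s,backing_fmt=qcow2" % backing_name,
             overlay_path])
        chain.append(overlay_path)
        backing_name = overlay_name
    return chain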
def run(test, params, env): """ Test command: virsh managedsave-xxx including virsh managedsave-edit virsh managedsave-dumpxml virsh managedsave-define ... """ vm_name = params.get('main_vm') checkpoint = params.get('checkpoint', '') error_msg = params.get('error_msg', '') ms_extra_options = params.get('ms_extra_options', '') pre_state = params.get('pre_state', '') status_error = 'yes' == params.get('status_error', 'no') vm = env.get_vm(vm_name) bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def start_and_login_vm(): """ Start vm and login, after which vm is accessible """ vm.start() vm.wait_for_login().close() try: if checkpoint == 'dumpxml': # Check managedsave-dumpxml start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) virsh.managedsave_dumpxml(vm_name, **virsh_dargs) tmp_dir = data_dir.get_tmp_dir() save_img_xml = os.path.join(tmp_dir, 'save_img.xml') managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml') virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name, ' > %s' % save_img_xml, **virsh_dargs) virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml, **virsh_dargs) result_need_check = process.run('diff %s %s' % (save_img_xml, managed_save_xml), shell=True, verbose=True) if checkpoint == 'secure_info': # Check managedsave-dumpxml with option --security-info vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'}) start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) default_xml = virsh.managedsave_dumpxml(vm_name, **virsh_dargs).stdout_text if 'passwd' in default_xml: test.fail('Found "passwd" in dumped vm xml. ' 'Secure info like "passwd" should not be dumped.') secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info', **virsh_dargs).stdout_text if 'passwd' not in secure_xml: test.fail('Not found "passwd" in dumped vm xml.' 'Secure info like "passwd" should be dumped ' 'with option "--security-info"') if checkpoint == 'define': # Make change to a managedsave-dumped xml and redefine vm # and check if the change take effect start_option = '--paused' if pre_state == 'paused' else '' virsh.start(vm_name, start_option, **virsh_dargs) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug(vmxml.devices) disk = vmxml.get_devices('disk')[0] img_path = disk.source.attrs['file'] logging.info('Original image path: %s', img_path) # Copy old image to new image new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img') shutil.copyfile(img_path, new_img_path) virsh.managedsave(vm_name, **virsh_dargs) xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml') virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs) # Make change to xmlfile and managedsave-define with it with open(xmlfile) as file_xml: updated_xml = file_xml.read().replace(img_path, new_img_path) with open(xmlfile, 'w') as file_xml: file_xml.write(updated_xml) virsh.managedsave_define(vm_name, xmlfile, ms_extra_options, **virsh_dargs) virsh.start(vm_name, **virsh_dargs) xml_after_define = virsh.dumpxml(vm_name, **virsh_dargs).stdout_text if 'test.img' not in xml_after_define: test.fail('Not found "test.img" in vm xml after managedsave-define.' 
'Modification to xml did not take effect.') if checkpoint == 'no_save': # Start a guest but do not managedsave it start_and_login_vm() virsh.dom_list('--all --managed-save', **virsh_dargs) if checkpoint == 'rm_after_save': # Remove saved file after managedsave a vm start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) os.remove(MANAGEDSAVE_FILE % vm_name) if checkpoint == 'not_saved_corrupt': # Do not managedsave a vm, but create a fake managedsaved file by # 'touch' a file start_and_login_vm() virsh.dom_list('--all --managed-save', **virsh_dargs) process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True) params['clean_managed_save'] = True if checkpoint == 'exclusive_option': virsh.managedsave(vm_name, **virsh_dargs) # Test managedsave-edit, managedsave_dumpxml, managedsave-define if params.get('check_cmd_error', '') == 'yes': ms_command = params.get('ms_command', '') if ms_command == 'edit': result_need_check = virsh.managedsave_edit(vm_name, ms_extra_options, debug=True) if ms_command == 'dumpxml': result_need_check = virsh.managedsave_dumpxml(vm_name, ms_extra_options, debug=True) if ms_command == 'define': result_need_check = virsh.managedsave_define(vm_name, bkxml.xml, ms_extra_options, debug=True) # If needs to check result, check it if 'result_need_check' in locals(): logging.info('Check command result.') libvirt.check_exit_status(result_need_check, status_error) if error_msg: libvirt.check_result(result_need_check, [error_msg]) finally: if params.get('clean_managed_save'): os.remove(MANAGEDSAVE_FILE % vm_name) utils_libvirtd.libvirtd_restart() virsh.managedsave_remove(vm_name, debug=True) bkxml.sync()
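# MANAGEDSAVE_FILE used above is expected to expand to the qemu driver's
# managed save image for a domain; on a default installation that is
# /var/lib/libvirt/qemu/save/<vm_name>.save (an assumption for this sketch).
# A quick check that pairs the file with the virsh listing:
import os
import subprocess

MANAGEDSAVE_FILE_SKETCH = "/var/lib/libvirt/qemu/save/%s.save"

def has_managed_save_sketch(vm_name):
    """Return True when both the save file and 'virsh list' agree that
    vm_name currently carries a managed save image."""
    save_file = MANAGEDSAVE_FILE_SKETCH % vm_name
    listing = subprocess.check_output(
        ["virsh", "list", "--all", "--with-managed-save"]).decode()
    return os.path.exists(save_file) and vm_name in listing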
def check_ipt_rules(check_ipv4=True, check_ipv6=False): """ Check iptables for network/interface """ br_name = ast.literal_eval(net_bridge)["name"] net_forward = ast.literal_eval(params.get("net_forward", "{}")) net_ipv4 = params.get("net_ipv4") net_ipv6 = params.get("net_ipv6") ipt_rules = ("INPUT -i %s -p udp -m udp --dport 53 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % br_name, "INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % br_name, "FORWARD -i {0} -o {0} -j ACCEPT".format(br_name), "FORWARD -o %s -j REJECT --reject-with icmp" % br_name, "FORWARD -i %s -j REJECT --reject-with icmp" % br_name, "OUTPUT -o %s -p udp -m udp --dport 68 -j ACCEPT" % br_name) net_dev_in = "" net_dev_out = "" if "dev" in net_forward: net_dev_in = " -i %s" % net_forward["dev"] net_dev_out = " -o %s" % net_forward["dev"] if check_ipv4: ipv4_rules = list(ipt_rules) ctr_rule = "" nat_rules = [] if "mode" in net_forward and net_forward["mode"] == "nat": nat_port = ast.literal_eval(params.get("nat_port")) p_start = nat_port["start"] p_end = nat_port["end"] ctr_rule = " -m .* RELATED,ESTABLISHED" nat_rules = [("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0} -p udp" " -j MASQUERADE".format(net_ipv4))] if nat_rules: ipv4_rules.extend(nat_rules) if (net_ipv4 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT" % (net_ipv4, net_dev_in, br_name, ctr_rule)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv4, br_name, net_dev_out))] ipv4_rules.extend(rules) output = to_text(process.system_output('iptables-save')) logging.debug("iptables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M): test.fail("Find iptable rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('iptables-save')) if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M): test.fail("Find iptable rule for open mode after restart " "libvirtd") else: logging.info("Can't find iptable rule for open mode as expected") else: for ipt in ipv4_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find iptable rule:\n%s" % ipt) return ipv4_rules if check_ipv6: ipv6_rules = list(ipt_rules) if (net_ipv6 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s -j ACCEPT" % (net_ipv6, net_dev_in, br_name)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv6, br_name, net_dev_out))] ipv6_rules.extend(rules) output = to_text(process.system_output("ip6tables-save")) logging.debug("iptables: %s", output) for ipt in ipv6_rules: if not output.count(ipt): test.fail("Can't find ipbtable rule:\n%s" % ipt) return ipv6_rules
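# check_ipt_rules() above derives the bridge name and forward mode from the
# test params; the same values can be read back from the live network
# definition.  A small sketch (the network name "default" is an assumption):
import subprocess
import xml.etree.ElementTree as ET

def get_net_forward_sketch(net_name="default"):
    """Return (bridge_name, forward_mode) parsed from virsh net-dumpxml."""
    xml_text = subprocess.check_output(
        ["virsh", "net-dumpxml", net_name]).decode()
    root = ET.fromstring(xml_text)
    bridge = root.find("bridge")
    forward = root.find("forward")
    return (bridge.get("name") if bridge is not None else None,
            forward.get("mode") if forward is not None else None)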
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail("Not find volume '%s' in pool '%s'." 
% (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = { 'image_size': '1G', 'pre_disk_vol': ['1M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal } try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) 
else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_pool_info(pool_info, "Capacity", new_info['Capacity']) check_pool_info(pool_info, "Allocation", new_info['Allocation']) check_pool_info(pool_info, "Available", new_info['Available']) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) except error.TestFail, detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(pool_xml): os.remove(pool_xml)
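# Illustrative sketch (not part of the original test): how the regex used by
# check_pool_list() above turns `virsh pool-list --all` tabular output into
# (name, state, autostart) tuples.  The sample output below is assumed, not
# captured from a real host; note the header row also matches, which is why
# the test checks membership by pool name instead of by position.
import re

SAMPLE_POOL_LIST = """ Name          State      Autostart
-------------------------------------
 default       active     yes
 temp_pool_1   inactive   no
"""

def parse_pool_list(output):
    """Return a list of (name, state, autostart) tuples from pool-list output."""
    return re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", output)

# parse_pool_list(SAMPLE_POOL_LIST) ->
#   [('Name', 'State', 'Autostart'),
#    ('default', 'active', 'yes'),
#    ('temp_pool_1', 'inactive', 'no')]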
def run(test, params, env): """ Test emulatorpin tuning 1) Positive testing 1.1) get the current emulatorpin parameters for a running/shutoff guest 1.2) set the current emulatorpin parameters for a running/shutoff guest 2) Negative testing 2.1) get emulatorpin parameters for a running/shutoff guest 2.2) set emulatorpin parameters running/shutoff guest """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) cgconfig = params.get("cgconfig", "on") cpulist = params.get("emulatorpin_cpulist") status_error = params.get("status_error", "no") change_parameters = params.get("change_parameters", "no") # Backup original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) test_dicts = dict(params) test_dicts['vm'] = vm host_cpus = int(open('/proc/cpuinfo').read().count('processor')) test_dicts['host_cpus'] = host_cpus cpu_list = None if cpulist: cpu_list = cpus_parser(cpulist) test_dicts['cpu_list'] = cpu_list logging.debug("CPU list is %s", cpu_list) # If the physical CPU N doesn't exist, it's an expected error if cpu_list and max(cpu_list) > host_cpus - 1: test_dicts["status_error"] = "yes" cg = utils_cgroup.CgconfigService() if cgconfig == "off": if cg.cgconfig_is_running(): cg.cgconfig_stop() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_emulatorpin_parameter(test_dicts) else: set_emulatorpin_parameter(test_dicts) if status_error == "yes": if change_parameters == "no": get_emulatorpin_parameter(test_dicts) else: set_emulatorpin_parameter(test_dicts) finally: # Recover cgconfig and libvirtd service if not cg.cgconfig_is_running(): cg.cgconfig_start() utils_libvirtd.libvirtd_restart() # Recover vm. vmxml_backup.sync()
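# Illustrative sketch: a simplified stand-in for the cpus_parser() helper the
# emulatorpin test calls above.  The real helper lives in virttest and may
# accept more syntax; this only shows how a cpulist such as "0-3,^2,6"
# expands into the cpu_list the test compares against host_cpus.
def expand_cpulist(cpulist):
    """Expand 'A-B', '^N' and plain 'N' items into a sorted list of CPU ids."""
    include, exclude = set(), set()
    for item in cpulist.split(','):
        item = item.strip()
        if not item:
            continue
        if item.startswith('^'):
            exclude.add(int(item[1:]))
        elif '-' in item:
            start, end = item.split('-', 1)
            include.update(range(int(start), int(end) + 1))
        else:
            include.add(int(item))
    return sorted(include - exclude)

# expand_cpulist("0-3,^2,6") -> [0, 1, 3, 6]; any id larger than
# host_cpus - 1 then makes the case an expected failure, as above.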
def run_virsh_numatune(test, params, env): """ Test numa tuning 1) Positive testing 1.1) get the current numa parameters for a running/shutoff guest 1.2) set the current numa parameters for a running/shutoff guest 1.2.1) set valid 'mode' parameters 1.2.2) set valid 'nodeset' parameters 2) Negative testing 2.1) get numa parameters 2.1.1) invalid options 2.1.2) stop cgroup service 2.2) set numa parameters 2.2.1) invalid 'mode' parameters 2.2.2) invalid 'nodeset' parameters 2.2.3) change 'mode' for a running guest and 'mode' is not 'strict' 2.2.4) change 'nodeset' for running guest with mode of 'interleave' 'interleave' or 'preferred' numa mode 2.2.5) stop cgroup service """ # Run test case vm_name = params.get("vms") vm = env.get_vm(vm_name) original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) cgconfig_service = utils_cgroup.CgconfigService() status_error = params.get("status_error", "no") libvirtd = params.get("libvirtd", "on") cgconfig = params.get("cgconfig", "on") start_vm = params.get("start_vm", "no") change_parameters = params.get("change_parameters", "no") # Make sure vm is down if start not requested if start_vm == "no" and vm.is_alive(): vm.destroy() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_numa_parameter(params) else: set_numa_parameter(params) if cgconfig == "off": # Need to shutdown a running guest before stopping cgconfig service # and will start the guest after restarting libvirtd service if vm.is_alive(): vm.destroy() if cgconfig_service.cgconfig_is_running(): cgconfig_service.cgconfig_stop() # Refresh libvirtd service to get latest cgconfig service change if libvirtd == "restart": utils_libvirtd.libvirtd_restart() # Recover previous running guest if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive() and start_vm == "yes"): vm.start() if status_error == "yes": if change_parameters == "no": get_numa_parameter(params) else: set_numa_parameter(params) # Recover cgconfig and libvirtd service if not cgconfig_service.cgconfig_is_running(): cgconfig_service.cgconfig_start() utils_libvirtd.libvirtd_restart() finally: # Restore guest original_vm_xml.sync()
device_source_path = "" if source_path: device_source_path = test.virtdir # Prepare test environment. qemu_conf_bak = None if test_disks_format: qemu_conf = "/etc/libvirt/qemu.conf" qemu_conf_bak = os.path.join(test.tmpdir, "qemu.conf.bak") shutil.copy(qemu_conf, qemu_conf_bak) cmd = ("sed -i '/^allow_disk_format_probing/d' %s;" " echo 'allow_disk_format_probing = 1' >> %s" % (qemu_conf, qemu_conf)) if utils.run(cmd, ignore_status=True).exit_status: raise error.TestNAError("Enable disk format probing failed") utils_libvirtd.libvirtd_restart() # Create virtual device file. disks = [] try: for i in range(len(device_source_names)): if test_disk_type_dir: # If we testing disk type dir option, # it needn't to create disk image disks.append({ "format": "dir", "source": device_source_names[i] }) else: path = "%s/%s.%s" % (device_source_path, device_source_names[i], device_formats[i])
def run(test, params, env): """ Test numa tuning 1) Positive testing 1.1) get the current numa parameters for a running/shutoff guest 1.2) set the current numa parameters for a running/shutoff guest 1.2.1) set valid 'mode' parameters 1.2.2) set valid 'nodeset' parameters 2) Negative testing 2.1) get numa parameters 2.1.1) invalid options 2.1.2) stop cgroup service 2.2) set numa parameters 2.2.1) invalid 'mode' parameters 2.2.2) invalid 'nodeset' parameters 2.2.3) change 'mode' for a running guest and 'mode' is not 'strict' 2.2.4) change 'nodeset' for running guest with mode of 'interleave' 'interleave' or 'preferred' numa mode 2.2.5) stop cgroup service """ try: utils_misc.find_command("numactl") except ValueError: raise error.TestNAError("Command 'numactl' is missing. You must " "install it.") # Run test case #vm_name = params.get("vms") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) original_vm_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) cg = utils_cgroup.CgconfigService() status_error = params.get("status_error", "no") libvirtd = params.get("libvirtd", "on") cgconfig = params.get("cgconfig", "on") start_vm = params.get("start_vm", "no") change_parameters = params.get("change_parameters", "no") # Make sure vm is down if start not requested if start_vm == "no" and vm.is_alive(): vm.destroy() # positive and negative testing ######### cgstop = False try: if status_error == "no": if change_parameters == "no": get_numa_parameter(params, cgstop) else: set_numa_parameter(params, cgstop) if cgconfig == "off": # If running, then need to shutdown a running guest before # stopping cgconfig service and will start the guest after # restarting libvirtd service if cg.cgconfig_is_running(): if vm.is_alive(): vm.destroy() cg.cgconfig_stop() cgstop = True # If we stopped cg, then refresh libvirtd service # to get latest cgconfig service change; otherwise, # if no cg change restart of libvirtd is pointless if cgstop and libvirtd == "restart": try: utils_libvirtd.libvirtd_restart() finally: # Not running is not a good thing, but it does happen # and it will affect other tests if not utils_libvirtd.libvirtd_is_running(): raise error.TestNAError("libvirt service is not running!") # Recover previous running guest if (cgconfig == "off" and libvirtd == "restart" and not vm.is_alive() and start_vm == "yes"): vm.start() if status_error == "yes": if change_parameters == "no": get_numa_parameter(params, cgstop) else: set_numa_parameter(params, cgstop) finally: # Restore guest original_vm_xml.sync() # If we stopped cg, then recover and refresh libvirtd to recognize if cgstop: cg.cgconfig_start() utils_libvirtd.libvirtd_restart()
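# Illustrative sketch: before feeding a 'nodeset' value such as "0-1" to
# virsh numatune, the available host NUMA nodes can be read from sysfs.
# Assumes a standard Linux sysfs layout; not part of the original test.
import os
import re

def host_numa_nodes(sysfs_root="/sys/devices/system/node"):
    """Return a sorted list of NUMA node ids found under sysfs."""
    if not os.path.isdir(sysfs_root):
        return []
    nodes = []
    for entry in os.listdir(sysfs_root):
        match = re.match(r"node(\d+)$", entry)
        if match:
            nodes.append(int(match.group(1)))
    return sorted(nodes)

# Example: a nodeset referencing a node the host does not have is a
# negative-test candidate.
#   requested = [0, 2]
#   missing = [n for n in requested if n not in host_numa_nodes()]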
def run(test, params, env): """ Test interface devices update """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) new_network_name = params.get("net_name") expect_error = "yes" == params.get("status_error", "no") expect_err_msg = params.get("expect_err_msg") iface_driver = params.get("iface_driver") iface_model = params.get("iface_model") iface_mtu = params.get("iface_mtu") iface_rom = params.get("iface_rom") new_iface_driver = params.get("new_iface_driver") new_iface_driver_host = params.get("new_iface_driver_host") new_iface_driver_guest = params.get("new_iface_driver_guest") new_iface_model = params.get("new_iface_model") new_iface_rom = params.get("new_iface_rom") new_iface_inbound = params.get("new_iface_inbound") new_iface_outbound = params.get("new_iface_outbound") new_iface_link = params.get("new_iface_link") new_iface_source = params.get("new_iface_source") new_iface_target = params.get("new_iface_target") new_iface_addr = params.get("new_iface_addr") new_iface_filter = params.get("new_iface_filter") new_iface_mtu = params.get("new_iface_mtu") new_iface_type = params.get("new_iface_type") create_new_net = "yes" == params.get("create_new_net") new_iface_alias = params.get("new_iface_alias") cold_update = "yes" == params.get("cold_update", "no") del_addr = "yes" == params.get("del_address") del_rom = "yes" == params.get("del_rom") # Backup the vm xml for recover at last vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # According to the different os find different file for rom if (iface_rom and "file" in eval(iface_rom) and "%s" in eval(iface_rom)['file']): if os.path.exists(eval(iface_rom)['file'] % "pxe"): iface_rom = iface_rom % "pxe" elif os.path.exists(eval(iface_rom)['file'] % "efi"): iface_rom = iface_rom % "efi" else: logging.error("Can not find suitable rom file") iface_dict_bef = {} iface_dict_aft = {} names = locals() # Collect need update items in 2 dicts for both start vm before and after update_list_bef = [ "driver", "model", "mtu", "rom" ] for update_item_bef in update_list_bef: if names['iface_'+update_item_bef]: iface_dict_bef.update({update_item_bef: names['iface_'+update_item_bef]}) update_list_aft = [ "driver", "driver_host", "driver_guest", "model", "rom", "inbound", "outbound", "link", "source", "target", "addr", "filter", "mtu", "type", "alias"] for update_item_aft in update_list_aft: if names["new_iface_"+update_item_aft]: iface_dict_aft.update({update_item_aft: names["new_iface_"+update_item_aft]}) logging.info("iface_dict_bef is %s, iface_dict_aft is %s", iface_dict_bef, iface_dict_aft) del_list = ["del_addr", "del_rom"] for del_item in del_list: if names[del_item]: iface_dict_aft.update({del_item: "True"}) # Update vm interface with items in iface_dict_bef and start it if iface_dict_bef: libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict_bef) logging.info("vm xml is %s", vm.get_xml()) if not cold_update: vm.start() if iface_mtu: # Do check for mtu size after start vm target_dev = libvirt.get_interface_details(vm_name)[0]['interface'] cmd = "ip link show %s | grep 'mtu %s'" % (target_dev, eval(iface_mtu)['size']) def check_mtu(): """ Check the mtu setting take effect for interface """ ret = process.run(cmd, ignore_status=True, shell=True) if ret.exit_status: test.fail("Can not find mtu setting in cmd result") check_mtu() utils_libvirtd.libvirtd_restart() check_mtu() # Create new network if need if create_new_net: new_net_xml = libvirt.create_net_xml(new_network_name, params) new_net_xml.sync() # Do update for 
iface_driver new_iface_xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict_aft) ret = virsh.update_device(vm_name, new_iface_xml, ignore_status=True, debug=True) libvirt.check_exit_status(ret, expect_error) if expect_error: real_err_msg = ret.stderr.strip() if not re.search(expect_err_msg, real_err_msg, re.IGNORECASE): test.fail("The real error msg:'%s' does not match expect one:" '%s' % (real_err_msg, expect_err_msg)) else: logging.info("Get expect result: %s", real_err_msg) else: if new_iface_inbound: iface_bandwidth = {} iface_bandwidth = vm_xml.VMXML.get_iftune_params(vm_name) for bound_para in ["inbound", "outbound"]: for tune_para in ["average", "peak", "burst"]: get_value = iface_bandwidth.get(bound_para).get(tune_para) expect_value = eval(names["new_iface_"+bound_para]).get(tune_para) logging.info("Get value for %s:%s is %s, expect is %s", bound_para, tune_para, get_value, expect_value) if get_value != expect_value: test.fail("Get value is not equal to expect") vmxml_aft = vm_xml.VMXML.new_from_dumpxml(vm_name) iface_aft = list(vmxml_aft.get_iface_all().values())[0] if new_iface_link: iface_link_value = iface_aft.find('link').get('state') if iface_link_value == new_iface_link: logging.info("Find link state is %s in xml", new_iface_link) # Checking the statue in guest mac_addr = iface_aft.find('mac').get('address') state_map = "%s.*\n.*%s" % (iface_link_value.upper(), mac_addr) session = vm.wait_for_serial_login() logging.info("ip link output:%s", session.cmd_output("ip link")) if_name = utils_net.get_net_if(runner=session.cmd_output, state=state_map)[0] session.close() if if_name: logging.info("Find iface state %s for %s", iface_link_value, mac_addr) else: test.fail("Can not find iface with mac %s and state %s" % (mac_addr, iface_link_value)) else: test.fail("Check fail to get link state, expect %s, but get %s" % (iface_link_value, new_iface_link)) if create_new_net and new_iface_source: iface_source_value = iface_aft.find('source').get('network') if iface_source_value == eval(new_iface_source)['network']: logging.info("Get %s in xml as set", iface_source_value) else: test.fail("Get source %s is not equal to set %s" % (iface_source_value, new_iface_source)) if new_iface_filter: iface_filter_value = iface_aft.find('filterref').get('filter') if iface_filter_value == new_iface_filter: logging.info("Get %s in xml as set", iface_filter_value) else: test.fail("Get filter %s is not equal to set %s" % (iface_filter_value, new_iface_filter)) if new_iface_alias: iface_alias_value = iface_aft.find('alias').get('name') if iface_alias_value == eval(new_iface_alias)['name']: logging.info("Get %s in xml as set", iface_alias_value) else: test.fail("Get alias %s is not equal to set %s" % (iface_alias_value, new_iface_alias)) finally: vmxml_backup.sync() if create_new_net: new_net_xml.undefine()
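# Illustrative sketch: the MTU check above greps `ip link show <dev>` for a
# literal "mtu <size>" string; parsing the value out instead allows a direct
# comparison with eval(iface_mtu)['size'].  The sample output is assumed.
import re

SAMPLE_IP_LINK = ("2: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
                  "mtu 9000 qdisc fq_codel state UNKNOWN mode DEFAULT")

def get_mtu(ip_link_output):
    """Return the mtu reported by `ip link show`, or None if absent."""
    match = re.search(r"\bmtu (\d+)\b", ip_link_output)
    return int(match.group(1)) if match else None

# get_mtu(SAMPLE_IP_LINK) -> 9000, both before and after the
# libvirtd restart performed by the test.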
def check_ipt_rules(check_ipv4=True, check_ipv6=False): """ Check iptables for network/interface """ br_name = ast.literal_eval(net_bridge)["name"] net_forward = ast.literal_eval(params.get("net_forward", "{}")) net_ipv4 = params.get("net_ipv4") net_ipv6 = params.get("net_ipv6") net_dev_in = "" net_dev_out = "" if "dev" in net_forward: net_dev_in = " -i %s" % net_forward["dev"] net_dev_out = " -o %s" % net_forward["dev"] ipt_rules = ( "INPUT -i %s -p udp -m udp --dport 53 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % br_name, "FORWARD -i {0} -o {0} -j ACCEPT".format(br_name), "FORWARD -o %s -j REJECT --reject-with icmp" % br_name, "FORWARD -i %s -j REJECT --reject-with icmp" % br_name) if check_ipv4: ipv4_rules = list(ipt_rules) ipv4_rules.extend( ["INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % br_name, "OUTPUT -o %s -p udp -m udp --dport 68 -j ACCEPT" % br_name, "POSTROUTING -o %s -p udp -m udp --dport 68 " "-j CHECKSUM --checksum-fill" % br_name]) ctr_rule = "" nat_rules = [] if "mode" in net_forward and net_forward["mode"] == "nat": nat_port = ast.literal_eval(params.get("nat_port")) p_start = nat_port["start"] p_end = nat_port["end"] ctr_rule = " -m .* RELATED,ESTABLISHED" nat_rules = [("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0}" " -j MASQUERADE".format(net_ipv4))] if nat_rules: ipv4_rules.extend(nat_rules) if (net_ipv4 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT" % (net_ipv4, net_dev_in, br_name, ctr_rule)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv4, br_name, net_dev_out))] ipv4_rules.extend(rules) output = to_text(process.system_output('iptables-save')) logging.debug("iptables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M): test.fail("Find iptable rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('iptables-save')) if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M): test.fail("Find iptable rule for open mode after restart " "libvirtd") else: logging.info("Can't find iptable rule for open mode as expected") else: for ipt in ipv4_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find iptable rule:\n%s" % ipt) return ipv4_rules if check_ipv6: ipv6_rules = list(ipt_rules) ipv6_rules.extend([ ("INPUT -i %s -p udp -m udp --dport 547 -j ACCEPT" % br_name)]) if (net_ipv6 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s -j ACCEPT" % (net_ipv6, net_dev_in, br_name)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv6, br_name, net_dev_out))] ipv6_rules.extend(rules) output = to_text(process.system_output("ip6tables-save")) logging.debug("ip6tables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv6, br_name), output, re.M): test.fail("Find ip6table rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('ip6tables-save')) if re.search(r"%s|%s" % (net_ipv6, br_name), output_again, re.M): test.fail("Find ip6table rule for open mode after restart " "libvirtd") else: for ipt in ipv6_rules: if not re.search(r"%s" %
ipt, output, re.M): test.fail("Can't find ip6table rule:\n%s" % ipt) return ipv6_rules
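# Illustrative sketch: build the POSTROUTING MASQUERADE rule strings the same
# way check_ipt_rules() formats them, so the expected patterns can be reused
# or inspected on their own.  The subnet and port range in the usage comment
# are made-up examples, not values from the original test.
def nat_masquerade_rules(subnet, port_start, port_end):
    """Return the MASQUERADE rule patterns expected for a NAT network."""
    return [
        ("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE"
         " --to-ports {1}-{2}".format(subnet, port_start, port_end)),
        ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE"
         " --to-ports {1}-{2}".format(subnet, port_start, port_end)),
        "POSTROUTING -s {0} ! -d {0} -j MASQUERADE".format(subnet),
    ]

# nat_masquerade_rules("192.168.122.0/24", 1024, 65535) reproduces the
# patterns searched for in the iptables-save output above.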
vm.destroy(gracefully=False) # Back up xml file. vm_xml_file = os.path.join(test.tmpdir, "vm.xml") virsh.dumpxml(vm_name, extra="--inactive", to_file=vm_xml_file) # Get device path. device_source_path = "" if source_path: device_source_path = test.virtdir # Prepare test environment. qemu_config = LibvirtQemuConfig() if test_disks_format: qemu_config.allow_disk_format_probing = True utils_libvirtd.libvirtd_restart() # Create virtual device file. disks = [] try: for i in range(len(device_source_names)): if test_disk_type_dir: # When testing the disk type dir option, # there is no need to create a disk image disks.append({"format": "dir", "source": device_source_names[i]}) else: path = "%s/%s.%s" % (device_source_path, device_source_names[i], device_formats[i]) disk = prepare_disk(path, device_formats[i]) if disk:
def run(test, params, env): """ Test hpt resizing """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = eval(params.get('error_msg', '[]')) hpt_attrs = eval(params.get('hpt_attrs', '{}')) hpt_order_path = params.get('hpt_order_path', '') cpu_attrs = eval(params.get('cpu_attrs', '{}')) numa_cell = eval(params.get('numa_cell', '{}')) hugepage = 'yes' == params.get('hugepage', 'no') maxpagesize = int(params.get('maxpagesize', 0)) check_hp = 'yes' == params.get('check_hp', 'no') qemu_check = params.get('qemu_check', '') skip_p8 = 'yes' == params.get('skip_p8', 'no') def set_hpt(vmxml, sync, **attrs): """ Set resizing value to vm xml :param vmxml: xml of vm to be manipulated :param sync: whether to sync vmxml after :param attrs: attrs to set to hpt xml """ if vmxml.xmltreefile.find('/features'): features_xml = vmxml.features else: features_xml = vm_xml.VMFeaturesXML() hpt_xml = vm_xml.VMFeaturesHptXML() for attr in attrs: setattr(hpt_xml, attr, attrs[attr]) features_xml.hpt = hpt_xml vmxml.features = features_xml logging.debug(vmxml) if sync: vmxml.sync() def set_cpu(vmxml, **attrs): """ Set cpu attrs for vmxml according to given attrs :param vmxml: xml of vm to be manipulated :param attrs: attrs to set to cpu xml """ if vmxml.xmltreefile.find('cpu'): cpu = vmxml.cpu else: cpu = vm_xml.VMCPUXML() if 'numa_cell' in attrs: cpu.xmltreefile.create_by_xpath('/numa') cpu.numa_cell = attrs['numa_cell'] for key in attrs: setattr(cpu, key, attrs[key]) vmxml.cpu = cpu vmxml.sync() def set_memory(vmxml): """ Set memory attributes in vm xml """ vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848)) vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16)) vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB') logging.debug(numa_cell) if numa_cell: # Remove cpu topology to avoid that it doesn't match vcpu count if vmxml.get_cpu_topology(): new_cpu = vmxml.cpu new_cpu.del_topology() vmxml.cpu = new_cpu vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1 vmxml.sync() def check_hpt_order(session, resizing=''): """ Return htp order in hpt_order file by default If 'resizing' is disabled, test updating htp_order """ if not hpt_order_path: test.cancel('No hpt order path provided.') hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) if resizing == 'disabled': cmd_result = session.cmd_status_output( 'echo %d > %s' % (hpt_order + 1, hpt_order_path)) result = process.CmdResult(stderr=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result, True) libvirt.check_result(result, error_msg) return hpt_order def check_hp_in_vm(session, page_size): """ Check if hugepage size is correct inside vm :param session: the session of the running vm :param page_size: the expected pagesize to be checked inside vm """ expect = False if int(page_size) == 65536 else True meminfo = session.cmd_output('cat /proc/meminfo|grep Huge') logging.info('meminfo: \n%s', meminfo) pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024) logging.info('"%s" should %s be found in meminfo output', pattern, '' if expect else 'not') result = expect == bool(re.search(pattern, meminfo)) if not result: test.fail('meminfo output not meet expectation') # Check PAGE_SIZE in another way if not expect: conf_page_size = session.cmd_output('getconf PAGE_SIZE') logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size) if 
int(conf_page_size) != int(page_size): test.fail( 'PAGE_SIZE not correct, should be %r, actually is %r' % (page_size, conf_page_size)) bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: arch = platform.machine() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) resizing = hpt_attrs.get('resizing') # Test on ppc64le hosts if arch.lower() == 'ppc64le': cpu_arch = cpu.get_cpu_arch() logging.debug('cpu_arch is: %s', cpu_arch) if skip_p8 and cpu_arch == 'power8': test.cancel('This case is not for POWER8') if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0): test.cancel('Qemu version is too low, ' 'does not support maxpagesize setting') if maxpagesize == 16384 and cpu_arch == 'power9': test.cancel('Power9 does not support 16M pagesize.') set_hpt(vmxml, True, **hpt_attrs) if cpu_attrs or numa_cell: if numa_cell: cpu_attrs['numa_cell'] = numa_cell set_cpu(vmxml, **cpu_attrs) if hugepage: vm_mem = vmxml.max_mem host_hp_size = utils_memory.get_huge_page_size() # Make 100m extra memory just to be safe hp_count = max((vm_mem + 102400) // host_hp_size, 1200) vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True) # Set up hugepage env mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs' if not os.path.isdir(hp_path): process.run('mkdir %s' % hp_path, verbose=True) utils_memory.set_num_huge_pages(hp_count) if utils_misc.is_mounted(mnt_source, hp_path, fstype, verbose=True): utils_misc.umount(mnt_source, hp_path, fstype, verbose=True) utils_misc.mount(mnt_source, hp_path, fstype, verbose=True) # Restart libvirtd service to make sure mounted hugepage # be recognized utils_libvirtd.libvirtd_restart() if resizing == 'enabled': set_memory(vmxml) logging.debug('vmxml: \n%s', vmxml) # Start vm and check if start succeeds result = virsh.start(vm_name, debug=True) libvirt.check_exit_status(result, expect_error=status_error) # if vm is not suposed to start, terminate test if status_error: libvirt.check_result(result, error_msg) return libvirt.check_qemu_cmd_line(qemu_check) session = vm.wait_for_login() hpt_order = check_hpt_order(session, resizing) # Check hugepage inside vm if check_hp: check_hp_in_vm(session, maxpagesize * 1024) if resizing == 'enabled': mem_xml = utils_hotplug.create_mem_xml( tg_size=int(params.get('mem_size', 2048000)), tg_sizeunit=params.get('size_unit', 'KiB'), tg_node=int(params.get('mem_node', 0)), mem_model=params.get('mem_model', 'dimm')) logging.debug(mem_xml) # Attach memory device to the guest for 12 times # that will reach the maxinum memory limitation for i in range(12): virsh.attach_device(vm_name, mem_xml.xml, debug=True, ignore_status=False) xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(xml_after_attach) # Check dumpxml of the guest, # check if each device has its alias for i in range(12): pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i logging.debug('Searching for %s', pattern) if not re.search(pattern, str( xml_after_attach.xmltreefile)): test.fail('Missing memory alias: %s' % pattern) # Test on non-ppc64le hosts else: set_hpt(vmxml, sync=False, **hpt_attrs) result = virsh.define(vmxml.xml) libvirt.check_exit_status(result, status_error) libvirt.check_result(result, error_msg) finally: bk_xml.sync() if hugepage: utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs') utils_memory.set_num_huge_pages(0)
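# Worked example of the hugepage sizing done above: reserve enough host
# hugepages to back the guest memory plus roughly 100 MiB of slack, with a
# floor of 1200 pages.  Both arguments are in KiB, matching vmxml.max_mem
# and utils_memory.get_huge_page_size(); the numbers below are only examples.
def required_hugepages(vm_mem_kib, host_hp_size_kib, slack_kib=102400, floor=1200):
    """Return the page count to pass to utils_memory.set_num_huge_pages()."""
    return max((vm_mem_kib + slack_kib) // host_hp_size_kib, floor)

# A 4 GiB guest on a host with 2048 KiB hugepages:
#   required_hugepages(4194304, 2048) -> max(2098, 1200) -> 2098 pages.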
def run(test, params, env): """ Test virsh domdisplay command, return the graphic url This test covered vnc and spice type, also readonly and readwrite mode If have --include-passwd option, also need to check passwd list in result """ if not virsh.has_help_command('domdisplay'): raise error.TestNAError("This version of libvirt doesn't support " "domdisplay test") vm_name = params.get("main_vm", "virt-tests-vm1") status_error = ("yes" == params.get("status_error", "no")) options = params.get("domdisplay_options", "") graphic = params.get("domdisplay_graphic", "vnc") readonly = ("yes" == params.get("readonly", "no")) passwd = params.get("domdisplay_passwd") is_ssl = ("yes" == params.get("domdisplay_ssl", "no")) is_domid = ("yes" == params.get("domdisplay_domid", "no")) is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no")) qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf") # Do xml backup for final recovery vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk") def prepare_ssl_env(): """ Do prepare for ssl spice connection """ # modify qemu.conf f_obj = open(qemu_conf, "r") cont = f_obj.read() # remove the existing setting left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont) left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '', left_cont) # write back to origin file with cut left content f_obj = open(qemu_conf, "w") f_obj.write(left_cont) f_obj.write("spice_tls = 1\n") f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"") f_obj.close() # make modification effect utils_libvirtd.libvirtd_restart() # Generate CA cert utils_misc.create_x509_dir("/etc/pki/libvirt-spice", "/C=IL/L=Raanana/O=Red Hat/CN=my CA", "/C=IL/L=Raanana/O=Red Hat/CN=my server", passwd) try: graphic_count = len(vmxml_backup.get_graphics_devices()) if is_ssl: # Do backup for qemu.conf in tmp_file shutil.copyfile(qemu_conf, tmp_file) prepare_ssl_env() if graphic_count: Graphics.del_graphic(vm_name) Graphics.add_graphic(vm_name, passwd, "spice", True) else: if not graphic_count: Graphics.add_graphic(vm_name, passwd, graphic) # Only change graphic type and passwd Graphics.change_graphic_type_passwd(vm_name, graphic, passwd) vm = env.get_vm(vm_name) if not vm.is_alive(): vm.start() dom_id = virsh.domid(vm_name).stdout.strip() dom_uuid = virsh.domuuid(vm_name).stdout.strip() if is_domid: vm_name = dom_id if is_domuuid: vm_name = dom_uuid # Do test result = virsh.domdisplay(vm_name, options, readonly=readonly, debug=True) logging.debug("result is %s", result) if result.exit_status: if not status_error: raise error.TestFail("Fail to get domain display info. Error:" "%s." % result.stderr.strip()) else: logging.info("Get domain display info failed as expected. 
" "Error:%s.", result.stderr.strip()) return elif status_error: raise error.TestFail("Expect fail, but succeed indeed!") output = result.stdout.strip() # Different result depends on the domain xml listen address if output.find("localhost:") >= 0: expect_addr = "localhost" else: expect_addr = "127.0.0.1" # Get active domain xml info vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info") logging.debug("xml is %s", vmxml_act.get_xmltreefile()) graphic_act = vmxml_act.devices.by_device_tag('graphics')[0] port = graphic_act.port # Do judgement for result if graphic == "vnc": expect = "vnc://%s:%s" % (expect_addr, str(int(port)-5900)) elif graphic == "spice" and is_ssl: tlsport = graphic_act.tlsPort expect = "spice://%s:%s?tls-port=%s" % \ (expect_addr, port, tlsport) elif graphic == "spice": expect = "spice://%s:%s" % (expect_addr, port) if options != "" and passwd is not None: # have --include-passwd and have passwd in xml if graphic == "vnc": expect = "vnc://:%s@%s:%s" % \ (passwd, expect_addr, str(int(port)-5900)) elif graphic == "spice" and is_ssl: expect = expect + "&password="******"spice": expect = expect + "?password="******"Get correct display:%s", output) else: raise error.TestFail("Expect %s, but get %s" % (expect, output)) finally: # Domain xml recovery vmxml_backup.sync() if is_ssl: # qemu.conf recovery shutil.move(tmp_file, qemu_conf) utils_libvirtd.libvirtd_restart()
def run_virsh_dump(test, params, env): """ Test command: virsh dump. This command can dump the core of a domain to a file for analysis. 1. Positive testing 1.1 Dump domain with valid options. 1.2 Avoid file system cache when dumping. 1.3 Compress the dump images to valid/invalid formats. 2. Negative testing 2.1 Dump domain to a non-exist directory. 2.2 Dump domain with invalid option. 2.3 Dump a shut-off domain. """ vm_name = params.get("main_vm", "vm1") vm = env.get_vm(params["main_vm"]) options = params.get("dump_options") dump_file = params.get("dump_file", "vm.core") if os.path.dirname(dump_file) is "": dump_file = os.path.join(test.tmpdir, dump_file) dump_image_format = params.get("dump_image_format") start_vm = params.get("start_vm") status_error = params.get("status_error", "no") qemu_conf = "/etc/libvirt/qemu.conf" # prepare the vm state if vm.is_alive() and start_vm == "no": vm.destroy() if vm.is_dead() and start_vm == "yes": vm.start() def check_domstate(actual, options): """ Check the domain status according to dump options. """ if options.find('live') >= 0: domstate = "running" if options.find('crash') >= 0 or options.find('reset') >0: domstate = "running" elif options.find('crash') >=0: domstate = "shut off" if options.find('reset') >= 0: domstate = "running" elif options.find('reset') >= 0: domstate = "running" else: domstate = "running" if start_vm == "no": domstate = "shut off" logging.debug("Domain should %s after run dump %s", domstate, options) if domstate == actual: return True else: return False def check_dump_format(dump_image_format, dump_file): """ Check the format of dumped file. If 'dump_image_format' is not specified or invalid in qemu.conf, then the file shoule be normal raw file, otherwise it shoud be compress to specified format, the supported compress format including: lzop, gzip, bzip2, and xz. """ valid_format = ["lzop", "gzip", "bzip2", "xz"] if len(dump_image_format) == 0 or dump_image_format not in valid_format: logging.debug("No need check the dumped file format") return True else: file_cmd = "file %s" % dump_file (status, output) = commands.getstatusoutput(file_cmd) if status == 0: logging.debug("Run file %s output: %s", dump_file, output) actual_format = output.split(" ")[1] if actual_format == dump_image_format: if dump_image_format in valid_format: logging.info("Compress dumped file to %s successfully", dump_image_format) return True else: logging.error("Compress dumped file to %s fail", dump_image_format) return False else: logging.error("Fail to check dumped file %s", dump_file) return False # Configure dump_image_format in /etc/libvirt/qemu.conf. 
if len(dump_image_format) != 0: conf_cmd = ("echo dump_image_format = \\\"%s\\\" >> %s" % (dump_image_format, qemu_conf)) if os.system(conf_cmd): logging.error("Config dump_image_format to %s fail", dump_image_format) utils_libvirtd.libvirtd_restart() # Deal with bypass-cache option if options.find('bypass-cache') >= 0: thread.start_new_thread(check_bypass,(dump_file,)) # Guarantee check_bypass function has run before dump time.sleep(5) # Run virsh command cmd_result = virsh.dump(vm_name, dump_file, options, ignore_status=True, debug=True) status = cmd_result.exit_status # Check libvirtd status if utils_libvirtd.libvirtd_is_running(): if check_domstate(vm.state(), options): if status_error == "yes": if status == 0: raise error.TestFail("Expect fail, but run successfully") if status_error == "no": if status != 0: raise error.TestFail("Expect succeed, but run fail") else: if os.path.exists(dump_file): if check_dump_format(dump_image_format, dump_file): logging.info("Successfully dump domain to %s", dump_file) else: raise error.TestFail("The format of dumped file " "is wrong.") else: raise error.TestFail("Fail to find domain dumped file.") else: raise error.TestFail("Domain status check fail.") else: raise error.TestFail("Libvirtd service is dead.") if os.path.isfile(dump_file): os.remove(dump_file) if len(dump_image_format) != 0: clean_qemu_conf = "sed -i '$d' %s " % qemu_conf if os.system(clean_qemu_conf): raise error.TestFail("Fail to recover %s", qemu_conf)
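# Illustrative sketch: the same qemu.conf edit the dump test performs above
# with `echo ... >>` and `sed -i '$d'`, written as plain file operations.
# The path and key follow the test; this must run as root on a real host.
def append_conf_line(conf_path, key, value):
    """Append `key = "value"` to a config file (e.g. dump_image_format)."""
    with open(conf_path, "a") as conf:
        conf.write('%s = "%s"\n' % (key, value))

def drop_last_line(conf_path):
    """Remove the last line again, mirroring sed -i '$d'."""
    with open(conf_path, "r") as conf:
        lines = conf.readlines()
    with open(conf_path, "w") as conf:
        conf.writelines(lines[:-1])

# append_conf_line("/etc/libvirt/qemu.conf", "dump_image_format", "gzip")
# ... restart libvirtd and run the dump ...
# drop_last_line("/etc/libvirt/qemu.conf")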
def run_virsh_net_autostart(test, params, env): """ Test command: virsh net-autostart. """ # Gather test parameters uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri", "default")) status_error = "yes" == params.get("status_error", "no") net_ref = params.get("net_autostart_net_ref", "netname") disable = "yes" == params.get("net_autostart_disable", "no") extra = params.get("net_autostart_extra", "") # extra cmd-line params. # Make easy to maintain virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True} virsh_instance = virsh.VirshPersistent(**virsh_dargs) # Prepare environment and record current net_state_dict backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) backup_state = virsh_instance.net_state_dict() logging.debug("Backed up network(s): %s", backup_state) try: default_xml = backup['default'] except (KeyError, AttributeError): raise error.TestNAError("Test requires default network to exist") # To guarantee cleanup will be executed try: # Remove all network before test for netxml in backup.values(): netxml.orbital_nuclear_strike() # Prepare default property for network # Transeint network can not be set autostart # So confirm persistent is true for test default_xml['persistent'] = True netname = "default" netuuid = default_xml.uuid # Set network 'default' to inactive # Since we do not reboot host to check(instead of restarting libvirtd) # If default network is active, we cann't check "--disable". # Because active network will not be inactive after restarting libvirtd # even we set autostart to False. While inactive network will be active # after restarting libvirtd if we set autostart to True default_xml['active'] = False currents = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) current_state = virsh_instance.net_state_dict() logging.debug("Current network(s): %s", current_state) # Prepare options and arguments if net_ref == "netname": net_ref = netname elif net_ref == "netuuid": net_ref = netuuid if disable: net_ref += " --disable" # Run test case # Use function in virsh module directly for both normal and error test result = virsh.net_autostart(net_ref, extra, **virsh_dargs) logging.debug(result) status = result.exit_status # Close down persistent virsh session (including for all netxml copies) if hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() # Check if autostart or disable is successful with libvirtd restart. # TODO: Since autostart is designed for host reboot, # we'd better check it with host reboot. utils_libvirtd.libvirtd_restart() # Reopen default_xml virsh_instance = virsh.VirshPersistent(**virsh_dargs) currents = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) current_state = virsh_instance.net_state_dict() logging.debug("Current network(s): %s", current_state) default_xml = currents['default'] is_active = default_xml['active'] finally: # Recover environment leftovers = network_xml.NetworkXML.new_all_networks_dict( virsh_instance) for netxml in leftovers.values(): netxml.orbital_nuclear_strike() # Recover from backup for netxml in backup.values(): # If network is transient if ((not backup_state[netxml.name]['persistent']) and backup_state[netxml.name]['active']): netxml.create() continue # autostart = True requires persistent = True first! 
for state in ['persistent', 'autostart', 'active']: try: netxml[state] = backup_state[netxml.name][state] except xcepts.LibvirtXMLError: pass # Close down persistent virsh session (including for all netxml copies) if hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() # Check Result if status_error: if status == 0: raise error.TestFail("Run successfully with wrong command!") else: if disable: if status or is_active: raise error.TestFail("Disable autostart failed.") else: if status or (not is_active): raise error.TestFail("Set network autostart failed.")
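# Illustrative sketch: besides `virsh net-list --autostart`, the autostart
# flag for a persistent network is normally visible on disk as a symlink.
# The path below is the usual default and is an assumption here; adjust it
# for non-default libvirt configurations.
import os

def net_autostart_on_disk(net_name,
                          conf_dir="/etc/libvirt/qemu/networks/autostart"):
    """Return True if an autostart symlink exists for the named network."""
    return os.path.islink(os.path.join(conf_dir, "%s.xml" % net_name))

# After `virsh net-autostart default`, net_autostart_on_disk("default") is
# expected to be True; after `virsh net-autostart default --disable`, False.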
def run_virsh_setmem(test, params, env): """ Test command: virsh setmem. 1) Prepare vm environment. 2) Handle params 3) Prepare libvirtd status. 4) Run test command and wait for current memory's stable. 5) Recover environment. 4) Check result. TODO: support new libvirt with more options. """ def vm_proc_meminfo(session): proc_meminfo = session.cmd_output("cat /proc/meminfo") # verify format and units are expected return int(re.search(r"MemTotal:\s+(\d+)\s+kB", proc_meminfo).group(1)) def make_domref(domarg, vm_ref, domid, vm_name, domuuid): # Specify domain as argument or parameter if domarg == "yes": dom_darg_key = "domainarg" else: dom_darg_key = "domain" # How to reference domain if vm_ref == "domid": dom_darg_value = domid elif vm_ref == "domname": dom_darg_value = vm_name elif vm_ref == "domuuid": dom_darg_value = domuuid elif vm_ref == "none": dom_darg_value = None elif vm_ref == "emptystring": dom_darg_value = '""' else: # stick in value directly dom_darg_value = vm_ref return {dom_darg_key: dom_darg_value} def make_sizeref(sizearg, mem_ref, original_mem): if sizearg == "yes": size_darg_key = "sizearg" else: size_darg_key = "size" if mem_ref == "halfless": size_darg_value = "%d" % (original_mem / 2) elif mem_ref == "halfmore": size_darg_value = "%d" % int(original_mem * 1.5) # no fraction elif mem_ref == "same": size_darg_value = "%d" % original_mem elif mem_ref == "emptystring": size_darg_value = '""' elif mem_ref == "zero": size_darg_value = "0" elif mem_ref == "toosmall": size_darg_value = "1024" elif mem_ref == "toobig": size_darg_value = "1099511627776" # (KiB) One Petabyte elif mem_ref == "none": size_darg_value = None else: # stick in value directly size_darg_value = mem_ref return {size_darg_key: size_darg_value} def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%" % float(deviation)) return float(deviation) <= float(error_percent) def is_old_libvirt(): regex = r"\s+\[--size\]\s+" return bool(not virsh.has_command_help_match("setmem", regex)) def print_debug_stats( original_inside_mem, original_outside_mem, test_inside_mem, test_outside_mem, expected_mem, delta_percentage ): dbgmsg = ( "Original inside mem : %d KiB\n" "Expected inside mem : %d KiB\n" "Actual inside mem : %d KiB\n" "Inside mem deviation : %0.2f%%\n" "Original outside mem : %d KiB\n" "Expected outside mem : %d KiB\n" "Actual outside mem : %d KiB\n" "Outside mem deviation: %0.2f%%\n" "Acceptable deviation %0.2f%%" % ( original_inside_mem, expected_mem, test_inside_mem, 100 - (100 * (float(test_inside_mem) / float(expected_mem))), original_outside_mem, expected_mem, test_outside_mem, 100 - (100 * (float(test_outside_mem) / float(expected_mem))), float(delta_percentage), ) ) for dbgline in dbgmsg.splitlines(): logging.debug(dbgline) # MAIN TEST CODE ### # Process cartesian parameters vm_ref = params.get("setmem_vm_ref", "") mem_ref = params.get("setmem_mem_ref", "") flags = params.get("setmem_flags", "") status_error = params.get("status_error", "no") old_libvirt_fail = params.get("setmem_old_libvirt_fail", "no") quiesce_delay = int(params.get("setmem_quiesce_delay", "1")) domarg = params.get("setmem_domarg", "no") sizearg = params.get("setmem_sizearg", "no") libvirt = params.get("libvirt", "on") delta_percentage = float(params.get("setmem_delta_per", "10")) start_vm = params.get("start_vm", "yes") vm_name = params.get("main_vm") paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no") # Gather 
environment parameters vm = env.get_vm(params["main_vm"]) if start_vm == "yes": if paused_after_start_vm: vm.resume() session = vm.wait_for_login() original_inside_mem = vm_proc_meminfo(session) session.close() if paused_after_start_vm: vm.pause() else: session = None # Retrieve known mem value, convert into kilobytes original_inside_mem = int(params.get("mem", "1024")) * 1024 original_outside_mem = vm.get_used_mem() domid = vm.get_id() domuuid = vm.get_uuid() uri = vm.connect_uri old_libvirt = is_old_libvirt() if old_libvirt: logging.info("Running test on older libvirt") use_kilobytes = True else: logging.info("Running test on newer libvirt") use_kilobytes = False # Argument pattern is complex, build with dargs dargs = {"flagstr": flags, "use_kilobytes": use_kilobytes, "uri": uri, "ignore_status": True, "debug": True} dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid)) dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem)) # Prepare libvirtd status if libvirt == "off": utils_libvirtd.libvirtd_stop() else: # make sure it's running utils_libvirtd.libvirtd_restart() if status_error == "yes" or old_libvirt_fail == "yes": logging.info("Error Test: Expecting an error to occur!") result = virsh.setmem(**dargs) status = result.exit_status # Recover libvirtd status if libvirt == "off": utils_libvirtd.libvirtd_start() if status is 0: logging.info("Waiting %d seconds for VM memory to settle", quiesce_delay) # It takes time for kernel to settle on new memory # and current clean pages is not predictable. Therefor, # extremely difficult to determine quiescence, so # sleep one second per error percent is reasonable option. time.sleep(quiesce_delay) # Gather stats if not running error test if status_error == "no" and old_libvirt_fail == "no": if vm.state() == "shut off": vm.start() # Make sure it's never paused vm.resume() session = vm.wait_for_login() # Actual results test_inside_mem = vm_proc_meminfo(session) session.close() test_outside_mem = vm.get_used_mem() # Expected results for both inside and outside if sizearg == "yes": expected_mem = int(dargs["sizearg"]) else: expected_mem = int(dargs["size"]) print_debug_stats( original_inside_mem, original_outside_mem, test_inside_mem, test_outside_mem, expected_mem, delta_percentage ) if status is 0: # Restore original memory restore_status = virsh.setmem(domainarg=vm_name, sizearg=original_outside_mem, ignore_status=True).exit_status if restore_status is not 0: logging.warning("Failed to restore VM's original memory to %s KiB" % original_outside_mem) else: # virsh setmem failed, no need to restore pass # Don't care about memory comparison on error test if status_error == "no" and old_libvirt_fail == "no": outside_in_range = is_in_range(test_outside_mem, expected_mem, delta_percentage) inside_in_range = is_in_range(test_inside_mem, expected_mem, delta_percentage) if status is not 0 or not outside_in_range or not inside_in_range: msg = "test conditions not met: " if status is not 0: msg += "Non-zero virsh setmem exit code. " # maybe multiple if not outside_in_range: # errors msg += "Outside memory deviated. " if not inside_in_range: msg += "Inside memory deviated. 
" raise error.TestFail(msg) return # Normal test passed elif status_error == "no" and old_libvirt_fail == "yes": if status is 0: if old_libvirt: raise error.TestFail("Error test did not result in an error") else: if not old_libvirt: raise error.TestFail("Newer libvirt failed when it should not") else: # Verify an error test resulted in error if status is 0: raise error.TestFail("Error test did not result in an error")
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() if test_type == "contrast": range = float(params.get("mem_error_range", 1.25)) elif test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: raise error.TestNAError("Hugepage Stress Test need two VM(s).") # confirm no VM(s) running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': raise error.TestNAError("one or more VM(s) is living.") else: vm_names.append(params.get("main_vm")) # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError, e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) pass # jump out of for-loop else: error_msg = "Test failed in positive case. error: %s\n" % e raise error.TestFail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError), e: error_msg = "Test failed in positive case.\n error: %s\n" % e raise error.TestFail(error_msg)
def run(test, params, env): """ Test command: virsh dump. This command can dump the core of a domain to a file for analysis. 1. Positive testing 1.1 Dump domain with valid options. 1.2 Avoid file system cache when dumping. 1.3 Compress the dump images to valid/invalid formats. 2. Negative testing 2.1 Dump domain to a non-exist directory. 2.2 Dump domain with invalid option. 2.3 Dump a shut-off domain. """ vm_name = params.get("main_vm", "vm1") vm = env.get_vm(vm_name) options = params.get("dump_options") dump_file = params.get("dump_file", "vm.core") if os.path.dirname(dump_file) is "": dump_file = os.path.join(test.tmpdir, dump_file) dump_image_format = params.get("dump_image_format") start_vm = params.get("start_vm") == "yes" paused_after_start_vm = params.get("paused_after_start_vm") == "yes" status_error = params.get("status_error", "no") == "yes" timeout = int(params.get("timeout", "5")) qemu_conf = "/etc/libvirt/qemu.conf" def check_domstate(actual, options): """ Check the domain status according to dump options. """ if options.find('live') >= 0: domstate = "running" if options.find('crash') >= 0 or options.find('reset') > 0: domstate = "running" if paused_after_start_vm: domstate = "paused" elif options.find('crash') >= 0: domstate = "shut off" if options.find('reset') >= 0: domstate = "running" elif options.find('reset') >= 0: domstate = "running" if paused_after_start_vm: domstate = "paused" else: domstate = "running" if paused_after_start_vm: domstate = "paused" if not start_vm: domstate = "shut off" logging.debug("Domain should %s after run dump %s", domstate, options) return (domstate == actual) def check_dump_format(dump_image_format, dump_file): """ Check the format of dumped file. If 'dump_image_format' is not specified or invalid in qemu.conf, then the file shoule be normal raw file, otherwise it shoud be compress to specified format, the supported compress format including: lzop, gzip, bzip2, and xz. """ valid_format = ["lzop", "gzip", "bzip2", "xz"] if len(dump_image_format) == 0 or dump_image_format not in valid_format: logging.debug("No need check the dumped file format") return True else: file_cmd = "file %s" % dump_file (status, output) = commands.getstatusoutput(file_cmd) if status: logging.error("Fail to check dumped file %s", dump_file) return False logging.debug("Run file %s output: %s", dump_file, output) actual_format = output.split(" ")[1] if actual_format.lower() != dump_image_format.lower(): logging.error("Compress dumped file to %s fail: %s" % (dump_image_format, actual_format)) return False else: return True # Configure dump_image_format in /etc/libvirt/qemu.conf. 
if len(dump_image_format): conf_cmd = ("echo dump_image_format = \\\"%s\\\" >> %s" % (dump_image_format, qemu_conf)) if os.system(conf_cmd): logging.error("Config dump_image_format to %s fail", dump_image_format) utils_libvirtd.libvirtd_restart() if not utils_libvirtd.libvirtd_is_running(): raise error.TestNAError("libvirt service is not running!") # Deal with bypass-cache option child_pid = 0 if options.find('bypass-cache') >= 0: pid = os.fork() if pid: # Guarantee check_bypass function has run before dump child_pid = pid try: wait_pid_active(pid, timeout) finally: os.kill(child_pid, signal.SIGUSR1) else: check_bypass(dump_file) # Wait for parent process over while True: time.sleep(1) # Run virsh command cmd_result = virsh.dump(vm_name, dump_file, options, ignore_status=True, debug=True) status = cmd_result.exit_status try: logging.info("Start check result") if not check_domstate(vm.state(), options): raise error.TestFail("Domain status check fail.") if status_error: if not status: raise error.TestFail("Expect fail, but run successfully") else: if status: raise error.TestFail("Expect succeed, but run fail") if not os.path.exists(dump_file): raise error.TestFail("Fail to find domain dumped file.") if check_dump_format(dump_image_format, dump_file): logging.info("Successfully dump domain to %s", dump_file) else: raise error.TestFail("The format of dumped file is wrong.") finally: if child_pid: os.kill(child_pid, signal.SIGUSR1) if os.path.isfile(dump_file): os.remove(dump_file) if len(dump_image_format): clean_qemu_conf = "sed -i '$d' %s " % qemu_conf if os.system(clean_qemu_conf): raise error.TestFail("Fail to recover %s" % qemu_conf)
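# Illustrative sketch (not part of the test above): check_dump_format() decides the
# compression format of the dumped core from the `file` utility's output. A
# stand-alone version of that idea is shown below; the substring mapping is an
# assumption based on typical `file` descriptions ("LZOP compressed data",
# "gzip compressed data", and so on), and the helper name is hypothetical.
import subprocess

def guess_dump_format(dump_file):
    """Return 'lzop', 'gzip', 'bzip2', 'xz' or 'raw' based on `file` output."""
    output = subprocess.check_output(["file", "-b", dump_file]).decode().lower()
    for fmt in ("lzop", "gzip", "bzip2", "xz"):
        if fmt in output:
            return fmt
    return "raw"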
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Destroy the pool (21) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (22) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm cleanup_env = [False, False, False] def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." 
% pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug( "Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail( "Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Run Testcase try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define result = utils_test.libvirt.define_pool(pool_name, pool_type, pool_target, cleanup_env) check_exit_status(result, status_error) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." 
% pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # If the filesystem cntaining the directory is mounted, then the # directory will show as running, which means the local 'dir' pool # don't need start after restart libvirtd if pool_type != "dir": result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) check_exit_status(result) check_vol_list(vol_name, pool_name) # Step(20) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (21) # Pool delete for 'dir' type pool if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, True) # Step (22) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = utils.system_output(cmd) lv_utils.vg_remove(vg_name) utils.run("pvremove %s" % pv_name) if cleanup_env[1]: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utils_test.libvirt.setup_or_cleanup_nfs(False)
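# Illustrative sketch (not part of the test above): check_pool_list() boils down to
# parsing the table printed by `virsh pool-list` and looking for the pool name in
# the first column. A stand-alone approximation, assuming virsh is available on the
# host; the helper name is hypothetical.
import re
import subprocess

def pool_in_list(pool_name, options="--all"):
    """Return True if pool_name appears in `virsh pool-list <options>` output."""
    output = subprocess.check_output("virsh pool-list %s" % options,
                                     shell=True).decode()
    # Data rows look like: "<name>   <state>   <autostart>"
    rows = re.findall(r"^\s*(\S+)\s+(\S+)\s+(\S+)\s*$", output, re.M)
    return any(row[0] == pool_name for row in rows)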
def run(test, params, env): """ 1. prepare env 2. check if nwfilter binding 3. run test 4. destroy vm and restore the status """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) status_error = "yes" == params.get("status_error") vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) filter_name = params.get("filter_name") is_nwfilter_define = "yes" == params.get("is_nwfilter_define") vnet0_binding = os.path.join(data_dir.get_tmp_dir(), "vnet0_binding.xml") filter_binding_name = params.get("filter_binding_name") failed_msg = params.get("expected_failed") target_dev = params.get("target_dev") source_network = params.get("source_network") source_bridge = params.get("source_bridge") alias_name = params.get("alias_name") def set_env(): """ prepare the vm interface xml this xml can both use in two senario. but little different for two senario """ vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_xml = vmxml.get_devices('interface')[0] vmxml.del_device(iface_xml) new_iface = interface.Interface('network') new_iface.xml = iface_xml.xml new_iface.type_name = "network" iface_target = {'dev': target_dev} new_iface.target = iface_target source = {'network': source_network, 'bridge': source_bridge} new_iface.source = source filterrefs_dict = {} filterrefs_dict['name'] = filter_name filterrefs_dict['parameters'] = [] new_filterref = new_iface.new_filterref(**filterrefs_dict) new_iface.filterref = new_filterref alias_dict = {'name': alias_name} new_iface.alias = alias_dict vmxml.add_device(new_iface) logging.debug("new interface xml is: %s" % new_iface) vmxml.sync() return new_iface def check_binding_port(cmd_res, match, is_match=True): """ check the list binding ports """ list_res = cmd_res.stdout_text.strip() if list_res and re.search(match, list_res): if not is_match: test.fail("expected not match %s" % match) elif is_match: test.fail("expected match %s but not match" % match) try: # set new interface env new_iface = set_env() # create binding dump file virsh.start(vm_name, debug=True) ret = virsh.nwfilter_binding_dumpxml(new_iface.target['dev'], to_file=vnet0_binding, debug=True) utlv.check_exit_status(ret, status_error) binding = nwfilter_binding.NwfilterBinding() binding.xml = vnet0_binding filterrefs_dict = {} filterrefs_dict['name'] = filter_binding_name filterrefs_dict['parameters'] = [{ 'name': "MAC", 'value': new_iface.mac_address }] binding.filterref = binding.new_filterref(**filterrefs_dict) logging.debug("binding is %s" % binding) # list filter if not is_nwfilter_define: virsh.nwfilter_binding_delete(new_iface.target['dev'], debug=True) if is_nwfilter_define: ret = virsh.nwfilter_binding_list(debug=True) utlv.check_exit_status(ret, status_error) check_binding_port(ret, filter_name, is_match=True) ret_create = virsh.nwfilter_binding_create(binding.xml, debug=True) # two Senario if is_nwfilter_define: utlv.check_result(ret_create, failed_msg) elif not is_nwfilter_define: # get params for senario2 check_cmd = params.get("check_cmd") expected_match = params.get("expected_match") filter_binding_copy = params.get("filter_binding_copy") ret = virsh.nwfilter_binding_list(debug=True) check_binding_port(ret, filter_binding_name, is_match=True) utlv.check_cmd_expected(check_cmd, expected_match, True) utils_libvirtd.libvirtd_restart() ret = virsh.nwfilter_binding_list(debug=True) check_binding_port(ret, filter_binding_name, is_match=True) # use check command to check result utlv.check_cmd_expected(check_cmd, expected_match, True) new_binding = 
nwfilter_binding.NwfilterBinding() new_binding.xml = binding.xml filterrefs_dict = {} filterrefs_dict['name'] = filter_binding_copy filterrefs_dict['parameters'] = [{ 'name': "MAC", 'value': new_iface.mac_address }] new_binding.filterref = new_binding.new_filterref(**filterrefs_dict) logging.debug("binding is %s" % new_binding) ret_create = virsh.nwfilter_binding_create(new_binding.xml, debug=True) utlv.check_result(ret_create, failed_msg) finally: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync()
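# Illustrative sketch (not part of the test above): the check performed by
# check_binding_port() is essentially a regex search of the
# `virsh nwfilter-binding-list` output for the expected filter name. Stand-alone
# approximation, assuming virsh is installed; the helper name is hypothetical.
import re
import subprocess

def binding_listed(filter_name):
    """Return True if filter_name shows up in `virsh nwfilter-binding-list`."""
    output = subprocess.check_output(["virsh", "nwfilter-binding-list"]).decode()
    return re.search(filter_name, output) is not None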
def run(test, params, env): """ Test emulatorpin tuning 1) Positive testing 1.1) get the current emulatorpin parameters for a running/shutoff guest 1.2) set the current emulatorpin parameters for a running/shutoff guest 2) Negative testing 2.1) get emulatorpin parameters for a running/shutoff guest 2.2) set emulatorpin parameters running/shutoff guest """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) cgconfig = params.get("cgconfig", "on") cpulist = params.get("emulatorpin_cpulist") status_error = params.get("status_error", "no") change_parameters = params.get("change_parameters", "no") # Backup original vm vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() emulatorpin_placement = params.get("emulatorpin_placement", "") if emulatorpin_placement: vm.destroy() vmxml.placement = emulatorpin_placement vmxml.sync() vm.start() test_dicts = dict(params) test_dicts['vm'] = vm host_cpus = utils.count_cpus() test_dicts['host_cpus'] = host_cpus cpu_max = int(host_cpus) - 1 cpu_list = None # Assemble cpu list for positive test if status_error == "no": if cpulist is None: pass elif cpulist == "x": cpulist = random.choice(utils.cpu_online_map()) elif cpulist == "x-y": cpulist = "0-%s" % cpu_max elif cpulist == "x,y": cpulist = ','.join(random.sample(utils.cpu_online_map(), 2)) elif cpulist == "x-y,^z": cpulist = "0-%s,^%s" % (cpu_max, cpu_max) elif cpulist == "-1": cpulist = "-1" elif cpulist == "out_of_max": cpulist = str(cpu_max + 1) else: raise error.TestNAError("CPU-list=%s is not recognized." % cpulist) test_dicts['emulatorpin_cpulist'] = cpulist if cpulist: cpu_list = cpus_parser(cpulist) test_dicts['cpu_list'] = cpu_list logging.debug("CPU list is %s", cpu_list) cg = utils_cgroup.CgconfigService() if cgconfig == "off": if cg.cgconfig_is_running(): cg.cgconfig_stop() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_emulatorpin_parameter(test_dicts) else: set_emulatorpin_parameter(test_dicts) if status_error == "yes": if change_parameters == "no": get_emulatorpin_parameter(test_dicts) else: set_emulatorpin_parameter(test_dicts) finally: # Recover cgconfig and libvirtd service if not cg.cgconfig_is_running(): cg.cgconfig_start() utils_libvirtd.libvirtd_restart() # Recover vm. vmxml_backup.sync()
def run(test, params, env): """ Test command: virsh managedsave-xxx including virsh managedsave-edit virsh managedsave-dumpxml virsh managedsave-define ... """ vm_name = params.get('main_vm') checkpoint = params.get('checkpoint', '') error_msg = params.get('error_msg', '') virsh_opt = params.get('virsh_opt', '') ms_extra_options = params.get('ms_extra_options', '') pre_state = params.get('pre_state', '') status_error = 'yes' == params.get('status_error', 'no') vm = env.get_vm(vm_name) bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def start_and_login_vm(): """ Start vm and login, after which vm is accessible """ vm.start() vm.wait_for_login().close() try: if checkpoint == 'dumpxml': # Check managedsave-dumpxml start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) virsh.managedsave_dumpxml(vm_name, **virsh_dargs) tmp_dir = data_dir.get_tmp_dir() save_img_xml = os.path.join(tmp_dir, 'save_img.xml') managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml') virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name, ' > %s' % save_img_xml, **virsh_dargs) virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml, **virsh_dargs) result_need_check = process.run('diff %s %s' % (save_img_xml, managed_save_xml), shell=True, verbose=True) if checkpoint == 'secure_info': # Check managedsave-dumpxml with option --security-info vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Add graphics vnc if guest doesn't have if not vmxml.get_devices(device_type="graphics"): logging.debug("Guest doesn't have graphic, add one") graphics.Graphics.add_graphic(vm_name, graphic="vnc") vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'}) start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) default_xml = virsh.managedsave_dumpxml(vm_name, **virsh_dargs).stdout_text if 'passwd' in default_xml: test.fail('Found "passwd" in dumped vm xml. ' 'Secure info like "passwd" should not be dumped.') secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info', **virsh_dargs).stdout_text if 'passwd' not in secure_xml: test.fail('Not found "passwd" in dumped vm xml.' 'Secure info like "passwd" should be dumped ' 'with option "--security-info"') if checkpoint == 'define': # Make change to a managedsave-dumped xml and redefine vm # and check if the change take effect start_option = '--paused' if pre_state == 'paused' else '' virsh.start(vm_name, start_option, **virsh_dargs) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug(vmxml.devices) disk = vmxml.get_devices('disk')[0] img_path = disk.source.attrs['file'] logging.info('Original image path: %s', img_path) # Copy old image to new image new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img') shutil.copyfile(img_path, new_img_path) virsh.managedsave(vm_name, **virsh_dargs) xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml') virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs) # Make change to xmlfile and managedsave-define with it with open(xmlfile) as file_xml: updated_xml = file_xml.read().replace(img_path, new_img_path) with open(xmlfile, 'w') as file_xml: file_xml.write(updated_xml) virsh.managedsave_define(vm_name, xmlfile, ms_extra_options, **virsh_dargs) virsh.start(vm_name, **virsh_dargs) xml_after_define = virsh.dumpxml(vm_name, **virsh_dargs).stdout_text if 'test.img' not in xml_after_define: test.fail( 'Not found "test.img" in vm xml after managedsave-define.' 
'Modification to xml did not take effect.') if checkpoint == 'no_save': # Start a guest but do not managedsave it start_and_login_vm() virsh.dom_list('--all --managed-save', **virsh_dargs) if checkpoint == 'rm_after_save': # Remove saved file after managedsave a vm start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) os.remove(MANAGEDSAVE_FILE % vm_name) if checkpoint == 'not_saved_corrupt': # Do not managedsave a vm, but create a fake managedsaved file by # 'touch' a file start_and_login_vm() virsh.dom_list('--all --managed-save', **virsh_dargs) process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True) params['clean_managed_save'] = True if checkpoint == 'readonly': start_and_login_vm() virsh.managedsave(vm_name, **virsh_dargs) if checkpoint == 'exclusive_option': virsh.managedsave(vm_name, **virsh_dargs) # Test managedsave-edit, managedsave_dumpxml, managedsave-define if params.get('check_cmd_error', '') == 'yes': ms_command = params.get('ms_command', '') if ms_command == 'edit': result_need_check = virsh.managedsave_edit(vm_name, ms_extra_options, timeout=60, virsh_opt=virsh_opt, debug=True) if ms_command == 'dumpxml': result_need_check = virsh.managedsave_dumpxml( vm_name, ms_extra_options, virsh_opt=virsh_opt, debug=True) if ms_command == 'define': result_need_check = virsh.managedsave_define( vm_name, bkxml.xml, ms_extra_options, virsh_opt=virsh_opt, debug=True) # If needs to check result, check it if 'result_need_check' in locals(): logging.info('Check command result.') libvirt.check_exit_status(result_need_check, status_error) if error_msg: libvirt.check_result(result_need_check, [error_msg]) finally: if params.get('clean_managed_save'): os.remove(MANAGEDSAVE_FILE % vm_name) utils_libvirtd.libvirtd_restart() virsh.managedsave_remove(vm_name, debug=True) bkxml.sync()
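# Illustrative sketch (not part of the test above): the 'dumpxml' checkpoint compares
# the XML extracted from the managed-save image with the managedsave-dumpxml output
# by shelling out to `diff`. The same comparison can be done in-process with difflib;
# the file paths below are hypothetical.
import difflib

def xml_files_differ(path_a, path_b):
    """Return a unified diff string (empty if the two XML dumps are identical)."""
    with open(path_a) as file_a, open(path_b) as file_b:
        return "".join(difflib.unified_diff(file_a.readlines(), file_b.readlines(),
                                            fromfile=path_a, tofile=path_b))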
def run(test, params, env): """ Test command: virsh rename. The command can rename a domain. 1.Prepare test environment. 2.Perform virsh rename operation. 3.Recover test environment. 4.Confirm the test result. """ # Get specific parameter value vm_name = params.get("main_vm") vm = env.get_vm(vm_name) domuuid = vm.get_uuid() vm_ref = params.get("domrename_vm_ref", "name") status_error = "yes" == params.get("status_error", "no") new_name = params.get("vm_new_name", "new") pre_vm_state = params.get("domrename_vm_state", "shutoff") domain_option = params.get("dom_opt", "") new_name_option = params.get("newname_opt", "") add_vm = "yes" == params.get("add_vm", "no") # Replace the varaiables if vm_ref == "name": vm_ref = vm_name elif vm_ref == "uuid": vm_ref = domuuid if new_name == "vm2_name": vm2_name = ("%s" % vm_name[:-1]) + "2" new_name = vm2_name # Build input params dom_param = ' '.join([domain_option, vm_ref]) new_name_param = ' '.join([new_name_option, new_name]) if vm.is_alive(): vm.destroy() # Backup for recovery. vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("vm xml is %s", vmxml_backup) # Clone additional vms if needed if add_vm: try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to install virt-install on host") ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360) if ret_clone.exit_status: test.fail("Error occured when clone a second vm!") vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir, vm.address_cache) virsh.dom_list("--name --all", debug=True) # Create object instance for renamed domain new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) # Prepare vm state if pre_vm_state not in ["shutoff", "autostart"]: vm.start() if pre_vm_state == "paused": vm.pause() logging.debug("Domain state is now: %s", vm.state()) elif pre_vm_state == "managed_saved": vm.managedsave() elif pre_vm_state == "with_snapshot": virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True) vm.destroy(gracefully=False) try: if pre_vm_state == "autostart": virsh.autostart(dom_param, "", debug=True) virsh.dom_list("--all --autostart", debug=True) logging.debug("files under '/etc/libvirt/qemu/autostart/' are %s", os.listdir('/etc/libvirt/qemu/autostart/')) result = virsh.domrename(dom_param, new_name_param, ignore_status=True, debug=True) # Raise unexpected pass or fail libvirt.check_exit_status(result, status_error) # Return expected failure for negative tests if status_error: logging.debug("Expected failure: %s", result.stderr) return # Checkpoints after domrename succeed else: list_ret = virsh.dom_list("--name --all", debug=True).stdout.strip().splitlines() domname_ret = virsh.domname(domuuid, debug=True).stdout.strip() if new_name not in list_ret or vm_name in list_ret: test.fail("New name does not affect in virsh list") if domname_ret != new_name: test.fail( "New domain name does not affect in virsh domname uuid") if pre_vm_state != "autostart": # Try to start vm with the new name new_vm.start() else: utils_libvirtd.libvirtd_restart() list_autostart = virsh.dom_list("--autostart", debug=True).stdout logging.debug( "files under '/etc/libvirt/qemu/autostart/' are %s", os.listdir('/etc/libvirt/qemu/autostart/')) process.run("file /etc/libvirt/qemu/autostart/%s.xml" % vm_name, verbose=True) if new_name not in list_autostart: test.fail( "Domain isn't autostarted after restart libvirtd," "or becomes a never 
'autostart' one.") finally: # Remove additional vms if add_vm and vm2.exists() and result.exit_status: virsh.remove_domain(vm2_name, "--remove-all-storage") # Undefine newly renamed domain if new_vm.exists(): if new_vm.is_alive(): new_vm.destroy(gracefully=False) new_vm.undefine() # Recover domain state if pre_vm_state != "shutoff": if pre_vm_state == "with_snapshot": libvirt.clean_up_snapshots(vm_name) else: if pre_vm_state == "managed_saved": vm.start() vm.destroy(gracefully=False) # Restore VM vmxml_backup.sync()
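# Illustrative sketch (not part of the test above): the post-rename checkpoints only
# inspect which names `virsh list --name --all` reports. Stand-alone approximation,
# assuming virsh is installed; the helper name is hypothetical.
import subprocess

def defined_domains():
    """Return the set of domain names libvirt knows about, active or inactive."""
    output = subprocess.check_output(["virsh", "list", "--name", "--all"]).decode()
    return {name.strip() for name in output.splitlines() if name.strip()}

# After `virsh domrename old_name new_name`, new_name should be present in
# defined_domains() and old_name should not.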
def run(test, params, env): """ Test emulatorpin tuning 1) Positive testing 1.1) get the current emulatorpin parameters for a running/shutoff guest 1.2) set the current emulatorpin parameters for a running/shutoff guest 2) Negative testing 2.1) get emulatorpin parameters for a running/shutoff guest 2.2) set emulatorpin parameters running/shutoff guest """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) cgconfig = params.get("cgconfig", "on") cpulist = params.get("emulatorpin_cpulist") status_error = params.get("status_error", "no") change_parameters = params.get("change_parameters", "no") # Backup original vm vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() emulatorpin_placement = params.get("emulatorpin_placement", "") if emulatorpin_placement: vm.destroy() vmxml.placement = emulatorpin_placement vmxml.sync() try: vm.start() except VMStartError as detail: # Recover the VM and failout early vmxml_backup.sync() logging.debug("Used VM XML:\n %s", vmxml) test.fail("VM Fails to start: %s", detail) test_dicts = dict(params) test_dicts['vm'] = vm host_cpus = cpu.online_cpus_count() test_dicts['host_cpus'] = host_cpus cpu_max = int(host_cpus) - 1 cpu_list = None # Assemble cpu list for positive test if status_error == "no": if cpulist is None: pass elif cpulist == "x": cpu_online_map = list(map(str, cpu.cpu_online_list())) cpulist = random.choice(cpu_online_map) elif cpulist == "x-y": # By default, emulator is pined to all cpus, and element # 'cputune/emulatorpin' may not exist in VM's XML. # And libvirt will do nothing if pin emulator to the same # cpus, that means VM's XML still have that element. # So for testing, we should avoid that value(0-$cpu_max). if cpu_max < 2: cpulist = "0-0" else: cpulist = "0-%s" % (cpu_max - 1) elif cpulist == "x,y": cpu_online_map = list(map(str, cpu.cpu_online_list())) cpulist = ','.join(random.sample(cpu_online_map, 2)) elif cpulist == "x-y,^z": cpulist = "0-%s,^%s" % (cpu_max, cpu_max) elif cpulist == "-1": cpulist = "-1" elif cpulist == "out_of_max": cpulist = str(cpu_max + 1) else: test.cancel("CPU-list=%s is not recognized." % cpulist) test_dicts['emulatorpin_cpulist'] = cpulist if cpulist: cpu_list = cpus_parser(cpulist) test_dicts['cpu_list'] = cpu_list logging.debug("CPU list is %s", cpu_list) cg = utils_cgroup.CgconfigService() if cgconfig == "off": if cg.cgconfig_is_running(): cg.cgconfig_stop() # positive and negative testing ######### try: if status_error == "no": if change_parameters == "no": get_emulatorpin_parameter(test_dicts, test) else: set_emulatorpin_parameter(test_dicts, test) if status_error == "yes": if change_parameters == "no": get_emulatorpin_parameter(test_dicts, test) else: set_emulatorpin_parameter(test_dicts, test) finally: # Recover cgconfig and libvirtd service if not cg.cgconfig_is_running(): cg.cgconfig_start() utils_libvirtd.libvirtd_restart() # Recover vm. vmxml_backup.sync()
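# Illustrative sketch (not part of the test above): the "x-y,^z" strings assembled
# above follow libvirt's cpulist syntax, which cpus_parser() expands into a list of
# host CPU ids. A minimal parser for the simple cases used here (plain ids, ranges
# and ^ exclusions); it deliberately does not handle the invalid "-1" value used
# for negative testing.
def parse_cpulist(cpulist):
    """Expand a libvirt-style cpulist such as '0-3,^2' into a sorted list of ints."""
    include, exclude = set(), set()
    for token in cpulist.split(","):
        target = exclude if token.startswith("^") else include
        token = token.lstrip("^")
        if "-" in token:
            start, end = token.split("-")
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(token))
    return sorted(include - exclude)

# parse_cpulist("0-3,^2") -> [0, 1, 3]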
def run_virsh_autostart(test, params, env): """ Test command: virsh autostart Set(or disable) autostart for a domain """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) persistent_vm = "yes" == params.get("persistent_vm", "yes") readonly_mode = "yes" == params.get("readonly_mode", "no") autostart_vm = "yes" == params.get("autostart_vm", "no") autostart_extra = params.get("autostart_extra", "") status_error = "yes" == params.get("status_error", "no") # Prepare transient/persistent vm original_xml = vm.backup_xml() if not persistent_vm and vm.is_persistent(): vm.undefine() elif persistent_vm and not vm.is_persistent(): vm.define(original_xml) original_autost = vm.is_autostart() logging.debug("Original VM %s autostart: %s", vm_name, original_autost) options = " " if not autostart_vm: options = "--disable " if autostart_extra: options += autostart_extra # Readonly mode ro_flag = False if readonly_mode: ro_flag = True # Result check def autostart_check(): """ Check if the VM autostart """ res = False if autostart_vm and vm.is_autostart() and vm.is_alive(): logging.debug("VM autostart as expected") res = True if not autostart_vm and not vm.is_autostart() and vm.is_dead(): logging.debug("VM not autostart as expected") res = True return res # Run test try: # Make sure the VM is inactive(except transient VM) if vm.is_persistent() and vm.is_alive(): vm.destroy() cmd_result = virsh.autostart(vm_name, options, ignore_status=True, debug=True, readonly=ro_flag) err = cmd_result.stderr.strip() status = cmd_result.exit_status # Restart libvirtd and sleep 2 utils_libvirtd.libvirtd_restart() if not status_error: if status: raise error.TestFail(err) elif not autostart_check(): raise error.TestFail("Autostart check fail") elif status_error and status == 0: raise error.TestFail("Expect fail, but run successfully.") finally: # Recover env vm.destroy() if not vm.is_persistent(): virsh.define(original_xml) os.remove(original_xml) if original_autost and not vm.is_autostart(): virsh.autostart(vm_name, "") elif not original_autost and vm.is_autostart(): virsh.autostart(vm_name, "--disable")
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Checking for vcpu numbers in vcpucount, vcpuinfo, domain xml, vcpupin and inside domain. 3. Plug vcpu for the domain. 4. Repeat step 2 to check again. 5. Control domain(save, managedsave, s3, s4, etc.). 6. Repeat step 2 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 2 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 2 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave related actions). 12. Repeat step 2 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 2 to check again. 15. Recover test environment. """ def manipulate_domain(vm_name, vm_operation, recover=False): """ Operate domain to given state or recover it. :params vm_name: Name of the VM domain :params vm_operation: Operation to be performed on VM domain like save, managedsave, suspend :params recover: flag to inform whether to set or reset vm_operation """ save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") if not recover: if vm_operation == "save": save_option = "" result = virsh.save(vm_name, save_file, save_option, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "managedsave": managedsave_option = "" result = virsh.managedsave(vm_name, managedsave_option, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s3": suspend_target = "mem" result = virsh.dompmsuspend(vm_name, suspend_target, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s4": suspend_target = "disk" result = virsh.dompmsuspend(vm_name, suspend_target, ignore_status=True, debug=True) libvirt.check_exit_status(result) # Wait domain state change: 'in shutdown' -> 'shut off' utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5) elif vm_operation == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "reboot": vm.reboot() else: logging.debug("No operation for the domain") else: if vm_operation == "save": if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) libvirt.check_exit_status(result) os.remove(save_file) else: test.error("No save file for domain restore") elif vm_operation in ["managedsave", "s4"]: result = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "s3": suspend_target = "mem" result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "suspend": result = virsh.resume(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif vm_operation == "reboot": pass else: logging.debug("No need recover the domain") def online_new_vcpu(vm, vcpu_plug_num): """ For Fedora/RHEL7 guests, udev can not online hot-added CPUs automatically, (refer to BZ#968811 for details) so enable them manually. 
:params vm: VM object :params vcpu_plug_num: Hotplugged vcpu count """ cpu_is_online = [] session = vm.wait_for_login() for i in range(1, int(vcpu_plug_num)): cpu_is_online.append(False) cpu = "/sys/devices/system/cpu/cpu%s/online" % i cmd_s, cmd_o = session.cmd_status_output("cat %s" % cpu) logging.debug("cmd exist status: %s, cmd output %s", cmd_s, cmd_o) if cmd_s != 0: logging.error("Can not find cpu %s in domain", i) else: if cmd_o.strip() == "0": if session.cmd_status("echo 1 > %s" % cpu) == 0: cpu_is_online[i-1] = True else: logging.error("Fail to enable cpu %s online", i) else: cpu_is_online[i-1] = True session.close() return False not in cpu_is_online def check_setvcpus_result(cmd_result, expect_error): """ Check command result. For setvcpus, pass unsupported commands(plug or unplug vcpus) by checking command stderr. :params cmd_result: Command result :params expect_error: Whether to expect error True or False """ if cmd_result.exit_status != 0: if expect_error: logging.debug("Expect fail: %s", cmd_result.stderr) return # setvcpu/hotplug is only available as of qemu 1.5 and it's still # evolving. In general the addition of vcpu's may use the QMP # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands. # The removal of vcpu's may work in qemu 1.5 due to how cpu_set # can set vcpus online or offline; however, there doesn't appear # to be a complementary cpu-del feature yet, so we can add, but # not delete in 1.6. # A 1.6 qemu will not allow the cpu-add command to be run on # a configuration using <os> machine property 1.4 or earlier. # That is the XML <os> element with the <type> property having # an attribute 'machine' which is a tuple of 3 elements separated # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5". if re.search("unable to execute QEMU command 'cpu-add'", cmd_result.stderr): test.cancel("guest <os> machine property may be too" " old to allow hotplug") # A qemu older than 1.5 or an unplug for 1.6 will result in # the following failure. In general, any time libvirt determines # it cannot support adding or removing a vCPU... 
if re.search("cannot change vcpu count of this domain", cmd_result.stderr): test.cancel("Unsupport virsh setvcpu hotplug") # Maybe QEMU doesn't support unplug vcpu if re.search("Operation not supported: qemu didn't unplug the vCPUs", cmd_result.stderr): test.cancel("Your qemu unsupport unplug vcpu") # Qemu guest agent version could be too low if re.search("The command guest-get-vcpus has not been found", cmd_result.stderr): err_msg = "Your agent version is too low: %s" % cmd_result.stderr logging.warning(err_msg) test.cancel(err_msg) # Attempting to enable more vCPUs in the guest than is currently # enabled in the guest but less than the maximum count for the VM if re.search("requested vcpu count is greater than the count of " "enabled vcpus in the domain", cmd_result.stderr): logging.debug("Expect fail: %s", cmd_result.stderr) return # Otherwise, it seems we have a real error test.fail("Run failed with right command: %s" % cmd_result.stderr) else: if expect_error: test.fail("Expect fail but run successfully") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = int(params.get("vcpu_max_num")) vcpu_current_num = int(params.get("vcpu_current_num")) vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = int(params.get("vcpu_plug_num")) vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = int(params.get("vcpu_unplug_num")) vcpu_max_timeout = int(params.get("vcpu_max_timeout", "480")) setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") with_stress = "yes" == params.get("run_stress", "no") iterations = int(params.get("test_itr", 1)) topology_correction = "yes" == params.get("topology_correction", "no") # Init expect vcpu count values expect_vcpu_num = {'max_config': vcpu_max_num, 'max_live': vcpu_max_num, 'cur_config': vcpu_current_num, 'cur_live': vcpu_current_num, 'guest_live': vcpu_current_num} if check_after_plug_fail: expect_vcpu_num_bk = expect_vcpu_num.copy() # Init expect vcpu pin values expect_vcpupin = {} result_vcpu = True # Init cpu-list for vcpupin host_cpu_count = os.sysconf('SC_NPROCESSORS_CONF') if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): test.cancel("We need more cpus on host in this case for the cpu-list" "=%s. But current number of cpu on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpus_list = cpu_util.cpu_online_list() logging.debug("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, vcpu_max_num, vcpu_current_num, topology_correction=topology_correction) # Do not apply S3/S4 on power cpu_arch = platform.machine() if cpu_arch in ('x86_64', 'i386', 'i686'): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() if with_stress: bt = utils_test.run_avocado_bg(vm, params, test) if not bt: test.cancel("guest stress failed to start") # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test for _ in range(iterations): result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num['cur_config'] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num['guest_live'] = vcpu_plug_num else: expect_vcpu_num['cur_live'] = vcpu_plug_num expect_vcpu_num['guest_live'] = vcpu_plug_num if not status_error: if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(vcpu_plug_num, vm), vcpu_max_timeout, text="wait for vcpu online") or not online_new_vcpu(vm, vcpu_plug_num): test.fail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands 
result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception as e: test.warn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num['cur_live'] = vcpu_plug_num expect_vcpu_num['guest_live'] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num['guest_live'] = vcpu_plug_num else: expect_vcpu_num['cur_live'] = vcpu_current_num expect_vcpu_num['guest_live'] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable # when VM started , and it required that vcpu 0(id=1) is always # present and non-hotpluggable, which means we can't hotunplug these # vcpus directly. So we can either hotplug more vcpus before we do # hotunplug, or modify the 'hotpluggable' attribute to 'yes' of the # vcpus except vcpu 0, to make sure libvirt can find appropriate # hotpluggable vcpus to reach the desired target vcpu count. For # simple prepare step, here we choose to hotplug more vcpus. if vcpu_unplug: if setvcpu_option == "--live": logging.info("Hotplug vcpu to the maximum count to make" "sure all these new plugged vcpus are " "hotunpluggable") result = virsh.setvcpus(vm_name, vcpu_max_num, '--live', debug=True) libvirt.check_exit_status(result) # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set # expect_vcpupin to empty expect_vcpupin = {} # Operation of setvcpus is asynchronization, even if it return, # may not mean it is complete, a poll checking of guest vcpu numbers # need to be executed. # So for case of unpluging vcpus from max vcpu number to 1, when # setvcpus return, need continue to obverse if vcpu number is # continually to be unplugged to 1 gradually. 
result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) unsupport_str = utils_hotplug.vcpuhotunplug_unsupport_str() if unsupport_str and (unsupport_str in result.stderr): test.cancel("Vcpu hotunplug is not supported in this host:" "\n%s" % result.stderr) try: session = vm.wait_for_login() cmd = "lscpu | grep \"^CPU(s):\"" operation = "setvcpus" prev_output = -1 while True: ret, output = session.cmd_status_output(cmd) if ret: test.error("Run lscpu failed, output: %s" % output) output = output.split(":")[-1].strip() if int(prev_output) == int(output): break prev_output = output time.sleep(5) logging.debug("CPUs available from inside guest after %s - %s", operation, output) if int(output) != vcpu_unplug_num: test.fail("CPU %s failed as cpus are not " "reflected from inside guest" % operation) finally: if session: session.close() check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num['cur_config'] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num['guest_live'] = vcpu_unplug_num else: expect_vcpu_num['cur_live'] = vcpu_unplug_num expect_vcpu_num['guest_live'] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time # (QEMU bug), here we wait for 10 mins then skip the # remaining part of tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception as e: test.warn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num['cur_live'] = vcpu_unplug_num expect_vcpu_num['guest_live'] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num['guest_live'] = vcpu_unplug_num else: expect_vcpu_num['cur_live'] = vcpu_current_num expect_vcpu_num['guest_live'] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands result_vcpu = utils_hotplug.check_vcpu_value(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover env finally: if need_mkswap: vm.cleanup_swap() if with_stress: bt.join(ignore_status=True) vm.destroy() backup_xml.sync() if not status_error: if not result_vcpu: test.fail("Test Failed")
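# Illustrative sketch (not part of the test above): because setvcpus is asynchronous,
# the unplug path above polls `lscpu` in the guest until the reported count stops
# changing. The same "wait until a value stabilizes" pattern, stand-alone, with a
# caller-supplied read_value callable (hypothetical):
import time

def wait_until_stable(read_value, interval=5, max_checks=60):
    """Poll read_value() until two consecutive reads agree, then return the value."""
    previous = None
    for _ in range(max_checks):
        current = read_value()
        if previous is not None and current == previous:
            return current
        previous = current
        time.sleep(interval)
    raise RuntimeError("value did not stabilize after %d checks" % max_checks)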
def run(test, params, env): """ Test command: virsh autostart Set(or disable) autostart for a domain """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) persistent_vm = "yes" == params.get("persistent_vm", "yes") readonly_mode = "yes" == params.get("readonly_mode", "no") autostart_vm = "yes" == params.get("autostart_vm", "no") autostart_extra = params.get("autostart_extra", "") status_error = "yes" == params.get("status_error", "no") # Prepare transient/persistent vm original_xml = vm.backup_xml() if not persistent_vm and vm.is_persistent(): vm.undefine() elif persistent_vm and not vm.is_persistent(): vm.define(original_xml) original_autost = vm.is_autostart() logging.debug("Original VM %s autostart: %s", vm_name, original_autost) options = " " if not autostart_vm: options = "--disable " if autostart_extra: options += autostart_extra # Readonly mode ro_flag = False if readonly_mode: ro_flag = True # Result check def autostart_check(): """ Check if the VM autostart """ res = False if autostart_vm and vm.is_autostart() and vm.is_alive(): logging.debug("VM autostart as expected") res = True if not autostart_vm and not vm.is_autostart() and vm.is_dead(): logging.debug("VM not autostart as expected") res = True return res # Run test try: # Make sure the VM is inactive(except transient VM) if vm.is_persistent() and vm.is_alive(): vm.destroy() cmd_result = virsh.autostart(vm_name, options, ignore_status=True, debug=True, readonly=ro_flag) err = cmd_result.stderr.strip() status = cmd_result.exit_status # rhbz#1755303 if libvirt_version.version_compare(5, 6, 0): os.remove("/run/libvirt/qemu/autostarted") # Restart libvirtd and sleep 2 utils_libvirtd.libvirtd_restart() if not status_error: if status: test.fail(err) elif not autostart_check(): test.fail("Autostart check fail") elif status_error and status == 0: test.fail("Expect fail, but run successfully.") finally: # Recover env vm.destroy() if not vm.is_persistent(): virsh.define(original_xml) os.remove(original_xml) if original_autost and not vm.is_autostart(): virsh.autostart(vm_name, "") elif not original_autost and vm.is_autostart(): virsh.autostart(vm_name, "--disable")
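# Illustrative sketch (not part of the test above): for the QEMU driver, marking a
# domain autostart creates a symlink under /etc/libvirt/qemu/autostart/ (the rename
# test above lists that directory for the same reason). A stand-alone check of that
# on-disk marker; the helper name is hypothetical.
import os

def is_marked_autostart(vm_name, autostart_dir="/etc/libvirt/qemu/autostart"):
    """Return True if an autostart symlink exists for vm_name (QEMU driver layout)."""
    return os.path.islink(os.path.join(autostart_dir, "%s.xml" % vm_name))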