def remote_test(remote_ip, local_ip, remote_pwd, remote_prompt, vm_name,
                status_error_test):
    """
    Run 'virsh setvcpus --live' against the local libvirtd from a remote
    host and report the command status.

    :param remote_ip: IP of the remote host to log in to
    :param local_ip: local host IP, used to build the connection URI
    :param remote_pwd: root password on the remote host
    :param remote_prompt: shell prompt expected on the remote host
    :param vm_name: name of the domain to operate on
    :param status_error_test: expected-failure flag, returned unchanged
    :return: tuple of (exit status, status_error flag, error output)
    :raise error.TestNAError: if libvirt lacks '--live' for setvcpus
    """
    err = ""
    status = 1
    status_error = status_error_test
    session = None
    try:
        # Check capability first: skipping before remote_login avoids
        # opening a session we would never use.
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            raise error.TestNAError("The current libvirt doesn't support"
                                    " '--live' option for setvcpus")
        remote_uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22", "root",
                                      remote_pwd, remote_prompt)
        session.cmd_output('LANG=C')
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_uri, vm_name)
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        if status != 0:
            err = output
    except process.CmdError:
        status = 1
        err = "remote test failed"
    finally:
        # Always release the remote session; the original leaked it when
        # an exception escaped before session.close().
        if session:
            session.close()
    return status, status_error, err
def remote_test(remote_ip, local_ip, remote_pwd, remote_prompt, vm_name,
                status_error_test):
    """
    Run 'virsh setvcpus --live' against the local libvirtd from a remote
    host and report the command status.

    :param remote_ip: IP of the remote host to log in to
    :param local_ip: local host IP, used to build the connection URI
    :param remote_pwd: root password on the remote host
    :param remote_prompt: shell prompt expected on the remote host
    :param vm_name: name of the domain to operate on
    :param status_error_test: expected-failure flag, returned unchanged
    :return: tuple of (exit status, status_error flag, error output)
    :raise error.TestNAError: if libvirt lacks '--live' for setvcpus
    """
    err = ""
    status = 1
    status_error = status_error_test
    session = None
    try:
        # Check capability first: skipping before remote_login avoids
        # opening a session we would never use.
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            raise error.TestNAError("The current libvirt doesn't support"
                                    " '--live' option for setvcpus")
        remote_uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22", "root",
                                      remote_pwd, remote_prompt)
        session.cmd_output('LANG=C')
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_uri, vm_name)
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        if status != 0:
            err = output
    except error.CmdError:
        status = 1
        err = "remote test failed"
    finally:
        # Always release the remote session; the original leaked it when
        # an exception escaped before session.close().
        if session:
            session.close()
    return status, status_error, err
def run(test, params, env):
    """
    Test command: virsh qemu-agent-command.

    :param test: test object (provides tmpdir)
    :param params: dict-like test parameters
    :param env: test environment, provides the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("vm_ref", "domname")
    vm_state = params.get("vm_state", "running")
    cmd = params.get("agent_cmd", "")
    options = params.get("options", "")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # For positive tests with options, skip early unless the installed
    # libvirt advertises the option in its help output.
    if not status_error and options:
        option = options.split()[0]
        test_cmd = "qemu-agent-command"
        if virsh.has_command_help_match(test_cmd, option) is None:
            raise error.TestNAError("The current libvirt doesn't support"
                                    " %s option for %s" % (option, test_cmd))
    guest_cpu_busy = "yes" == params.get("guest_cpu_busy", "no")
    password = params.get("password", None)
    domuuid = vm.get_uuid()
    domid = ""
    # Snapshot the inactive XML so the original domain definition can be
    # restored by reset_env() afterwards.
    xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    libvirtd_inst = utils_libvirtd.Libvirtd()

    # Prepare domain
    try:
        reset_domain(vm, vm_state, needs_agent, guest_cpu_busy, password)
    except error.TestNAError, details:
        # Roll back any partial preparation before skipping the test.
        reset_env(vm_name, xml_file)
        raise error.TestNAError(details)
def check_virsh_command_and_option(command, option=None):
    """
    Check if virsh command exists

    :param command: the command to be checked
    :param option: the command option to be checked
    """
    # NOTE(review): 'test' is neither a parameter nor a local here — unless
    # it exists at module/enclosing scope, the cancel calls below raise
    # NameError. A sibling variant of this helper takes 'test' as its first
    # parameter; confirm which one callers use.
    msg = "This version of libvirt does not support "
    if not virsh.has_help_command(command):
        test.cancel(msg + "virsh command '%s'" % command)
    if option and not virsh.has_command_help_match(command, option):
        test.cancel(msg + "virsh command '%s' with option '%s'"
                    % (command, option))
def check_virsh_command_and_option(test, command, option=None):
    """
    Cancel the running test when virsh lacks a command or command option.

    :param test: test object
    :param command: the command to validate
    :param option: the option for the command
    :raise: test.cancel if command is not supported
    """
    unsupported = "This version of libvirt does not support "
    if not virsh.has_help_command(command):
        test.cancel(unsupported + "virsh command '%s'" % command)
    if not option:
        return
    if not virsh.has_command_help_match(command, option):
        test.cancel(unsupported + "virsh command '%s' with option '%s'"
                    % (command, option))
def remote_test(remote_ip, local_ip, remote_pwd, remote_prompt, vm_name):
    """
    Run 'virsh setvcpus --live' against the local libvirtd from a remote
    host and report the command status.

    :param remote_ip: IP of the remote host to log in to
    :param local_ip: local host IP, used to build the connection URI
    :param remote_pwd: root password on the remote host
    :param remote_prompt: shell prompt expected on the remote host
    :param vm_name: name of the domain to operate on
    :return: tuple of (exit status, status_error flag, error output)
    """
    err = ""
    # Initialize results up front: the original left 'status' and
    # 'status_error' unbound on the success path, so the return statement
    # raised UnboundLocalError whenever the remote command succeeded.
    status = 1
    status_error = "no"
    session = None
    try:
        remote_uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22", "root",
                                      remote_pwd, remote_prompt)
        session.cmd_output('LANG=C')
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_uri, vm_name)
        # Query help for the 'setvcpus' command itself — the original passed
        # the whole command line, which never matches the help index.
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            # Old libvirt without '--live': the command is expected to fail.
            status_error = "yes"
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        if status != 0:
            err = output
    except error.CmdError:
        status = 1
        status_error = "yes"
        err = "remote test failed"
    finally:
        # Always release the remote session.
        if session:
            session.close()
    return status, status_error, err
def operate(self, index):
    """
    Return True/False (good/bad) result of operating on a device

    :param index: index into self.device_xmls of the device to attach
    :return: True when the attach matched expectations, False otherwise
    :raise error.TestNAError: when libvirt lacks a requested flag and a
                              failure is not the expected outcome
    """
    vadu_dargs = make_vadu_dargs(self.test_params,
                                 self.device_xmls[index].xml)
    # Acts as a dict for its own API params
    self.test_params.virsh['debug'] = True
    vadu_dargs.update(self.test_params.virsh)
    options = vadu_dargs.get('flagstr')
    if options:
        opt_list = options.split()
        for opt in opt_list:
            # Skip (not fail) when this libvirt lacks the flag, unless the
            # test expects a failure anyway.
            if not virsh.has_command_help_match("attach-device", opt) and\
                    not self.test_params.status_error:
                raise error.TestNAError("Current libvirt version doesn't "
                                        "support '%s' for attach-device"
                                        " command" % opt)
    cmdresult = self.test_params.virsh.attach_device(**vadu_dargs)
    self.test_params.virsh['debug'] = False
    # Command success is not enough, must also confirm activity worked
    if (cmdresult.exit_status == 0):
        if (cmdresult.stdout.count('attached successfully') or
                cmdresult.stderr.count('attached successfully')):
            return True
        # NOTE(review): exit status 0 without the success message falls
        # through and implicitly returns None (falsy) — confirm callers
        # treat None the same as False.
    else:
        if (cmdresult.stderr.count("XML error") or
                cmdresult.stdout.count("XML error")):
            logging.error("Errant XML:")
            xmldevice = self.device_xmls[index]
            # All LibvirtXMLBase subclasses string-convert into raw XML
            for line in str(xmldevice).splitlines():
                logging.error(" %s", line)
        # See analyze_negative_results - expects return of true
        if self.test_params.status_error:
            return True
        else:
            return False
def operate(self, index):
    """
    Return True/False (good/bad) result of operating on a device

    :param index: index into self.device_xmls of the device to attach
    :return: True when the attach matched expectations, False otherwise
    """
    vadu_dargs = make_vadu_dargs(self.test_params,
                                 self.device_xmls[index].xml,
                                 self.test)
    # Acts as a dict for its own API params
    self.test_params.virsh['debug'] = True
    vadu_dargs.update(self.test_params.virsh)
    options = vadu_dargs.get('flagstr')
    if options:
        opt_list = options.split()
        for opt in opt_list:
            # Cancel (not fail) when this libvirt lacks the flag, unless
            # the test expects a failure anyway.
            if not virsh.has_command_help_match("attach-device", opt) and\
                    not self.test_params.status_error:
                self.test.cancel("Current libvirt version doesn't "
                                 "support '%s' for attach-device"
                                 " command" % opt)
    cmdresult = self.test_params.virsh.attach_device(**vadu_dargs)
    self.test_params.virsh['debug'] = False
    # Command success is not enough, must also confirm activity worked
    # output XML no matter attach pass or not
    logging.debug("Attached XML:")
    for line in str(self.device_xmls[index]).splitlines():
        logging.debug("%s", line)
    if (cmdresult.exit_status == 0):
        if (cmdresult.stdout.strip().count('attached successfully') or
                cmdresult.stderr.strip().count('attached successfully')):
            return True
        # NOTE(review): exit status 0 without the success message falls
        # through and implicitly returns None (falsy) — confirm callers
        # treat None the same as False.
    else:
        # See analyze_negative_results - expects return of true
        if self.test_params.status_error:
            return True
        else:
            return False
# Read migration test parameters and validate environment prerequisites.
delay = int(params.get("virsh_migrate_delay", 10))
status_error = params.get("status_error", 'no')
libvirtd_state = params.get("virsh_migrate_libvirtd_state", 'on')
src_state = params.get("virsh_migrate_src_state", "running")
migrate_uri = params.get("virsh_migrate_migrateuri", None)
shared_storage = params.get("virsh_migrate_shared_storage", None)
dest_xmlfile = ""

# Direct migration is supported only for Xen in libvirt
if options.count("direct") or extra.count("direct"):
    # BUG FIX: compare string values with '!='. The original used
    # 'is not "xen"', which tests object identity, not equality, and can
    # mis-evaluate for equal strings.
    if params.get("driver_type") != "xen":
        raise error.TestNAError("Direct migration is supported only for "
                                "Xen in libvirt.")

# Skip when this libvirt build has no '--compressed' migrate flag.
if options.count("compressed") and not \
        virsh.has_command_help_match("migrate", "--compressed"):
    raise error.TestNAError("Do not support compressed option on this version.")

# Add migrateuri if exists and check for default example
if migrate_uri:
    if migrate_uri.count("EXAMPLE"):
        raise error.TestNAError("Set up the migrate_uri.")
    extra = ("%s --migrateuri=%s" % (extra, migrate_uri))

# To migrate you need to have a shared disk between hosts
if shared_storage.count("EXAMPLE"):
    raise error.TestError("For migration you need to have a shared "
                          "storage.")

# Get expected cache state for test
attach_scsi_disk = "yes" == params.get("attach_scsi_disk", "no")
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.

    :param test: test object
    :param params: dict-like test parameters
    :param env: test environment, provides the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    # Backup of the inactive XML; restored in the finally block.
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on REHL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()

        # Resolve the domain reference the reboot command will use.
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            # Remote case: run virsh over ssh against the local URI.
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22",
                                              "root", remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri,
                                                        vm_name, mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # the operation before the end of reboot
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s",
                              cmdresult.stderr)
                # NOTE(review): regex passed as a normal string; consider a
                # raw string literal for the '\s' escapes.
                if not virsh.has_command_help_match('reboot', '\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            else:
                vm.wait_for_login().close()
            output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref, ignore_status=True, debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older "
                                "libvirt versions")
                test.fail("Run failed with right command")
    finally:
        # Restore the original domain definition.
        xml_backup.sync()
command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name, mode) status, output = session.cmd_status_output(command, internal_timeout=5) session.close() except (remote.LoginError, process.CmdError, aexpect.ShellError), e: logging.error("Exception: %s", str(e)) status = -1 if vm_ref != "remote_name": vm_ref = "%s %s" % (vm_ref, extra) cmdresult = virsh.reboot(vm_ref, mode, ignore_status=True, debug=True) status = cmdresult.exit_status if status: logging.debug("Error status, cmd error: %s", cmdresult.stderr) if not virsh.has_command_help_match('reboot', '\s+--mode\s+'): # old libvirt doesn't support reboot status = -2 output = virsh.dom_list(ignore_status=True).stdout.strip() # recover libvirtd service start if libvirtd == "off": utils_libvirtd.libvirtd_start() # check status_error if status_error: if not status: raise error.TestFail("Run successfully with wrong command!") else: if status or (not re.search(vm_name, output)): if status == -2:
cpu_mode = "yes" == params.get("managedsave_cpumode", "no") test_undefine = "yes" == params.get("managedsave_undefine", "no") test_bypass_cache = "yes" == params.get("test_bypass_cache", "no") autostart_bypass_cache = params.get("autostart_bypass_cache", "") multi_guests = params.get("multi_guests", "") test_libvirt_guests = params.get("test_libvirt_guests", "") check_flags = "yes" == params.get("check_flags", "no") security_driver = params.get("security_driver", "") remove_after_cmd = "yes" == params.get("remove_after_cmd", "no") option = params.get("managedsave_option", "") check_shutdown = "yes" == params.get("shutdown_after_cmd", "no") pre_vm_state = params.get("pre_vm_state", "") move_saved_file = "yes" == params.get("move_saved_file", "no") test_loop_cmd = "yes" == params.get("test_loop_cmd", "no") if option: if not virsh.has_command_help_match('managedsave', option): # Older libvirt does not have this option raise error.TestNAError("Older libvirt does not" " handle arguments consistently") # Backup xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Get the libvirtd service libvirtd = utils_libvirtd.Libvirtd() # Get config files. qemu_config = utils_config.LibvirtQemuConfig() libvirt_guests_config = utils_config.LibvirtGuestsConfig() # Get libvirt-guests service libvirt_guests = Factory.create_service("libvirt-guests") try:
def run(test, params, env): """ Test command: virsh setvcpus. The command can change the number of virtual CPUs in the guest domain. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh setvcpus operation. 3.Recover test environment. 4.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) pre_vm_state = params.get("setvcpus_pre_vm_state") command = params.get("setvcpus_command", "setvcpus") options = params.get("setvcpus_options") vm_ref = params.get("setvcpus_vm_ref", "name") count = params.get("setvcpus_count", "") convert_err = "Can't convert {0} to integer type" try: count = int(count) except ValueError: # 'count' may not invalid number in negative tests logging.debug(convert_err.format(count)) current_vcpu = int(params.get("setvcpus_current", "1")) try: current_vcpu = int(current_vcpu) except ValueError: raise error.TestError(convert_err.format(current_vcpu)) max_vcpu = int(params.get("setvcpus_max", "4")) try: max_vcpu = int(max_vcpu) except ValueError: raise error.TestError(convert_err.format(max_vcpu)) extra_param = params.get("setvcpus_extra_param") count_option = "%s %s" % (count, extra_param) status_error = params.get("status_error") remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM") local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM") remote_pwd = params.get("remote_pwd", "") remote_prompt = params.get("remote_prompt", "#") tmpxml = os.path.join(test.tmpdir, 'tmp.xml') set_topology = "yes" == params.get("set_topology", "no") sockets = params.get("topology_sockets") cores = params.get("topology_cores") threads = params.get("topology_threads") start_vm_after_set = "yes" == params.get("start_vm_after_set", "no") start_vm_expect_fail = "yes" == params.get("start_vm_expect_fail", "no") remove_vm_feature = params.get("remove_vm_feature", "") # Early death if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM")): raise error.TestNAError("remote/local ip parameters not 
set.") # Save original configuration vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = vmxml.copy() # Normal processing of the test is to set the maximum vcpu count to 4, # and set the current vcpu count to 1, then adjust the 'count' value to # plug or unplug vcpus. # # This is generally fine when the guest is not running; however, the # hotswap functionality hasn't always worked very well and is under # going lots of change from using the hmp "cpu_set" command in 1.5 # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command # seems to have been deprecated making things very messy. # # To further muddy the waters, the "cpu-add" functionality is supported # for specific machine type versions. For the purposes of this test that # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which # version of qemu/kvm was used to initially create/generate the XML for # the machine this could result in a newer qemu still using 1.4 or earlier # for the machine type. 
# try: if vm.is_alive(): vm.destroy() # Set maximum vcpus, so we can run all kinds of normal tests without # encounter requested vcpus greater than max allowable vcpus error vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("Pre-test xml is %s", vmxml.xmltreefile) # Get the number of cpus, current value if set, and machine type orig_count, orig_current, mtype = get_xmldata(vm_name, tmpxml, options) logging.debug( "Before run setvcpus: cpu_count=%d, cpu_current=%d," " mtype=%s", orig_count, orig_current, mtype) # Set cpu topology if set_topology: vmcpu_xml = vm_xml.VMCPUXML() vmcpu_xml['topology'] = { 'sockets': sockets, 'cores': cores, 'threads': threads } vmxml['cpu'] = vmcpu_xml vmxml.sync() # Remove vm features if remove_vm_feature: try: vmfeature_xml = vmxml['features'] except xcepts.LibvirtXMLNotFoundError, e: logging.debug("features not found in xml\n%s", e) else: vmfeature_xml.remove_feature(remove_vm_feature) vmxml['features'] = vmfeature_xml vmxml.sync() logging.debug("xml after remove feature is:\n%s", vmxml.xmltreefile) # Restart, unless that's not our test if not vm.is_alive(): vm.start() vm.wait_for_login() if orig_count == 1 and count == 1: logging.debug( "Original vCPU count is 1, just checking if setvcpus " "can still set current.") domid = vm.get_id() # only valid for running domuuid = vm.get_uuid() if pre_vm_state == "paused": vm.pause() elif pre_vm_state == "shut off" and vm.is_alive(): vm.destroy() # Run test if vm_ref == "remote": (setvcpu_exit_status, status_error, setvcpu_exit_stderr) = remote_test(remote_ip, local_ip, remote_pwd, remote_prompt, vm_name, status_error) else: if vm_ref == "name": dom_option = vm_name elif vm_ref == "id": dom_option = domid if params.get("setvcpus_hex_id") is not None: dom_option = hex(int(domid)) elif params.get("setvcpus_invalid_id") is not None: dom_option = params.get("setvcpus_invalid_id") elif vm_ref == "uuid": dom_option = domuuid if 
params.get("setvcpus_invalid_uuid") is not None: dom_option = params.get("setvcpus_invalid_uuid") else: dom_option = vm_ref option_list = options.split(" ") for item in option_list: if virsh.has_command_help_match(command, item) is None: raise error.TestNAError("The current libvirt version" " doesn't support '%s' option" % item) status = virsh.setvcpus(dom_option, count_option, options, ignore_status=True, debug=True) setvcpu_exit_status = status.exit_status setvcpu_exit_stderr = status.stderr.strip() # Start VM after set vcpu if start_vm_after_set: if vm.is_alive(): logging.debug("VM already started") else: result = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result, start_vm_expect_fail)
def run(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1.Prepare test environment, adding a cdrom/floppy to VM.
    2.Perform virsh update-device operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object (provides virtdir/tmpdir)
    :param params: dict-like test parameters
    :param env: test environment, provides the VM object
    """

    # Before doing anything - let's be sure we can support this test
    # Parse flag list, skip testing early if flag is not supported
    # NOTE: "".split("--") returns [''] which messes up later empty test
    flag = params.get("updatedevice_flag", "")
    flag_list = []
    if flag.count("--"):
        flag_list = flag.split("--")
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if not bool(virsh.has_command_help_match("update-device", option)):
            raise error.TestNAError(
                "virsh update-device doesn't support --%s" % option)

    # As per RH BZ 961443 avoid testing before behavior changes
    if 'config' in flag_list:
        # SKIP tests using --config if libvirt is 0.9.10 or earlier
        if not libvirt_version.version_compare(0, 9, 10):
            raise error.TestNAError("BZ 961443: --config behavior change "
                                    "in version 0.9.10")
    if 'persistent' in flag_list:
        # SKIP tests using --persistent if libvirt 1.0.5 or earlier
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("BZ 961443: --persistent behavior change "
                                    "in version 1.0.5")

    # Prepare initial vm state
    vm_name = params.get("main_vm")
    vmxml = VMXML.new_from_dumpxml(vm_name, options="--inactive")
    vm = env.get_vm(vm_name)
    start_vm = "yes" == params.get("start_vm", "no")

    # Get the target bus/dev
    disk_type = params.get("disk_type", "cdrom")
    target_bus = params.get("updatedevice_target_bus", "ide")
    target_dev = params.get("updatedevice_target_dev", "hdc")
    disk_mode = params.get("disk_mode", "")
    support_mode = ['readonly', 'shareable']
    # NOTE(review): this condition looks inverted — it only fires when
    # disk_mode is EMPTY (and "" is never in support_mode, so an empty
    # disk_mode always raises). Presumably 'if disk_mode and disk_mode
    # not in support_mode' was intended; confirm against the cfg files.
    if not disk_mode and disk_mode not in support_mode:
        raise error.TestError("%s not in support mode %s"
                              % (disk_mode, support_mode))

    # Prepare tmp directory and files.
    orig_iso = os.path.join(test.virtdir, "orig.iso")
    test_iso = os.path.join(test.virtdir, "test.iso")
    test_diff_iso = os.path.join(test.virtdir, "test_diff.iso")
    update_xmlfile = os.path.join(test.tmpdir, "update.xml")
    create_attach_xml(update_xmlfile, test_iso, disk_type, target_bus,
                      target_dev, disk_mode)

    # This test needs a cdrom/floppy attached first - attach a cdrom/floppy
    # to a shutdown vm. Then decide to restart or not
    if vm.is_alive():
        vm.destroy()
    create_disk(vm_name, orig_iso, disk_type, target_dev, disk_mode)
    if start_vm:
        vm.start()
        domid = vm.get_id()
    else:
        domid = "domid invalid; domain is shut-off"

    # Get remaining parameters for configuration.
    twice = "yes" == params.get("updatedevice_twice", "no")
    diff_iso = "yes" == params.get("updatedevice_diff_iso", "no")
    vm_ref = params.get("updatedevice_vm_ref", "")
    status_error = "yes" == params.get("status_error", "no")
    extra = params.get("updatedevice_extra", "")

    # OK let's give this a whirl...
    try:
        if vm_ref == "id":
            vm_ref = domid
            if twice:
                # Don't pass in any flags
                virsh.update_device(domainarg=domid, filearg=update_xmlfile,
                                    ignore_status=True, debug=True)
            if diff_iso:
                # Swap filename of device backing file in update.xml
                os.remove(update_xmlfile)
                create_attach_xml(update_xmlfile, test_diff_iso, disk_type,
                                  target_bus, target_dev, disk_mode)
        elif vm_ref == "uuid":
            vm_ref = vmxml.uuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("updatedevice_invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, extra)

        cmdresult = virsh.update_device(domainarg=vm_ref,
                                        filearg=update_xmlfile,
                                        flagstr=flag,
                                        ignore_status=True, debug=True)
        status = cmdresult.exit_status

        active_vmxml = VMXML.new_from_dumpxml(vm_name)
        inactive_vmxml = VMXML.new_from_dumpxml(vm_name,
                                                options="--inactive")
    finally:
        # Tear down the domain and temporary images no matter what.
        vm.destroy(gracefully=False, free_mac_addresses=False)
        vmxml.undefine()
        vmxml.restore()
        vmxml.define()
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        if os.path.exists(test_iso):
            os.remove(test_iso)
        if os.path.exists(test_diff_iso):
            os.remove(test_diff_iso)

    # Result handling logic set errmsg only on error
    errmsg = None
    if status_error:
        if status == 0:
            errmsg = "Run successfully with wrong command!"
    else:  # Normal test
        if status != 0:
            errmsg = "Run failed with right command"
        if diff_iso:
            # Expect the backing file to have updated
            active_attached = is_attached(active_vmxml.devices, disk_type,
                                          test_diff_iso, target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            test_diff_iso, target_dev)
        else:
            # Expect backing file to remain the same
            active_attached = is_attached(active_vmxml.devices, disk_type,
                                          test_iso, target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            test_iso, target_dev)

        # Check behavior of combination before individual!
        if "config" in flag_list and "live" in flag_list:
            if not active_attached:
                errmsg = ("Active domain XML not updated when "
                          "--config --live options used")
            if not inactive_attached:
                errmsg = ("Inactive domain XML not updated when "
                          "--config --live options used")
        elif "live" in flag_list and inactive_attached:
            errmsg = ("Inactive domain XML updated when "
                      "--live option used")
        elif "config" in flag_list and active_attached:
            errmsg = ("Active domain XML updated when "
                      "--config option used")

        # persistent option behavior depends on start_vm
        if "persistent" in flag_list:
            if start_vm:
                if not active_attached or not inactive_attached:
                    errmsg = ("XML not updated when --persistent "
                              "option used on active domain")
            else:
                if not inactive_attached:
                    errmsg = ("XML not updated when --persistent "
                              "option used on inactive domain")
        if len(flag_list) == 0:
            # Not specifying any flag is the same as specifying --current
            if start_vm:
                if not active_attached:
                    errmsg = "Active domain XML not updated"
                elif inactive_attached:
                    errmsg = ("Inactive domain XML updated when active "
                              "requested")

    # Log some debugging info before destroying instances
    # NOTE(review): in the status_error branch 'active_attached' /
    # 'inactive_attached' are never assigned, so this logging would raise
    # NameError if errmsg is set there — verify the negative-test path.
    if errmsg is not None:
        logging.debug("Active XML:")
        logging.debug(str(active_vmxml))
        logging.debug("Inactive XML:")
        logging.debug(str(inactive_vmxml))
        logging.debug("active_attached: %s", str(active_attached))
        logging.debug("inctive_attached: %s", str(inactive_attached))
        logging.debug("Device XML:")
        logging.debug(open(update_xmlfile, "r").read())

    # clean up tmp files
    del vmxml
    del active_vmxml
    del inactive_vmxml
    os.unlink(update_xmlfile)

    if errmsg is not None:
        raise error.TestFail(errmsg)
def run(test, params, env):
    """
    Test command: virsh iothreaddel.

    The command can change the number of iothread.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh iothreaddel operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object
    :param params: dict-like test parameters
    :param env: test environment, provides the VM object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("iothread_pre_vm_state")
    command = params.get("iothread_command", "iothreaddel")
    options = params.get("iothread_options")
    vm_ref = params.get("iothread_vm_ref", "name")
    iothreads = params.get("iothreads", 4)
    iothread_id = params.get("iothread_id", "6")
    status_error = "yes" == params.get("status_error")
    iothreadids = params.get("iothreadids")
    iothreadpins = params.get("iothreadpins")

    try:
        iothreads = int(iothreads)
    except ValueError:
        # 'iothreads' may not invalid number in negative tests
        logging.debug("Can't convert %s to integer type", iothreads)

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if vm.is_alive():
            vm.destroy()

        # Skip when any requested option is missing from this libvirt.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                raise exceptions.TestSkipError("The current libvirt version"
                                               " doesn't support '%s' option"
                                               % item)

        # Set iothreads first
        if iothreadids:
            ids_xml = vm_xml.VMIothreadidsXML()
            ids_xml.iothread = iothreadids.split()
            vmxml.iothreadids = ids_xml
        if iothreadpins:
            cputune_xml = vm_xml.VMCPUTuneXML()
            io_pins = []
            for pins in iothreadpins.split():
                thread, cpuset = pins.split(':')
                io_pins.append({"iothread": thread,
                                "cpuset": cpuset})
            cputune_xml.iothreadpins = io_pins
            vmxml.cputune = cputune_xml
        vmxml.iothreads = iothreads
        logging.debug("Pre-test xml is %s", vmxml)
        vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
        elif vm_ref == "uuid":
            dom_option = domuuid
        else:
            dom_option = vm_ref

        ret = virsh.iothreaddel(dom_option, iothread_id,
                                options, ignore_status=True, debug=True)
        libvirt.check_exit_status(ret, status_error)

        if status_error:
            # Check domainxml
            iothread_info = get_xmlinfo(vm_name, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            if iothread_id in iothread_info:
                # BUG FIX: interpolate the id with '%' — the original passed
                # it as a second exception argument, leaving '%s' literal in
                # the failure message.
                raise exceptions.TestFail(
                    "Failed to add iothread %s in domain xml" % iothread_id)
            # Check iothreadinfo by virsh command
            iothread_info = libvirt.get_iothreadsinfo(dom_option, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            # BUG FIX: dict.has_key() is Python-2-only; use 'in' instead.
            if iothread_id in iothread_info:
                raise exceptions.TestFail("Failed to add iothread %s"
                                          % iothread_id)
    finally:
        # Cleanup
        if vm.is_alive():
            vm.destroy()
        orig_config_xml.sync()
def is_old_libvirt():
    """
    Detect a libvirt old enough that 'virsh setmem' lacks the --size flag.

    :return: True when '--size' is absent from the setmem help text
    """
    size_option = r'\s+\[--size\]\s+'
    return not virsh.has_command_help_match('setmem', size_option)
def run(test, params, env):
    """
    Test virsh detach-device command.

    The command can detach disk.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh detach-device operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    def create_device_file(device_source="/tmp/attach.img"):
        """
        Create a sparse 512MB device source file.

        :param device_source: Device source file.
        """
        try:
            with open(device_source, 'wb') as device_file:
                # Seek to the last byte and write one byte -> sparse file
                device_file.seek((512 * 1024 * 1024) - 1)
                device_file.write(str(0).encode())
        except IOError:
            logging.error("Image file %s created failed.", device_source)

    def check_vm_partition(vm, device, os_type, target_name):
        """
        Check VM disk's partition.

        :param vm. VM guest.
        :param os_type. VM's operation system type.
        :param target_name. Device target type.
        :return: True if check successfully.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                if device == "disk":
                    s, o = session.cmd_status_output(
                        "grep %s /proc/partitions" % target_name)
                    logging.info("Virtio devices in VM:\n%s", o)
                elif device == "cdrom":
                    s, o = session.cmd_status_output("ls /dev/cdrom")
                    logging.info("CDROM in VM:\n%s", o)
                elif device == "iface":
                    s, o = session.cmd_status_output("ls /")
                session.close()
                if s != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhle5.*

        :param vm. VM guest.
        :param os_type. VM's operation system type.
        :return: True if operate successfully.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output("rpm -qa | grep"
                                                    " redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, _ = session.cmd_status_output("modprobe acpiphp")
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_device_xml(params, xml_path, device_source):
        """
        Create a xml file for device (disk/cdrom/interface).

        :return: path of the generated device xml file.
        """
        device_xml_name = params.get("dt_device_xml", "device.xml")
        device_xml_file = os.path.join(xml_path, device_xml_name)
        device_type = params.get("dt_device_device", "disk")
        if device_type in ["disk", 'cdrom']:
            disk_class = vm_xml.VMXML.get_device_class('disk')
            if test_block_dev:
                disk = disk_class(type_name='block')
                stype = 'dev'
            else:
                disk = disk_class(type_name='file')
                stype = 'file'
            disk.device = device_type
            disk.driver = dict(name='qemu', type='raw')
            disk.source = disk.new_disk_source(attrs={stype: device_source})
            disk.target = dict(bus=device_bus, dev=device_target)
            disk.xmltreefile.write()
            shutil.copyfile(disk.xml, device_xml_file)
        else:
            iface_class = vm_xml.VMXML.get_device_class('interface')
            iface = iface_class(type_name='network')
            iface.mac_address = iface_mac_address
            iface.source = dict(network=iface_network)
            iface.model = iface_model_type
            iface.xmltreefile.write()
            shutil.copyfile(iface.xml, device_xml_file)
        return device_xml_file

    vm_ref = params.get("dt_device_vm_ref", "name")
    dt_options = params.get("dt_device_options", "")
    pre_vm_state = params.get("dt_device_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = "yes" == params.get("dt_device_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    device = params.get("dt_device_device", "disk")
    readonly = "yes" == params.get("detach_readonly", "no")
    tmp_dir = data_dir.get_tmp_dir()
    test_cmd = "detach-device"
    if not virsh.has_command_help_match(test_cmd, dt_options) and\
       not status_error:
        test.cancel("Current libvirt version doesn't support '%s'"
                    " for %s" % (dt_options, test_cmd))

    # Disk specific attributes.
    device_source_name = params.get("dt_device_device_source", "attach.img")
    device_target = params.get("dt_device_device_target", "vdd")
    device_bus = params.get("dt_device_bus_type")
    test_block_dev = "yes" == params.get("dt_device_iscsi_device", "no")

    # interface specific attributes.
    iface_network = params.get("dt_device_iface_network")
    iface_model_type = params.get("dt_device_iface_model_type")
    iface_mac_address = params.get("dt_device_iface_mac_address")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    device_source = os.path.join(tmp_dir, device_source_name)

    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # If we are testing cdrom device, we need to detach hdc in VM first.
        if device == "cdrom":
            virsh.detach_disk(vm_name, device_target, "--config",
                              ignore_status=True)

        device_xml = create_device_xml(params, tmp_dir, device_source)
        if not no_attach:
            s_attach = virsh.attach_device(vm_name, device_xml,
                                           flagstr="--config").exit_status
            if s_attach != 0:
                logging.error("Attach device failed before testing "
                              "detach-device")

        vm.start()
        vm.wait_for_serial_login()

        # Add acpiphp module before testing if VM's os type is rhle5.*
        if device in ['disk', 'cdrom']:
            if not acpiphp_module_modprobe(vm, os_type):
                test.error("Add acpiphp module failed before test.")

        # Turn VM into certain state.
        if pre_vm_state == "paused":
            logging.info("Suspending %s...", vm_name)
            if vm.is_alive():
                vm.pause()
        elif pre_vm_state == "shut off":
            logging.info("Shutting down %s...", vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        # Get disk count before test.
        if device in ['disk', 'cdrom']:
            device_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_before_cmd = len(vm_cls.devices)

        # Test.
        domid = vm.get_id()
        domuuid = vm.get_uuid()

        # Confirm how to reference a VM.
        if vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref == "uuid":
            vm_ref = domuuid
        else:
            vm_ref = ""

        status = virsh.detach_device(vm_ref, device_xml, readonly=readonly,
                                     flagstr=dt_options,
                                     debug=True).exit_status

        time.sleep(2)
        # Resume guest after command. On newer libvirt this is fixed as it has
        # been a bug. The change in xml file is done after the guest is
        # resumed.
        if pre_vm_state == "paused":
            vm.resume()

        # Check disk count after command.
        check_count_after_cmd = True
        if device in ['disk', 'cdrom']:
            device_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_cmd = len(vm_cls.devices)
        if device_count_after_cmd < device_count_before_cmd:
            check_count_after_cmd = False

        # Recover VM state.
        if pre_vm_state == "shut off" and device in ['disk', 'cdrom']:
            vm.start()

        # Check in VM after command.
        check_vm_after_cmd = True
        if device in ['disk', 'cdrom']:
            check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                                    device_target)

        # Destroy VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        if device in ['disk', 'cdrom']:
            device_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_shutdown = len(vm_cls.devices)
        if device_count_after_shutdown < device_count_before_cmd:
            check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if test_block_dev:
            libvirt.setup_or_cleanup_iscsi(False)
        elif os.path.exists(device_source):
            # Remove the backing file created by create_device_file().
            os.remove(device_source)
        # NOTE: the old fallback 'os.remove(tmp_dir)' was removed: os.remove()
        # cannot delete a directory (it always raised, masking the real test
        # result) and tmp_dir is managed by the framework anyway.

    # Check results.
    if status_error:
        if not status:
            test.fail("detach-device exit with unexpected value.")
    else:
        if status:
            test.fail("virsh detach-device failed.")
        if dt_options.count("config"):
            if check_count_after_shutdown:
                test.fail("See config detached device in "
                          "xml file after VM shutdown.")
            if pre_vm_state == "shut off":
                if check_count_after_cmd:
                    test.fail("See device in xml after detach with"
                              " --config option")
            elif pre_vm_state == "running":
                if not check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("Cannot see device in VM after"
                              " detach with '--config' option"
                              " when VM is running.")
        elif dt_options.count("live"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          "--live option")
            if not check_count_after_shutdown:
                test.fail("Cannot see config detached device in"
                          " xml file after VM shutdown with"
                          " '--live' option.")
            if check_vm_after_cmd and device in ['disk', 'cdrom']:
                test.fail("See device in VM with '--live' option"
                          " when VM is running")
        elif dt_options.count("current"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          " --current option")
            if pre_vm_state == "running":
                if not check_count_after_shutdown:
                    test.fail("Cannot see config detached device in"
                              " xml file after VM shutdown with"
                              " '--current' option.")
                if check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("See device in VM with '--live'"
                              " option when VM is running")
        elif dt_options.count("persistent"):
            if check_count_after_shutdown:
                test.fail("See device deattached with "
                          "'--persistent' option after "
                          "VM shutdown.")
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    # Use '==' for string comparison; 'is ""' relied on implementation-defined
    # literal interning and triggers a SyntaxWarning on modern Python.
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    # Default to "" so the len() checks below never see None
    dump_image_format = params.get("dump_image_format", "")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    check_bypass_timeout = int(params.get("check_bypass_timeout", "120"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def check_flag(file_flags):
        """
        Check if file flag include O_DIRECT.

        :param file_flags: The flags of dumped file

        Note, O_DIRECT(direct disk access hint) is defined as:
        on x86_64:
        #define O_DIRECT        00040000
        on ppc64le or arch64:
        #define O_DIRECT        00200000
        """
        arch = platform.machine()
        file_flag_check = int('00040000', 16)
        if 'ppc64' in arch or 'aarch64' in arch:
            file_flag_check = int('00200000', 16)
        if int(file_flags, 16) & file_flag_check == file_flag_check:
            logging.info("File flags include O_DIRECT")
            return True
        else:
            logging.error("File flags doesn't include O_DIRECT")
            return False

    def check_bypass(dump_file, result_dict):
        """
        Get the file flags of domain core dump file and check it.
        Runs in a child process; the outcome is reported through result_dict.
        """
        error = ''
        cmd1 = "lsof -w %s" % dump_file
        while True:
            if not os.path.exists(dump_file) or process.system(cmd1):
                time.sleep(0.1)
                continue
            cmd2 = ("cat /proc/$(%s |awk '/libvirt_i/{print $2}')/fdinfo/1"
                    "|grep flags|awk '{print $NF}'" % cmd1)
            ret = process.run(cmd2, allow_output_check='combined', shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                error = "Fail to get the flags of dumped file"
                logging.error(error)
                break
            if not len(output):
                continue
            try:
                logging.debug("The flag of dumped file: %s", output)
                if check_flag(output):
                    logging.info("Bypass file system cache "
                                 "successfully when dumping")
                    break
                else:
                    error = "Bypass file system cache fail when dumping"
                    logging.error(error)
                    break
            except (ValueError, IndexError) as detail:
                # Store a plain string, not the exception object, in the
                # Manager dict used for cross-process reporting.
                error = str(detail)
                logging.error(error)
                break
        result_dict['bypass'] = error

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """
        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"

        if not start_vm:
            domstate = "shut off"

        logging.debug("Domain should %s after run dump %s", domstate, options)

        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf, then
        the file shoule be normal raw file, otherwise it shoud be compress to
        specified format, the supported compress format including: lzop,
        gzip, bzip2, and xz.
        For memory-only dump, the default dump format is ELF, and it can also
        specify format by --format option, the result could be 'elf' or
        'data'.
        """
        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            ret = process.run(file_cmd, allow_output_check='combined',
                              shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Install lsof pkg if not installed
    if not utils_package.package_install("lsof"):
        test.cancel("Failed to install lsof in host\n")

    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        test.error("invalid dumpCore value: %s" % dump_guest_core)

    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("cmdline: %s" % result.stdout_text)
            if result.exit_status:
                test.fail("Not find dump-guest-core=%s in qemu cmdline"
                          % dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Deal with bypass-cache option
        if options.find('bypass-cache') >= 0:
            vm.wait_for_login()
            result_dict = multiprocessing.Manager().dict()
            child_process = multiprocessing.Process(target=check_bypass,
                                                    args=(dump_file,
                                                          result_dict))
            child_process.start()

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status
        if 'child_process' in locals():
            child_process.join(timeout=check_bypass_timeout)
            params['bypass'] = result_dict['bypass']

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            test.fail("Domain status check fail.")
        if status_error:
            if not status:
                test.fail("Expect fail, but run successfully")
        else:
            if status:
                test.fail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                test.fail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                test.fail("The format of dumped file is wrong.")
            if params.get('bypass'):
                test.fail(params['bypass'])
    finally:
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
        if os.path.isfile(dump_file):
            os.remove(dump_file)
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be restarted
    from the same state at a later time.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object holding the VM
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # define function
    def vm_recover_check(guest_name, option):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # This time vm not be shut down
        # After a successful managedsave the domain must be inactive.
        if vm.is_alive():
            raise error.TestFail("Guest should be inactive")
        # Starting the domain should restore it from the managed-save image.
        virsh.start(guest_name)
        # This time vm should be in the list
        if vm.is_dead():
            raise error.TestFail("Guest should be active")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after started"
                                         " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of '--paused' option")
        else:
            # Without an explicit option the restored state should match the
            # state the guest was saved in.
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of initia guest state")

    # domid is only meaningful while the domain is running
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref")
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    option = params.get("managedsave_option", "")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            raise error.TestNAError("Older libvirt does not"
                                    " handle arguments consistently")

    # run test case
    # Translate the symbolic vm_ref into a concrete domain identifier.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.count("invalid"):
        # the param named by vm_ref holds the invalid value to use
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # stop the libvirtd service
    # (negative scenario: command must fail while the daemon is down)
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Ignore exception with "ignore_status=True"
    if progress:
        option += " --verbose"
    option += extra_param
    ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
    status = ret.exit_status
    # The progress information outputed in error message
    error_msg = ret.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if progress:
                # With --verbose the progress is printed on stderr.
                if not error_msg.count("Managedsave:"):
                    raise error.TestFail("Got invalid progress output")
            vm_recover_check(vm_name, option)
    finally:
        # Leave the guest running even if a check above failed.
        if vm.is_paused():
            virsh.resume(vm_name)
def run(test, params, env):
    """
    Test the command virsh memtune

    1) To get the current memtune parameters
    2) Change the parameter values
    3) Check the memtune query updated with the values
    4) Check whether the mounted cgroup path gets the updated value
    5) Check the output of virsh dumpxml
    6) Check vm is alive
    """
    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel(
            "Memtune not available in this libvirt version")

    # Check if memtune options are supported
    for option in memtune_types:
        if not virsh.has_command_help_match("memtune", option):
            test.cancel("%s option not available in memtune "
                        "cmd in this libvirt version" % option)
    # Get common parameters
    acceptable_minus = int(params.get("acceptable_minus", 8))
    step_mem = params.get("mt_step_mem", "no") == "yes"
    expect_error = params.get("expect_error", "no") == "yes"
    restart_libvirtd = params.get("restart_libvirtd", "no") == "yes"
    set_one_line = params.get("set_in_one_command", "no") == "yes"
    mt_hard_limit = params.get("mt_hard_limit", None)
    mt_soft_limit = params.get("mt_soft_limit", None)
    mt_swap_hard_limit = params.get("mt_swap_hard_limit", None)
    # if restart_libvirtd is True, set set_one_line is True
    set_one_line = True if restart_libvirtd else set_one_line

    # Get the vm name, pid of vm and check for alive
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # step_mem is used to do step increment limit testing
    if step_mem:
        mem_step(params, path, vm, test, acceptable_minus)
        return

    if not set_one_line:
        # Set one type memtune limit in one command
        if mt_hard_limit:
            index = 0
            mt_limit = mt_hard_limit
        elif mt_soft_limit:
            index = 1
            mt_limit = mt_soft_limit
        elif mt_swap_hard_limit:
            index = 2
            mt_limit = mt_swap_hard_limit
        else:
            # Without this guard 'index'/'mt_limit' would be unbound below
            # and raise a confusing NameError instead of a clear test error.
            test.error("No memtune limit (mt_hard_limit/mt_soft_limit/"
                       "mt_swap_hard_limit) provided in test parameters")
        mt_type = memtune_types[index]
        mt_cgname = memtune_cgnames[index]
        options = " --%s %s --live" % (mt_type, mt_limit)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            # If limit value is negative, means no memtune limit
            mt_expected = mt_limit if int(mt_limit) > 0 else -1
            check_limit(path, mt_expected, mt_type, mt_cgname, vm, test,
                        acceptable_minus)
    else:
        # Set 3 limits in one command line
        mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit]
        options = " %s --live" % ' '.join(mt_limits)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            check_limits(path, mt_limits, vm, test, acceptable_minus)

        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if not expect_error:
            # After libvirtd restared, check memtune values again
            check_limits(path, mt_limits, vm, test, acceptable_minus)
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    # fixed local name typo: xml_flie -> xml_file
    xml_file = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    network_xml = os.path.join(test.tmpdir, xml_file)

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"

    # A transient network cannot be destroyed and restarted by this test.
    if not virsh.net_state_dict()[net_name]['persistent']:
        raise error.TestError("Network is transient!")
    try:
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(net_name,
                                               ignore_status=True).exit_status
            if status_destroy != 0:
                raise error.TestError("Network destroy failed!")

        result = virsh.net_dumpxml(net_ref, extra, network_xml,
                                   ignore_status=True)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped xml against the network schema.
        xml_validate_cmd = "virt-xml-validate %s network" % network_xml
        valid_s = utils.run(xml_validate_cmd,
                            ignore_status=True).exit_status

        # Check option valid or not.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match("net-dumpxml",
                                                    option.strip()):
                    # Unsupported option: the command is expected to fail.
                    status_error = "yes"
                    break
    finally:
        # Recover network
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(net_name,
                                           ignore_status=True).exit_status
            if status_start != 0:
                raise error.TestError("Network start failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
        if err == "":
            raise error.TestFail("The wrong command has no error output!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command!")
        if valid_s != 0:
            raise error.TestFail("Command output is invalid!")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
shared_storage = params.get("migrate_shared_storage", "") # use default image jeos-23-64 if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] shared_storage = params.get("nfs_mount_dir") shared_storage += ('/' + default_guest_asset + '.qcow2') options = params.get("virsh_migrate_options") # Direct migration is supported only for Xen in libvirt if options.count("direct") or extra.count("direct"): if params.get("driver_type") is not "xen": raise error.TestNAError("Direct migration is supported only for " "Xen in libvirt.") if (options.count("compressed") and not virsh.has_command_help_match("migrate", "--compressed")): raise error.TestNAError("Do not support compressed option " "on this version.") if (options.count("graphicsuri") and not virsh.has_command_help_match("migrate", "--graphicsuri")): raise error.TestNAError("Do not support 'graphicsuri' option" "on this version.") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") graphics_server = params.get("graphics_server") if graphics_server: try: remote_viewer_executable = path.find_command('remote-viewer')
def run(test, params, env):
    """
    Test command: virsh setvcpu.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpu operation.
    3. Check in the following places
    vcpuinfo
    vcpupin
    vcpucount
    inside guest
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpu_pre_vm_state")
    options = params.get("setvcpu_options")
    vm_ref = params.get("setvcpu_vm_ref", "name")
    vcpu_list_format = params.get("setvcpu_list_format", "comma")
    invalid_vcpulist = params.get("invalid_vcpulist", "")
    convert_err = "Can't convert {0} to integer type"
    unsupport_str = params.get("unsupport_str", "")
    # Parse the numeric params inside try/except so a non-numeric value in a
    # negative test cancels cleanly.  (The old unguarded
    # 'current_vcpu = int(...)' before this try defeated the except clause.)
    try:
        current_vcpu = int(params.get("setvcpu_current", "1"))
    except ValueError:
        test.cancel(convert_err.format(params.get("setvcpu_current")))

    try:
        max_vcpu = int(params.get("setvcpu_max", "4"))
    except ValueError:
        test.cancel(convert_err.format(params.get("setvcpu_max")))

    extra_param = params.get("setvcpu_extra_param")
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = int(params.get("topology_sockets", '1'))
    cores = int(params.get("topology_cores", '4'))
    threads = int(params.get("topology_threads", '1'))
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    iteration = int(params.get("hotplug_iteration", "1"))

    # Early death
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("setvcpu", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)

    # Calculate count options
    vcpu_list = []
    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options, enabled=True):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpu options
        :param enabled: True or False base on enable or disable
        """
        if enabled:
            if ("config" in options) or ("current" in options and
                                         vm.is_dead()):
                exp_vcpu['cur_config'] += threads
            elif ("live" in options) or ("current" in options and
                                         vm.is_alive()):
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] += threads
                exp_vcpu['guest_live'] += threads
        else:
            if ("--config" in options) or ("--current" in options and
                                           vm.is_dead()):
                exp_vcpu['cur_config'] -= threads
            elif ("--live" in options) or ("--current" in options and
                                           vm.is_alive()):
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads
            else:
                # when none given it defaults to live
                exp_vcpu['cur_live'] -= threads
                exp_vcpu['guest_live'] -= threads

    if threads > 1:
        start_vcpu = current_vcpu
    else:
        start_vcpu = current_vcpu + 1
    # Build the vcpu id list, either as ranges ("2-3") or comma lists ("2,3")
    if 'hypen' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if threads > 1:
                lst = "%s-%s" % (str(vcpu_start),
                                 str(vcpu_start + threads - 1))
            else:
                lst = str(vcpu_start)
            vcpu_list.append(lst)
    elif 'comma' in vcpu_list_format:
        for vcpu_start in range(start_vcpu, max_vcpu, threads):
            if threads > 1:
                lst = ''
                for idx in range(vcpu_start, vcpu_start + threads):
                    lst += "%s," % idx
            else:
                # str() so the strip(',') below works when threads == 1
                # (an int has no strip method)
                lst = str(vcpu_start)
            vcpu_list.append(lst.strip(','))
    else:
        pass

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Run test
    try:
        if vm.is_alive():
            vm.destroy()

        # Set cpu topology
        if set_topology:
            vmxml.set_vm_vcpus(vm.name, max_vcpu, current_vcpu,
                               sockets=sockets, cores=cores,
                               threads=threads, add_topology=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        setvcpu_exit_status = 0
        setvcpu_exit_stderr = ''
        # TODO: Run remote test,for future use
        if vm_ref == "remote":
            pass
        # Run local test
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpu_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpu_invalid_id") is not None:
                    dom_option = params.get("setvcpu_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpu_invalid_uuid") is not None:
                    dom_option = params.get("setvcpu_invalid_uuid")
            else:
                dom_option = vm_ref

            for itr in range(iteration):
                if extra_param:
                    vcpu_list_option = "%s %s" % (vcpu_list[itr], extra_param)
                elif invalid_vcpulist != "":
                    vcpu_list_option = invalid_vcpulist
                else:
                    vcpu_list_option = vcpu_list[itr]
                if 'enable' in options:
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    # Remember the failing exit status
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-enable: %s\n" % (
                        itr, status.stderr.strip())
                    set_expected(vm, options, True)
                elif 'disable' in options:
                    # disable needs a hotpluggable cpus, lets make sure we have
                    if status_error != "yes":
                        options_enable = options.replace("disable", "enable")
                        virsh.setvcpu(dom_option, vcpu_list_option,
                                      options_enable, ignore_status=False,
                                      debug=True)
                        # Adjust the expected vcpus
                        set_expected(vm, options, True)
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    unsupport_str = cpu.vcpuhotunplug_unsupport_str()
                    if unsupport_str and (unsupport_str in status.stderr):
                        test.cancel("Vcpu hotunplug is not supported in this "
                                    "host:\n%s" % status.stderr)
                    # Remember the failing exit status
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-disable: %s\n" % (
                        itr, status.stderr.strip())
                    # Adjust the expected vcpus
                    set_expected(vm, options, False)
                # Handle error cases
                else:
                    status = virsh.setvcpu(dom_option, vcpu_list_option,
                                           options, ignore_status=True,
                                           debug=True)
                    # Remember the failing exit status
                    if status.exit_status != 0:
                        setvcpu_exit_status = status.exit_status
                    # Accumulate the error strings
                    setvcpu_exit_stderr += "itr-%d-error: %s\n" % (
                        itr, status.stderr.strip())

        # Start VM after set vcpu
        if start_vm_after_set:
            # --config/--current changes become live once the guest boots
            if "--enable" in options:
                if "--config" in options or "--current" in options:
                    exp_vcpu['cur_live'] = exp_vcpu['cur_config']
            if "--disable" in options:
                if "--config" in options or "--current" in options:
                    exp_vcpu['cur_live'] = exp_vcpu['cur_config']
            if vm.is_alive():
                logging.debug("VM already started")
            else:
                result = virsh.start(vm_name, ignore_status=True,
                                     debug=True)
                libvirt.check_exit_status(result)

        # Lets validate the result in positive cases
        if status_error != "yes":
            result = cpu.check_vcpu_value(vm, exp_vcpu,
                                          option=options)
    finally:
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command"
                      " stderr=%s" % setvcpu_exit_stderr)
        else:
            if not result:
                test.fail("Test Failed")
def run(test, params, env):
    """
    Test the command virsh memtune

    1) To get the current memtune parameters
    2) Change the parameter values
    3) Check the memtune query updated with the values
    4) Check whether the mounted cgroup path gets the updated value
    5) Check the output of virsh dumpxml
    6) Check vm is alive

    :param test: avocado test object (used for cancel/fail reporting)
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel("Memtune not available in this libvirt version")

    # Check if memtune options are supported; memtune_types uses
    # underscore-style names, virsh options use dashes.
    for option in memtune_types:
        option = re.sub('_', '-', option)
        if not virsh.has_command_help_match("memtune", option):
            test.cancel("%s option not available in memtune "
                        "cmd in this libvirt version" % option)
    # Get common parameters.
    # acceptable_minus: cgroup values are rounded to page granularity, so
    # allow up to (pagesize - 1) of slack when comparing limits.
    acceptable_minus = int(utils_memory.getpagesize() - 1)
    step_mem = params.get("mt_step_mem", "no") == "yes"
    expect_error = params.get("expect_error", "no") == "yes"
    restart_libvirtd = params.get("restart_libvirtd", "no") == "yes"
    set_one_line = params.get("set_in_one_command", "no") == "yes"
    mt_hard_limit = params.get("mt_hard_limit", None)
    mt_soft_limit = params.get("mt_soft_limit", None)
    mt_swap_hard_limit = params.get("mt_swap_hard_limit", None)
    # if restart_libvirtd is True, set set_one_line is True: the
    # restart check below reads mt_limits, which only the
    # set-all-in-one-command branch defines.
    set_one_line = True if restart_libvirtd else set_one_line

    # Get the vm name, pid of vm and check for alive
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Resolve the memory cgroup path for a domain
    cgtest = libvirt_cgroup.CgroupTest(pid)
    path = cgtest.get_cgroup_path("memory")
    logging.debug("cgroup path is %s", path)

    # Shared with module-level helpers (check_limit/check_limits) which
    # look up cgroup file names per memtune type.
    global mem_cgroup_info
    mem_cgroup_info = cgtest.get_cgroup_file_mapping(virsh_cmd='memtune')
    logging.debug("memtune cgroup info is %s", mem_cgroup_info)

    # step_mem is used to do step increment limit testing
    if step_mem:
        mem_step(params, path, vm, test, acceptable_minus)
        return

    if not set_one_line:
        # Set one type memtune limit in one command.
        # NOTE(review): if none of the three mt_*_limit params is set,
        # 'index' is never assigned and the line below raises NameError —
        # confirm the cfg always supplies exactly one limit here.
        if mt_hard_limit:
            index = 0
            mt_limit = mt_hard_limit
        elif mt_soft_limit:
            index = 1
            mt_limit = mt_soft_limit
        elif mt_swap_hard_limit:
            index = 2
            mt_limit = mt_swap_hard_limit
        mt_type = memtune_types[index]
        mt_cgname = mem_cgroup_info[mt_type]
        options = " --%s %s --live" % (re.sub('_', '-', mt_type), mt_limit)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            # If limit value is negative, means no memtune limit
            mt_expected = mt_limit if int(mt_limit) > 0 else -1
            check_limit(path, mt_expected, mt_type, mt_cgname, vm, test,
                        acceptable_minus)
    else:
        # Set 3 limits in one command line
        mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit]
        options = " %s --live" % ' '.join(mt_limits)
        result = virsh.memtune_set(vm.name, options, debug=True)

        if expect_error:
            fail_patts = [params.get("error_info")]
            libvirt.check_result(result, fail_patts, [])
        else:
            check_limits(path, mt_limits, vm, test, acceptable_minus)

        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()

        if not expect_error:
            # After libvirtd restarted, check memtune values again to make
            # sure the limits survived the daemon restart.
            check_limits(path, mt_limits, vm, test, acceptable_minus)
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.

    :param test: test object (autotest-style error.* exceptions report results)
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    # Back up the inactive XML so the guest config can be restored on exit.
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)
        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            if guest_session.cmd_status("which qemu-ga"):
                raise error.TestNAError("Cannot execute this test for domain"
                                        " doesn't have qemu-ga command!")
            # check if the qemu-guest-agent is active or not firstly
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                # Agent not running yet: start it as a daemon in the guest.
                s, o = guest_session.cmd_status_output("qemu-ga -d")
                if s != 0:
                    raise error.TestError("'qemu-ga -d' failed.\noutput:%s" % o)
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # Translate the symbolic vm_ref into the actual argument for virsh.
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            # FIX: 'except Exception, detail' is Python-2-only syntax;
            # 'as' works on both Python 2.6+ and 3.
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                # FIX: raw string for the regex (avoids invalid-escape
                # deprecation warnings on '\s').
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions")
                raise error.TestFail("Run failed with right command")
    finally:
        # FIX: the 'try:' above had no matching handler (syntax error) and the
        # inactive-XML backup taken at the start was never restored; sync it
        # back so agent-channel edits don't leak into later tests.
        xml_backup.sync()
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object providing tmpdir
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count", "")
    convert_err = "Can't convert {0} to integer type"
    try:
        count = int(count)
    except ValueError:
        # 'count' may not invalid number in negative tests
        logging.debug(convert_err.format(count))
    # NOTE(review): int() is applied twice here; a bad value raises
    # ValueError on the first (unguarded) call before the try below —
    # confirm whether the outer int() is intentional.
    current_vcpu = int(params.get("setvcpus_current", "1"))
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(current_vcpu))
    max_vcpu = int(params.get("setvcpus_max", "4"))
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        raise error.TestError(convert_err.format(max_vcpu))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, "tmp.xml")
    set_topology = "yes" == params.get("set_topology", "no")
    sockets = params.get("topology_sockets")
    cores = params.get("topology_cores")
    threads = params.get("topology_threads")
    start_vm_after_set = "yes" == params.get("start_vm_after_set", "no")
    start_vm_expect_fail = "yes" == params.get("start_vm_expect_fail", "no")
    remove_vm_feature = params.get("remove_vm_feature", "")

    # Early death: remote cases need real IPs configured.
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration so it can be restored in the finally block.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #
    try:
        if vm.is_alive():
            vm.destroy()

        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)

        # Get the number of cpus, current value if set, and machine type
        orig_count, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", orig_count, orig_current, mtype)

        # Set cpu topology
        if set_topology:
            vmcpu_xml = vm_xml.VMCPUXML()
            vmcpu_xml["topology"] = {"sockets": sockets, "cores": cores,
                                     "threads": threads}
            vmxml["cpu"] = vmcpu_xml
            vmxml.sync()

        # Remove vm features
        if remove_vm_feature:
            vmfeature_xml = vmxml["features"]
            vmfeature_xml.remove_feature(remove_vm_feature)
            vmxml["features"] = vmfeature_xml
            vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if orig_count == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if setvcpus "
                          "can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        # Bring the domain into the requested pre-test state.
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip,
                                                local_ip,
                                                remote_pwd,
                                                remote_prompt,
                                                vm_name,
                                                status_error)
        else:
            # Translate the symbolic vm_ref into the actual virsh argument.
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            # Skip the test if this libvirt lacks any requested option.
            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option"
                                            % item)
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

            # Start VM after set vcpu
            if start_vm_after_set:
                if vm.is_alive():
                    logging.debug("VM already started")
                else:
                    result = virsh.start(vm_name, ignore_status=True,
                                         debug=True)
                    libvirt.check_exit_status(result, start_vm_expect_fail)

    finally:
        new_count, new_current, mtype = get_xmldata(vm_name, tmpxml, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", new_count, new_current, mtype)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            # RHEL7/Fedora has a bug(BZ#1000354) against qemu-kvm, so throw the
            # bug info here
            if remove_vm_feature:
                logging.error("You may encounter bug: "
                              "https://bugzilla.redhat.com/show_bug.cgi?id=1000354")
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.
            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                # NOTE(review): mtype is passed as an extra positional arg,
                # so the '%s' in this message is never interpolated — confirm
                # whether "... % mtype" was intended.
                raise error.TestNAError("guest <os> machine property '%s' "
                                        "may be too old to allow hotplug.",
                                        mtype)
            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure. In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                raise error.TestNAError("virsh setvcpu hotplug unsupported, "
                                        " mtype=%s" % mtype)
            # Otherwise, it seems we have a real error
            raise error.TestFail("Run failed with right command mtype=%s"
                                 " stderr=%s" % (mtype, setvcpu_exit_stderr))
        else:
            if "--maximum" in options:
                if new_count != count:
                    raise error.TestFail("Changing guest maximum vcpus failed"
                                         " while virsh command return 0")
            else:
                if new_current != count:
                    raise error.TestFail("Changing guest current vcpus failed"
                                         " while virsh command return 0")
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be
    restarted from the same state at a later time.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])

    #define function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        #This time vm should not be in the list (managedsave removed it)
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        #This time vm should be in the list (restored from the saved image)
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    libvirtd = params.get("managedsave_libvirtd","on")

    #run test case: translate the symbolic vm_ref into the virsh argument
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or\
         vm_ref == "managedsave_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        # "extra_parame" spelling follows the cfg parameter name
        vm_ref = "%s %s" % (vm_name, params.get("managedsave_extra_parame"))

    #stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    #Ignore exception with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    #recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    #check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave', r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError("Older libvirt does not handle arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        vm_recover_check(vm_name)
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.

    :param test: avocado test object (cancel/error/fail reporting)
    :param params: test parameters dictionary
    :param env: test environment (unused here beyond the params)
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    xml_flie = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    network_xml = os.path.join(data_dir.get_tmp_dir(), xml_flie)

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Run test case: translate the symbolic net_ref into the virsh argument.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    # Record the current state so it can be restored in the finally block.
    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"

    # A transient network cannot be destroyed/re-started for this test.
    if not virsh.net_state_dict()[net_name]['persistent']:
        test.error("Network is transient!")
    try:
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(net_name,
                                               ignore_status=True).exit_status
            if status_destroy != 0:
                test.error("Network destroied failed!")

        virsh_dargs = {'ignore_status': True}
        if params.get('setup_libvirt_polkit') == 'yes':
            virsh_dargs['unprivileged_user'] = unprivileged_user
            virsh_dargs['uri'] = uri
        result = virsh.net_dumpxml(net_ref, extra, network_xml,
                                   **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped XML against the network schema.
        xml_validate_cmd = "virt-xml-validate %s network" % network_xml
        valid_s = process.run(xml_validate_cmd, ignore_status=True,
                              shell=True).exit_status

        # Check option valid or not.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match("net-dumpxml",
                                                    option.strip()) and\
                   status_error == "no":
                    test.cancel("The current libvirt version"
                                " doesn't support '%s' option"
                                % option.strip())
    finally:
        # Recover network
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(net_name,
                                           ignore_status=True).exit_status
            if status_start != 0:
                test.error("Network started failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
        if err == "":
            test.fail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command!")
        if valid_s != 0:
            test.fail("Command output is invalid!")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run_virsh_net_dumpxml(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.

    :param test: test object providing tmpdir
    :param params: test parameters dictionary
    :param env: test environment (unused here beyond the params)
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    xml_flie = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    network_xml = os.path.join(test.tmpdir, xml_flie)

    # Run test case: translate the symbolic net_ref into the virsh argument.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    # Record the current state so it can be restored in the finally block.
    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"

    # A transient network cannot be destroyed/re-started for this test.
    if not virsh.net_state_dict()[net_name]['persistent']:
        raise error.TestError("Network is transient!")
    try:
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(net_name,
                                               ignore_status=True).exit_status
            if status_destroy != 0:
                raise error.TestError("Network destroied failed!")

        result = virsh.net_dumpxml(net_ref, extra, network_xml,
                                   ignore_status=True)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped XML against the network schema.
        xml_validate_cmd = "virt-xml-validate %s network" % network_xml
        valid_s = utils.run(xml_validate_cmd, ignore_status=True).exit_status

        # Check option valid or not. An unsupported option flips the test
        # into the expected-failure path.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match("net-dumpxml",
                                                    option.strip()):
                    status_error = "yes"
                    break
    finally:
        # Recover network
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(net_name,
                                           ignore_status=True).exit_status
            if status_start != 0:
                raise error.TestError("Network started failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
        if err == "":
            raise error.TestFail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command!")
        if valid_s != 0:
            raise error.TestFail("Command output is invalid!")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The conmand can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Back up the inactive XML so the guest can be re-defined at the end.
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number from a fresh dumpxml of the domain.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
        vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    # Fix the maximum vcpu count at 2 before starting the guest.
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Bring the domain into the requested pre-test state.
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        # Translate the symbolic domain ref into the actual virsh argument.
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        # An unsupported option flips the test into the expected-failure path.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option, count_option, options,
                                ignore_status=True).exit_status

    if pre_vm_state == "paused":
        virsh.resume(vm_name, ignore_status=True)

    # Collect the resulting vcpu count for the positive-case checks below.
    if status_error == "no":
        if status == 0:
            if pre_vm_state == "shut off":
                if options == "--config":
                    vcpus_set = len(vm.vcpuinfo())
                elif options == "--current":
                    vcpus_set = get_current_vcpus()
                elif options == "--maximum --config":
                    vcpus_set = ""
                    dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                    vcpus_set = dom.getElementsByTagName(
                        "vcpu")[0].firstChild.data
                    vcpus_set = int(vcpus_set)
                    dom.unlink()
            else:
                vcpus_set = len(vm.vcpuinfo())
            if domain == "id":
                cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                # NOTE: 'commands' is a Python-2-only stdlib module.
                output1 = commands.getoutput(cmd_chk)
                logging.info("guest-info:\n%s" % output1)

    # Restore the original guest definition and remove temp files.
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
iface.mac_address = iface_mac_address iface.source = dict(network=iface_network) iface.model = iface_model_type iface.xmltreefile.write() shutil.copyfile(iface.xml, device_xml_file) return device_xml_file vm_ref = params.get("dt_device_vm_ref", "name") dt_options = params.get("dt_device_options", "") pre_vm_state = params.get("dt_device_pre_vm_state", "running") status_error = "yes" == params.get("status_error", 'no') no_attach = "yes" == params.get("dt_device_no_attach", 'no') os_type = params.get("os_type", "linux") device = params.get("dt_device_device", "disk") test_cmd = "detach-device" if not virsh.has_command_help_match(test_cmd, dt_options) and\ not status_error: raise error.TestNAError("Current libvirt version doesn't support '%s'" " for %s" % (dt_options, test_cmd)) # Disk specific attributes. device_source_name = params.get("dt_device_device_source", "attach.img") device_target = params.get("dt_device_device_target", "vdd") device_bus = params.get("dt_device_bus_type") test_block_dev = "yes" == params.get("dt_device_iscsi_device", "no") # interface specific attributes. iface_network = params.get("dt_device_iface_network") iface_model_type = params.get("dt_device_iface_model_type") iface_mac_address = params.get("dt_device_iface_mac_address")
def run_virsh_update_device(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1.Prepare test environment.Make sure a cdrom exists in VM.
    If not, please attach one cdrom manually.
    2.Perform virsh update-device operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: test object providing virtdir/tmpdir
    :param params: test parameters dictionary
    :param env: test environment holding the VM objects
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive() and params.get("start_vm") == "no":
        vm.destroy()

    def create_attach_xml(update_xmlfile, source_iso):
        """
        Create a xml file to update a device.

        :param update_xmlfile : Temp xml saves device information.
        :param source_iso : disk's source file.
        """
        try:
            # Create a 1 MiB sparse file by seeking to the last byte and
            # writing a single byte there.
            # FIX: use a context manager so the handle is closed even on
            # error; write bytes so the 'wb' mode works on Python 3 too.
            with open(source_iso, 'wb') as f:
                f.seek((1024 * 1024) - 1)
                f.write(b"0")
        except IOError:
            raise error.TestFail("Create source_iso failed!")
        content = """
<disk device='cdrom' type='file'>
<driver name='file'/>
<source file='%s'/>
<target bus='ide' dev='hdc'/>
<readonly/>
</disk>
""" % source_iso
        with open(update_xmlfile, 'w') as xmlfile:
            xmlfile.write(content)

    def check_attach(source_file, output):
        """
        Check attached device and disk exist or not.

        :param source_file : disk's source file.
        :param output :VM's xml information .
        """
        dom = parseString(output)
        source = dom.getElementsByTagName("source")
        output2 = ""
        for n in source:
            output2 += n.getAttribute("file")
        target = dom.getElementsByTagName("target")
        output3 = ""
        for n in target:
            output3 += n.getAttribute("dev")
        # FIX: 'dom.unlink' was a bare attribute access (a no-op); it must
        # be called to release the minidom tree.
        dom.unlink()
        source_iso = "%s" % source_file
        if not re.search(source_iso, output2):
            raise error.TestFail("didn't see 'attached disk")
        if not re.search('hdc', output3):
            raise error.TestFail("didn't see 'attached device")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Prepare tmp directory and files.
    tmp_iso = os.path.join(test.virtdir, "tmp.iso")
    tmp2_iso = os.path.join(test.virtdir, "tmp2.iso")
    update_xmlfile = os.path.join(test.tmpdir, "xml_file")

    # Get all parameters for configuration.
    flag = params.get("updatedevice_flag", "")
    twice = "yes" == params.get("updatedevice_twice", "no")
    diff_iso = params.get("updatedevice_diff_iso", "no")
    vm_ref = params.get("updatedevice_vm_ref", "")
    status_error = params.get("status_error", "no")
    extra = params.get("updatedevice_extra", "")

    create_attach_xml(update_xmlfile, tmp_iso)
    vm_xml = os.path.join(test.tmpdir, "vm_xml")
    virsh.dumpxml(vm_name, extra="", to_file=vm_xml)
    # Snapshot of the domain definition, re-defined during recovery below.
    vmxml_before = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Translate the symbolic vm_ref into the actual virsh argument.
    if vm_ref == "id":
        vm_ref = domid
        if twice:
            virsh.update_device(domainarg=domid, filearg=update_xmlfile,
                                ignore_status=True)
        if diff_iso == "yes":
            # Swap in an update XML pointing at a second, different ISO.
            os.remove(update_xmlfile)
            create_attach_xml(update_xmlfile, tmp2_iso)
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("updatedevice_invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)

    status = virsh.update_device(domainarg=vm_ref, filearg=update_xmlfile,
                                 flagstr=flag, ignore_status=True,
                                 debug=True).exit_status

    output = "%s" % libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    # FIX: dict.has_key() is Python-2-only; 'in' works on both 2 and 3.
    if "updatedevice_diff_file" in params:
        vm_xml_after = os.path.join(test.tmpdir, "vm_xml_after")
        virsh.dumpxml(vm_name, extra="", to_file=vm_xml_after)
    vm.destroy()
    output_shut = "%s" % libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Recover environment
    vm.undefine()
    vmxml_before.define()
    if os.path.exists(tmp_iso):
        os.remove(tmp_iso)
    if os.path.exists(tmp2_iso):
        os.remove(tmp2_iso)

    # Check status_error: an unsupported flag flips the test into the
    # expected-failure path.
    flag_list = flag.split("--")
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if virsh.has_command_help_match("update-device", option) is None:
            status_error = "yes"
            break
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if flag == "--persistent" or flag == "--config":
                # Persistent update must survive a shutdown.
                if not re.search(tmp_iso, output_shut):
                    raise error.TestFail("virsh update-device function invalid"
                                         "didn't see 'attached device' in XML")
            else:
                if "updatedevice_diff_file" in params:
                    # FIX: the py2-only file() builtin leaked handles; read
                    # via context-managed open() instead.
                    with open(vm_xml, 'r') as f:
                        context_before = f.read().splitlines()
                    with open(vm_xml_after, 'r') as f:
                        context_after = f.read().splitlines()
                    output_diff = difflib.Differ().compare(context_before,
                                                           context_after)
                    if not re.search(tmp_iso, "\n".join(list(output_diff))):
                        raise error.TestFail(
                            "virsh update-device function "
                            "invalid; can't see 'attached device'in before/after")
                else:
                    # Live-only update must NOT survive a shutdown.
                    if re.search(tmp_iso, output_shut):
                        raise error.TestFail(
                            "virsh attach-device without "
                            "--persistent/--config function invalid;can see "
                            "'attached device'in XML")
            if diff_iso == "yes":
                check_attach(tmp2_iso, output)
            if vm_ref == "name":
                check_attach(tmp_iso, output)
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    # A bare file name (no directory part) is placed under the test tmpdir.
    # BUG FIX: was 'is ""' -- identity comparison with a string literal is
    # interning-dependent and not guaranteed; use equality instead.
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(test.tmpdir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    timeout = int(params.get("timeout", "5"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.

        :param actual: state string reported by vm.state()
        :param options: the virsh dump options that were used
        :return: True if 'actual' matches the expected post-dump state
        """
        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            # --crash leaves the domain crashed (shut off) ...
            domstate = "shut off"
            if options.find('reset') >= 0:
                # ... unless --reset is also given, which restarts it.
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        if not start_vm:
            domstate = "shut off"
        logging.debug("Domain should %s after run dump %s", domstate, options)
        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf, then
        the file shoule be normal raw file, otherwise it shoud be compress to
        specified format, the supported compress format including: lzop,
        gzip, bzip2, and xz.
        For memory-only dump, the default dump format is ELF, and it can also
        specify format by --format option, the result could be 'elf' or
        'data'.
        """
        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            # Use file(1) to detect the actual on-disk format.
            file_cmd = "file %s" % dump_file
            (status, output) = commands.getstatusoutput(file_cmd)
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            # Second word of file(1) output names the format (e.g. "gzip").
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with bypass-cache option
    child_pid = 0
    if options.find('bypass-cache') >= 0:
        pid = os.fork()
        if pid:
            # Guarantee check_bypass function has run before dump
            child_pid = pid
            try:
                wait_pid_active(pid, timeout)
            finally:
                os.kill(child_pid, signal.SIGUSR1)
        else:
            # Child process: watch fd flags on the dump file, then idle
            # until the parent signals it (SIGUSR1 sent above / in finally).
            check_bypass(dump_file)
            # Wait for parent process over
            while True:
                time.sleep(1)

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            raise error.TestNAError("Current libvirt version doesn't support"
                                    " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            raise error.TestNAError("Unsupported dump format '%s' for"
                                    " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        raise error.TestError("invalid dumpCore value: %s" % dump_guest_core)

    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = utils.run(cmd, ignore_status=True)
            logging.debug("cmdline: %s" % result.stdout)
            if result.exit_status:
                # BUG FIX: the TestFail was constructed but never raised,
                # so a missing dump-guest-core flag went unnoticed.
                raise error.TestFail("Not find dump-guest-core=%s in qemu cmdline"
                                     % dump_guest_core)
            else:
                logging.info("Find dump-guest-core=%s in qemum cmdline",
                             dump_guest_core)

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            raise error.TestFail("Domain status check fail.")
        if status_error:
            if not status:
                raise error.TestFail("Expect fail, but run successfully")
        else:
            if status:
                raise error.TestFail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                raise error.TestFail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                raise error.TestFail("The format of dumped file is wrong.")
    finally:
        # Reap the fd-watcher child (it loops forever until signalled) and
        # restore the host configuration.
        if child_pid:
            os.kill(child_pid, signal.SIGUSR1)
        if os.path.isfile(dump_file):
            os.remove(dump_file)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Path where libvirt keeps the managedsave image for this qemu domain.
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    shutdown_timeout = int(params.get('shutdown_timeout', 60))

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # This time vm not be shut down
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")
        virsh.start(vm_name, debug=True)
        # This time vm should be in the list
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after"
                      " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exist "
                      "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain,
            # it should be in runing state and can be login.
            vm.shutdown()
            if not vm.wait_for_shutdown(shutdown_timeout):
                test.fail('VM failed to shutdown')
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if vm can be undefined with manage-save option
        """
        # backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        # undefine domain with no options: expected to FAIL while a
        # managedsave image exists, hence the inverted exit-status check.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined"
                      "while domain managed save image exists")
        # undefine domain with managed-save option.
        if virsh.undefine(vm_name, options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefine with "
                      "managed-save option")
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists"
                      " after undefining vm")
        # restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd, ignore_status=True, shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
        # fdinfo flags are octal; mask each with the expected O_* bits.
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)
        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=timeout)
            virsh.start(dst_vm, debug=True)
        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = process.run("service libvirt-guests restart | \
                   awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                   $0; fflush(); }'", shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # libvirt-guests status command read messages from systemd
        # journal, in cases of messages are not ready in time,
        # add a time wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)

        # Check if start_delay take effect: consecutive resume timestamps
        # must be at least start_delay seconds apart.
        for i in range(len(resume_seconds) - 1):
            if resume_seconds[i + 1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait for vm state is ready.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # form proper parallel command based on if systemd is used or not
        is_systemd = process.run("cat /proc/1/comm",
                                 shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
                                   (managed_save_file, managed_save_file,
                                    "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all(["Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
            if ret.exit_status != 3:
                test.fail("The exit code %s for libvirt-guests"
                          " status is not correct" % ret)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(virsh_cmd_start, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        virsh.managedsave_remove(vm_name, debug=True)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be"
                      " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {"type": "dynamic", "model": "selinux",
                                 "relabel": "yes"}
                vmxml.set_seclabel([seclabel_dict])
            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")

    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run("sed -i -e 's/ = /=/g' "
                        "/etc/sysconfig/libvirt-guests",
                        shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
                # Wait for VM to be in shut off state
                utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Cann't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For bypass_cache test. Run a shell command to check fd flags while
        # excuting managedsave command
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
        # Polls up to ~20s for the save file, then dumps the fd flags of the
        # libvirt iohelper process holding it open.
        bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
                    "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/%s |"
                    "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)
            if check_flags:
                check_guest_flags(bash_cmd, flags)
        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option,
                                    ignore_status=True, debug=True)
            status = ret.exit_status
            # The progress information outputed in error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                # Corrupt the save image so recovery paths are exercised.
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)
            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()
            if status_error:
                if not status:
                    if libvirtd_state == "off" and libvirt_version.version_compare(5, 6, 0):
                        logging.info("From libvirt version 5.6.0 libvirtd is restarted "
                                     "and command should succeed")
                    else:
                        test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd:
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    # rhbz#1755303
                    if libvirt_version.version_compare(5, 6, 0):
                        os.remove("/run/libvirt/qemu/autostarted")
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True, debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
def is_old_libvirt():
    """
    Detect an old libvirt build by probing the 'setmaxmem' help text.

    :return: True when the optional [--size] argument is absent from the
             help output (i.e. the installed libvirt predates it).
    """
    size_option_pattern = r'\s+\[--size\]\s+'
    matched = virsh.has_command_help_match('setmaxmem', size_option_pattern)
    return not matched
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    # LUKS volumes additionally exercise wiping the original volume.
    wipe_old_vol = False

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata "
                        "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)

    def wipe_and_verify(target_vol):
        """
        Wipe 'target_vol' with the chosen algorithm and verify the result.

        On expected success the wiped volume must still be listed and its
        on-disk image format must be raw; on expected failure vol-wipe must
        not succeed.  (Extracted helper: the original duplicated this whole
        block for the cloned and the original volume.)
        """
        if alg:
            logging.debug("Wiping volume by '%s' algorithm", alg)
        wipe_result = virsh.vol_wipe(target_vol, pool_name, alg,
                                     unprivileged_user=unpri_user,
                                     uri=uri, debug=True)
        unsupported_err = ["Unsupported algorithm",
                           "no such pattern sequence"]
        if not wipe_status_error:
            if wipe_result.exit_status != 0:
                if any(err in wipe_result.stderr
                       for err in unsupported_err):
                    test.cancel(wipe_result.stderr)
                # BUG FIX: report the wipe command's own stderr; the original
                # printed clone_result.stdout here, hiding the real error.
                test.fail("Wipe volume fail:\n%s" %
                          wipe_result.stderr.strip())
            else:
                virsh_vol_info = libvirt_vol.volume_info(target_vol)
                for key in virsh_vol_info:
                    logging.debug("Wiped volume info(virsh): %s = %s",
                                  key, virsh_vol_info[key])
                vol_path = virsh.vol_path(target_vol,
                                          pool_name).stdout.strip()
                qemu_vol_info = utils_misc.get_image_info(vol_path)
                for key in qemu_vol_info:
                    logging.debug("Wiped volume info(qemu): %s = %s",
                                  key, qemu_vol_info[key])
                if qemu_vol_info['format'] != 'raw':
                    test.fail("Expect wiped volume "
                              "format is raw")
        elif wipe_status_error and wipe_result.exit_status == 0:
            test.fail("Expect wipe volume fail, but run"
                      " successfully.")

    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                luks_sec_uuid = create_luks_secret(os.path.join(pool_target,
                                                                vol_name),
                                                   encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not support for block volume
        if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                wipe_and_verify(new_vol_name)
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            wipe_and_verify(vol_name)

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
def run_virsh_update_device(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1.Prepare test environment.Make sure a cdrom exists in VM.
      If not, please attach one cdrom manually.
    2.Perform virsh update-device operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive() and params.get("start_vm") == "no":
        vm.destroy()

    def create_attach_xml(update_xmlfile, source_iso):
        """
        Create a xml file to update a device.

        @param: update_xmlfile : Temp xml saves device infomation.
        @param: source_iso : disk's source file.
        """
        try:
            # Create a 1M sparse file to serve as the cdrom image.
            f = open(source_iso, 'wb')
            f.seek((1024 * 1024) - 1)
            f.write(str(0))
            f.close()
        except IOError:
            raise error.TestFail("Create source_iso failed!")
        content = """
<disk device='cdrom' type='file'>
  <driver name='file'/>
  <source file='%s'/>
  <target bus='ide' dev='hdc'/>
  <readonly/>
</disk>
""" % source_iso
        xmlfile = open(update_xmlfile, 'w')
        xmlfile.write(content)
        xmlfile.close()

    def check_attach(source_file, output):
        """
        Check attached device and disk exist or not.

        @param: source_file : disk's source file.
        @param: output :VM's xml infomation .
        """
        dom = parseString(output)
        source = dom.getElementsByTagName("source")
        output2 = ""
        for n in source:
            output2 += n.getAttribute("file")
        target = dom.getElementsByTagName("target")
        output3 = ""
        for n in target:
            output3 += n.getAttribute("dev")
        # BUG FIX: 'dom.unlink' was referenced without calling it, so the
        # minidom document was never released.
        dom.unlink()
        source_iso = "%s" % source_file
        if not re.search(source_iso, output2):
            raise error.TestFail("didn't see 'attached disk")
        if not re.search('hdc', output3):
            raise error.TestFail("didn't see 'attached device")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Prepare tmp directory and files.
    tmp_iso = os.path.join(test.virtdir, "tmp.iso")
    tmp2_iso = os.path.join(test.virtdir, "tmp2.iso")
    update_xmlfile = os.path.join(test.tmpdir, "xml_file")

    # Get all parameters for configuration.
    flag = params.get("updatedevice_flag", "")
    twice = "yes" == params.get("updatedevice_twice", "no")
    diff_iso = params.get("updatedevice_diff_iso", "no")
    vm_ref = params.get("updatedevice_vm_ref", "")
    status_error = params.get("status_error", "no")
    extra = params.get("updatedevice_extra", "")

    create_attach_xml(update_xmlfile, tmp_iso)
    vm_xml = os.path.join(test.tmpdir, "vm_xml")
    virsh.dumpxml(vm_name, extra="", to_file=vm_xml)
    vmxml_before = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    if vm_ref == "id":
        vm_ref = domid
        if twice:
            virsh.update_device(domainarg=domid, filearg=update_xmlfile,
                                ignore_status=True)
        if diff_iso == "yes":
            # Second update uses a different iso image.
            os.remove(update_xmlfile)
            create_attach_xml(update_xmlfile, tmp2_iso)
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("updatedevice_invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)

    status = virsh.update_device(domainarg=vm_ref, filearg=update_xmlfile,
                                 flagstr=flag, ignore_status=True,
                                 debug=True).exit_status

    output = "%s" % libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    # 'in' works on both dict and avocado params; replaces py2-only has_key.
    if "updatedevice_diff_file" in params:
        vm_xml_after = os.path.join(test.tmpdir, "vm_xml_after")
        virsh.dumpxml(vm_name, extra="", to_file=vm_xml_after)
    vm.destroy()
    output_shut = "%s" % libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Recover environment
    vm.undefine()
    vmxml_before.define()
    if os.path.exists(tmp_iso):
        os.remove(tmp_iso)
    if os.path.exists(tmp2_iso):
        os.remove(tmp2_iso)

    # Check status_error: an unsupported flag makes failure the expectation.
    flag_list = flag.split("--")
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        # Compare against None with 'is', not '=='.
        if virsh.has_command_help_match("update-device", option) is None:
            status_error = "yes"
            break
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if flag == "--persistent" or flag == "--config":
                if not re.search(tmp_iso, output_shut):
                    raise error.TestFail("virsh update-device function invalid"
                                         "didn't see 'attached device' in XML")
            else:
                if "updatedevice_diff_file" in params:
                    # BUG FIX: use context-managed open() instead of the
                    # py2-only file() left unclosed.
                    with open(vm_xml, 'r') as xml_fd:
                        context_before = xml_fd.read().splitlines()
                    with open(vm_xml_after, 'r') as xml_fd:
                        context_after = xml_fd.read().splitlines()
                    output_diff = difflib.Differ().compare(context_before,
                                                           context_after)
                    if not re.search(tmp_iso, "\n".join(list(output_diff))):
                        raise error.TestFail("virsh update-device function "
                                             "invalid; can't see 'attached device'"
                                             "in before/after")
                else:
                    if re.search(tmp_iso, output_shut):
                        raise error.TestFail("virsh attach-device without "
                                             "--persistent/--config function "
                                             "invalid;can see "
                                             "'attached device'in XML")
            if diff_iso == "yes":
                check_attach(tmp2_iso, output)
            if vm_ref == "name":
                check_attach(tmp_iso, output)
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: test object used for cancel/error/fail reporting
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"

    # Keep the raw parameter strings so the error message can report the
    # offending value: formatting the not-yet-assigned int variable in the
    # except clause used to raise NameError instead of calling test.error().
    current_vcpu_ref = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(current_vcpu_ref)
    except ValueError:
        test.error(convert_err.format(current_vcpu_ref))
    max_vcpu_ref = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(max_vcpu_ref)
    except ValueError:
        test.error(convert_err.format(max_vcpu_ref))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            # NOTE(review): eval() on a config-supplied string. Configs are
            # trusted here, but ast.literal_eval would be safer — confirm no
            # config relies on expression evaluation before changing this.
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may deliberately be an invalid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri")
    tmpxml = os.path.join(data_dir.get_tmp_dir(), 'tmp.xml')
    topology_correction = "yes" == params.get("topology_correction", "yes")
    result = True

    # Early death 1.1: a remote run needs real remote host parameters.
    if remote_uri:
        if remote_ip.count("EXAMPLE.COM"):
            test.cancel("remote ip parameters not set.")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Early death 1.2: every requested option must exist in this libvirt.
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expected vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
        if options == '':
            # when no option is given, setvcpus defaults to --live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration so it can be restored in the finally block
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #
    try:
        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encountering a "requested vcpus greater than max allowable" error.
        topology = vmxml.get_cpu_topology()
        # Fixed operator precedence bug: the original
        # ("config" and "maximum" in options) only tested "maximum";
        # both options must be present for this branch.
        if (topology and "config" in options and "maximum" in options
                and not status_error):
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           topology_correction=topology_correction)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if "
                          "setvcpus can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Build the domain argument used on the virsh command line.
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif vm_ref == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = vm_ref

        if remote_uri:
            status = virsh.setvcpus(dom_option, "1", "--config",
                                    ignore_status=True, debug=True,
                                    uri=remote_uri)
        else:
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                set_expected(vm, options)
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option=options)
        setvcpu_exit_status = status.exit_status
        setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup: resume a paused guest, restore the saved XML, drop tmpxml
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." %
                            cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure. In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'],
                                      setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
def is_old_libvirt():
    """
    Check whether the installed libvirt predates the 'setmem --size' option.

    :return: True if 'virsh setmem' help output lacks the [--size] option
             (i.e. libvirt is old), False otherwise.
    """
    regex = r"\s+\[--size\]\s+"
    # has_command_help_match() returns a truthy match when the option is
    # documented, or None when absent; `not` already yields a bool, so the
    # original redundant bool() wrapper is dropped.
    return not virsh.has_command_help_match("setmem", regex)
(dump_image_format, actual_format)) return False else: return True # Configure dump_image_format in /etc/libvirt/qemu.conf. qemu_config = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() if len(dump_image_format): qemu_config.dump_image_format = dump_image_format libvirtd.restart() # Deal with memory-only dump format if len(memory_dump_format): # Make sure libvirt support this option if virsh.has_command_help_match("dump", "--format") is None: test.cancel("Current libvirt version doesn't support" " --format option for dump command") # Make sure QEMU support this format query_cmd = '{"execute":"query-dump-guest-memory-capability"}' qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout if (memory_dump_format not in qemu_capa) and not status_error: test.cancel("Unsupported dump format '%s' for" " this QEMU binary" % memory_dump_format) options += " --format %s" % memory_dump_format if memory_dump_format == 'elf': dump_image_format = 'elf' if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']: dump_image_format = 'data' # Back up xml file
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Save the inactive domain XML so the guest can be re-defined on cleanup.
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number by parsing the 'current' attribute of the
        <vcpu> element in a fresh domain XML dump.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
        vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    # Start from a known state: 2 maximum vcpus, guest running.
    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        # Remote scenario: run setvcpus through an ssh session to this host
        # using a remote-style connection URI.
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string,
                                                        vm_name)
        # NOTE(review): 'command' is the full shell command line here, while
        # has_command_help_match() appears to expect a virsh sub-command
        # name (cf. the other setvcpus variants pass "setvcpus") — confirm.
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command,
                                                   internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        # Local scenario: build the domain argument from the params.
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        # Unsupported options turn this into an expected-failure case.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options,
            ignore_status=True).exit_status

    if pre_vm_state == "paused":
        virsh.resume(vm_name, ignore_status=True)

    # Collect the resulting vcpu count before the environment is torn down.
    if status_error == "no":
        if status == 0:
            if pre_vm_state == "shut off":
                if options == "--config":
                    vcpus_set = len(vm.vcpuinfo())
                elif options == "--current":
                    vcpus_set = get_current_vcpus()
                elif options == "--maximum --config":
                    vcpus_set = ""
                    # Read the maximum straight from the persistent XML.
                    dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                    vcpus_set = dom.getElementsByTagName(
                        "vcpu")[0].firstChild.data
                    vcpus_set = int(vcpus_set)
                    dom.unlink()
            else:
                vcpus_set = len(vm.vcpuinfo())
            if domain == "id":
                cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                output1 = commands.getoutput(cmd_chk)
                logging.info("guest-info:\n%s" % output1)

    # Recover the test environment from the saved XML.
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
def run(test, params, env):
    """
    Test the command virsh memtune

    (1) To get the current memtune parameters
    (2) Change the parameter values
    (3) Check the memtune query updated with the values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Login to guest and use the memory greater that the assigned value
        and check whether it kills the vm.
    (6) TODO:Check more values and robust scenarios.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """

    def check_limit(path, expected_value, limit_name):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        :params: path: memory controller path for a domain
        :params: expected_value: the expected limit value
        :params: limit_name: the limit to be checked
                 hard_limit/soft_limit/swap_hard_limit
        :return: True or False based on the checks
        """
        status_value = True
        # Check 1: virsh memtune must report the value we just set.
        actual_value = virsh.memtune_get(domname, limit_name)
        if actual_value == -1:
            raise error.TestFail("the key %s not found in the "
                                 "virsh memtune output" % limit_name)
        if actual_value != int(expected_value):
            status_value = False
            logging.error("%s virsh output:\n\tExpected value:%d"
                          "\n\tActual value: "
                          "%d", limit_name,
                          int(expected_value), int(actual_value))

        # Check 2: the matching cgroup file must hold the same value.
        # The cgroup stores bytes while virsh reports KiB, hence /1024.
        if limit_name == 'hard_limit':
            cg_file_name = '%s/memory.limit_in_bytes' % path
        elif limit_name == 'soft_limit':
            cg_file_name = '%s/memory.soft_limit_in_bytes' % path
        elif limit_name == 'swap_hard_limit':
            cg_file_name = '%s/memory.memsw.limit_in_bytes' % path
        try:
            # 'with' guarantees the file is closed on every path, replacing
            # the original nested try/finally with a manual close().
            with open(cg_file_name) as cg_file:
                value = int(cg_file.read()) / 1024
            if int(expected_value) != int(value):
                status_value = False
                logging.error("%s cgroup fs:\n\tExpected Value: %d"
                              "\n\tActual Value: "
                              "%d", limit_name,
                              int(expected_value), int(value))
        except IOError:
            status_value = False
            logging.error("Error while reading:\n%s", cg_file_name)
        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # In case of total memory is less than 1GB set to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        raise error.TestNAError(
            "Memtune not available in this libvirt version")

    # Run test case with 100kB increase in memory value for each iteration.
    # int() keeps the comparison numeric even if the meminfo helper ever
    # returns a string (a cross-type comparison would misbehave here).
    while (Mem < int(Memtotal)):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, hard_mem, "hard_limit"):
                error_counter += 1
        else:
            # message typo fixed: "harlimit" -> "hard-limit"
            raise error.TestNAError("hard-limit option not available in "
                                    "memtune cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, soft_mem, "soft_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("softlimit option not available in memtune "
                                    "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_limit(path, swaphard, "swap_hard_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("swaplimit option not available in memtune "
                                    "cmd in this libvirt version")

        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        raise error.TestFail(
            "Test failed, consult the previous error messages")
def run(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1. Prepare test environment, adding a cdrom/floppy to VM.
    2. Perform virsh update-device operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """
    # Before doing anything - let's be sure we can support this test
    # Parse flag list, skip testing early if flag is not supported
    # NOTE: "".split("--") returns [''] which messes up later empty test
    flag = params.get("updatedevice_flag", "")
    flag_list = []
    if flag.count("--"):
        flag_list = flag.split("--")
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if not bool(virsh.has_command_help_match("update-device", option)):
            raise error.TestNAError("virsh update-device doesn't support --%s"
                                    % option)

    # As per RH BZ 961443 avoid testing before behavior changes
    if 'config' in flag_list:
        # SKIP tests using --config if libvirt is 0.9.10 or earlier
        if not libvirt_version.version_compare(0, 9, 10):
            raise error.TestNAError("BZ 961443: --config behavior change "
                                    "in version 0.9.10")
    if 'persistent' in flag_list:
        # SKIP tests using --persistent if libvirt 1.0.5 or earlier
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("BZ 961443: --persistent behavior change "
                                    "in version 1.0.5")

    # Prepare initial vm state: save the inactive XML for later restore.
    vm_name = params.get("main_vm")
    vmxml = VMXML.new_from_dumpxml(vm_name, options="--inactive")
    vm = env.get_vm(vm_name)
    start_vm = "yes" == params.get("start_vm", "no")

    # Get the target bus/dev
    disk_type = params.get("disk_type", "cdrom")
    target_bus = params.get("updatedevice_target_bus", "ide")
    target_dev = params.get("updatedevice_target_dev", "hdc")
    disk_mode = params.get("disk_mode", "")
    support_mode = ['readonly', 'shareable']
    # NOTE(review): this guard fires whenever disk_mode is EMPTY (the
    # default), since "" is also not in support_mode; it looks like
    # `if disk_mode and disk_mode not in support_mode` was intended —
    # confirm against the test configs before relying on it.
    if not disk_mode and disk_mode not in support_mode:
        raise error.TestError("%s not in support mode %s"
                              % (disk_mode, support_mode))

    # Prepare tmp directory and files.
    orig_iso = os.path.join(test.virtdir, "orig.iso")
    test_iso = os.path.join(test.virtdir, "test.iso")
    test_diff_iso = os.path.join(test.virtdir, "test_diff.iso")
    update_xmlfile = os.path.join(test.tmpdir, "update.xml")
    create_attach_xml(update_xmlfile, test_iso, disk_type, target_bus,
                      target_dev, disk_mode)

    # This test needs a cdrom/floppy attached first - attach a cdrom/floppy
    # to a shutdown vm. Then decide to restart or not
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Vm should be in 'shut off' status
    utils_misc.wait_for(lambda: vm.state() == "shut off", 30)
    create_disk(vm_name, orig_iso, disk_type, target_dev, disk_mode)
    if start_vm:
        vm.start()
        vm.wait_for_login().close()
        domid = vm.get_id()
    else:
        # Deliberately invalid id string used by negative by-id cases.
        domid = "domid invalid; domain is shut-off"

    # Get remaining parameters for configuration.
    twice = "yes" == params.get("updatedevice_twice", "no")
    diff_iso = "yes" == params.get("updatedevice_diff_iso", "no")
    vm_ref = params.get("updatedevice_vm_ref", "")
    status_error = "yes" == params.get("status_error", "no")
    extra = params.get("updatedevice_extra", "")

    # OK let's give this a whirl...
    errmsg = ""
    try:
        # Translate the symbolic vm_ref into the actual domain argument.
        if vm_ref == "id":
            vm_ref = domid
            if twice:
                # Don't pass in any flags
                ret = virsh.update_device(domainarg=domid,
                                          filearg=update_xmlfile,
                                          ignore_status=True, debug=True)
                if not status_error:
                    status = ret.exit_status
                    errmsg += ret.stderr
                    libvirt.check_exit_status(ret)
            if diff_iso:
                # Swap filename of device backing file in update.xml
                os.remove(update_xmlfile)
                create_attach_xml(update_xmlfile, test_diff_iso, disk_type,
                                  target_bus, target_dev, disk_mode)
        elif vm_ref == "uuid":
            vm_ref = vmxml.uuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("updatedevice_invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, extra)

        # The actual update-device call under test.
        cmdresult = virsh.update_device(domainarg=vm_ref,
                                        filearg=update_xmlfile,
                                        flagstr=flag, ignore_status=True,
                                        debug=True)
        status = cmdresult.exit_status
        if not status_error:
            errmsg += cmdresult.stderr

        # Capture both views of the domain XML for the flag checks below.
        active_vmxml = VMXML.new_from_dumpxml(vm_name)
        inactive_vmxml = VMXML.new_from_dumpxml(vm_name,
                                                options="--inactive")
    finally:
        # Always restore the original definition and remove scratch images.
        vm.destroy(gracefully=False, free_mac_addresses=False)
        vmxml.undefine()
        vmxml.restore()
        vmxml.define()
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        if os.path.exists(test_iso):
            os.remove(test_iso)
        if os.path.exists(test_diff_iso):
            os.remove(test_diff_iso)

    # Result handling logic set errmsg only on error
    if status_error:
        if status == 0:
            errmsg += "\nRun successfully with wrong command!\n"
    else:  # Normal test
        if status != 0:
            errmsg += "\nRun failed with right command\n"
        if diff_iso:
            # Expect the backing file to have updated
            active_attached = is_attached(active_vmxml.devices, disk_type,
                                          test_diff_iso, target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices,
                                            disk_type, test_diff_iso,
                                            target_dev)
        else:
            # Expect backing file to remain the same
            active_attached = is_attached(active_vmxml.devices, disk_type,
                                          test_iso, target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices,
                                            disk_type, test_iso,
                                            target_dev)

        # Check behavior of combination before individual!
        if "config" in flag_list and "live" in flag_list:
            if not active_attached:
                errmsg += ("Active domain XML not updated when "
                           "--config --live options used\n")
            if not inactive_attached:
                errmsg += ("Inactive domain XML not updated when "
                           "--config --live options used\n")
        elif "live" in flag_list and inactive_attached:
            errmsg += ("Inactive domain XML updated when "
                       "--live option used\n")
        elif "config" in flag_list and active_attached:
            errmsg += ("Active domain XML updated when "
                       "--config option used\n")

        # persistent option behavior depends on start_vm
        if "persistent" in flag_list:
            if start_vm:
                if not active_attached or not inactive_attached:
                    errmsg += ("XML not updated when --persistent "
                               "option used on active domain\n")
            else:
                if not inactive_attached:
                    errmsg += ("XML not updated when --persistent "
                               "option used on inactive domain\n")
        if len(flag_list) == 0:
            # Not specifying any flag is the same as specifying --current
            if start_vm:
                if not active_attached:
                    errmsg += "Active domain XML not updated\n"
                elif inactive_attached:
                    errmsg += ("Inactive domain XML updated when active "
                               "requested\n")

    # Log some debugging info before destroying instances
    if errmsg and not status_error:
        logging.debug("Active XML:")
        logging.debug(str(active_vmxml))
        logging.debug("Inactive XML:")
        logging.debug(str(inactive_vmxml))
        logging.debug("active_attached: %s", str(active_attached))
        logging.debug("inctive_attached: %s", str(inactive_attached))
        logging.debug("Device XML:")
        logging.debug(open(update_xmlfile, "r").read())

    # clean up tmp files
    del vmxml
    del active_vmxml
    del inactive_vmxml
    os.unlink(update_xmlfile)

    if errmsg:
        raise error.TestFail(errmsg)
def run(test, params, env):
    """
    This test cover two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the new create volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object
    """
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    # A bare target name is placed under the framework's tmp directory.
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    # Skip early if the requested clone option is not in this libvirt.
    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata "
                        "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero need scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33",
                            "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume; the creation path depends on vol_format.
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and not libvirt_version.version_compare(6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                # LUKS volumes need a libvirt secret holding the passphrase.
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name),
                    encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            # Disk pools expose partitions; reuse the first existing one.
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key,
                          vol_info[key])

        # Metadata preallocation is not support for block volume
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        # Cancel (not fail) when the algorithm itself is
                        # unavailable in this build.
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(
                            new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): "
                                          "%s = %s", key,
                                          virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): "
                                          "%s = %s", key,
                                          qemu_vol_info[key])
                        # A wiped volume is expected to degrade to raw.
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume "
                                      "format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but run"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but run"
                      " successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              clone_result.stdout.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume "
                                  "format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but run"
                          " successfully.")

        if bad_cloned_vol_name:
            # Negative clone case must fail with the expected message.
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh setvcpus operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: avocado test object (provides error/cancel/fail and tmpdir)
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    status_error = (params.get("status_error", "no") == "yes")
    convert_err = "Can't convert {0} to integer type"

    # Read the raw parameter first so the error message can report the
    # offending value; formatting the not-yet-bound int result inside the
    # except handler would itself raise NameError.
    current_vcpu = params.get("setvcpus_current", "1")
    try:
        current_vcpu = int(current_vcpu)
    except ValueError:
        test.error(convert_err.format(current_vcpu))
    max_vcpu = params.get("setvcpus_max", "4")
    try:
        max_vcpu = int(max_vcpu)
    except ValueError:
        test.error(convert_err.format(max_vcpu))
    try:
        count = params.get("setvcpus_count", "")
        if count:
            # NOTE(review): eval() is only acceptable here because params
            # come from trusted test cfg files; never feed untrusted input.
            count = eval(count)
        count = int(count)
    except ValueError:
        # 'count' may be an invalid number in negative tests
        logging.debug(convert_err.format(count))

    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    set_topology = (params.get("set_topology", "no") == "yes")
    sockets = params.get("sockets")
    cores = params.get("cores")
    threads = params.get("threads")

    # Early death 1.1: the remote variant needs real IPs configured.
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        test.cancel("remote/local ip parameters not set.")

    # Early death 1.2: every requested option must be supported by virsh.
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match(command, item) is None:
            test.cancel("The current libvirt version"
                        " doesn't support '%s' option" % item)

    # Init expect vcpu count values
    exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                'cur_config': current_vcpu, 'cur_live': current_vcpu,
                'guest_live': current_vcpu}

    def set_expected(vm, options):
        """
        Set the expected vcpu numbers

        :param vm: vm object
        :param options: setvcpus options
        """
        if ("config" in options) or ("current" in options and vm.is_dead()):
            if "maximum" in options:
                exp_vcpu["max_config"] = count
            else:
                exp_vcpu['cur_config'] = count
        if ("live" in options) or ("current" in options and vm.is_alive()):
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count
        if options == '':
            # when none given it defaults to live
            exp_vcpu['cur_live'] = count
            exp_vcpu['guest_live'] = count

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    # Normal processing of the test is to set the maximum vcpu count to 4,
    # and set the current vcpu count to 1, then adjust the 'count' value to
    # plug or unplug vcpus.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is under
    # going lots of change from using the hmp "cpu_set" command in 1.5
    # to a new qmp "cpu-add" added in 1.6 where the "cpu-set" command
    # seems to have been deprecated making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # for specific machine type versions. For the purposes of this test that
    # would be "pc-i440fx-1.5" or "pc-q35-1.5" or later type machines (from
    # guest XML "<os> <type ... machine=''/type> </os>"). Depending on which
    # version of qemu/kvm was used to initially create/generate the XML for
    # the machine this could result in a newer qemu still using 1.4 or earlier
    # for the machine type.
    #

    # Pre-initialize: the remote path performs no vcpu value check, so
    # 'result' would otherwise be unbound at the final evaluation below.
    result = True
    try:
        # Set maximum vcpus, so we can run all kinds of normal tests without
        # encounter requested vcpus greater than max allowable vcpus error
        topology = vmxml.get_cpu_topology()
        if all([topology, sockets, cores, threads]) or set_topology:
            vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                               sockets, cores, threads, True)
        else:
            vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu)

        # BUG FIX: the original condition '("config" and "maximum" in
        # options)' reduced to just '"maximum" in options' because the
        # truthy string "config" short-circuits; test both flags explicitly.
        if (topology and "config" in options and "maximum" in options and
                not status_error):
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            del vmxml.cpu
            vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        # Get the number of cpus, current value if set, and machine type
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("Before run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        if cpu_xml_data['vcpu'] == 1 and count == 1:
            logging.debug("Original vCPU count is 1, just checking if "
                          "setvcpus can still set current.")

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip, local_ip,
                                                remote_pwd, remote_prompt,
                                                vm_name, status_error)
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            if not status_error:
                set_expected(vm, options)
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option=options)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

    finally:
        cpu_xml_data = utils_hotplug.get_cpu_xmldata(vm, options)
        logging.debug("After run setvcpus: cpu_count=%d, cpu_current=%d,"
                      " mtype=%s", cpu_xml_data['vcpu'],
                      cpu_xml_data['current_vcpu'], cpu_xml_data['mtype'])

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()
        if os.path.exists(tmpxml):
            os.remove(tmpxml)

    # check status_error
    if status_error:
        if setvcpu_exit_status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpu/hotplug is only available as of qemu 1.5 and it's still
            # evolving. In general the addition of vcpu's may use the QMP
            # "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later) commands.
            # The removal of vcpu's may work in qemu 1.5 due to how cpu_set
            # can set vcpus online or offline; however, there doesn't appear
            # to be a complementary cpu-del feature yet, so we can add, but
            # not delete in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                test.cancel("guest <os> machine property '%s' "
                            "may be too old to allow hotplug." %
                            cpu_xml_data['mtype'])

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure. In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                test.cancel("virsh setvcpu hotplug unsupported, "
                            " mtype=%s" % cpu_xml_data['mtype'])

            # Otherwise, it seems we have a real error
            test.fail("Run failed with right command mtype=%s"
                      " stderr=%s" % (cpu_xml_data['mtype'],
                                      setvcpu_exit_stderr))
        else:
            if not result:
                test.fail("Test Failed")
def run_virsh_domblkstat(test, params, env):
    """
    Test command: virsh domblkstat.

    The command gets device block stats for a running domain.
    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domblkstat operation.
    4. Recover test environment.
    5. Confirm the test result.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    :raises error.TestFail: when the result does not match status_error
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    blklist = libvirt_xml.VMXML.get_disk_blk(vm_name)
    # Idiom fix: compare with None using 'is', not '=='.
    if blklist is None:
        raise error.TestFail("Cannot find disk in %s" % vm_name)
    # Select a block device from disks
    blk = blklist[0]
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domblkstat_vm_ref")
    options = params.get("domblkstat_option", "")
    status_error = params.get("status_error", "no")
    if params.get("domblkinfo_dev") == "no":
        blk = ""
    # Resolve the domain reference that goes on the command line
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif "invalid" in vm_ref:
        # e.g. "invalid_id"/"invalid_uuid": look up the bogus value by key
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domblkstat_extra"))
    option_list = options.split(" ")
    for option in option_list:
        # Any unsupported option means the command is expected to fail
        if virsh.has_command_help_match("domblkstat", option) is None:
            status_error = "yes"
            break
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()
    result = virsh.domblkstat(vm_ref, blk, options, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()
    err = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0 or err == "":
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
def run(test, params, env):
    """
    Test command: virsh iothreadpin.

    The command can pin a domain iothread to a host cpuset.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh iothreadpin operation.
    3. Recover test environment.
    4. Confirm the test result.

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    :raises exceptions.TestFail: when the pin is not reflected in the
        domain XML or in `virsh iothreadinfo` output
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("iothread_pre_vm_state")
    command = params.get("iothread_command", "iothread")
    options = params.get("iothread_options")
    vm_ref = params.get("iothread_vm_ref", "name")
    iothreads = params.get("iothreads", 4)
    iothread_id = params.get("iothread_id", "6")
    cpuset = params.get("cpuset", "1")
    status_error = "yes" == params.get("status_error")
    iothreadids = params.get("iothreadids")
    iothreadpins = params.get("iothreadpins")
    try:
        iothreads = int(iothreads)
    except ValueError:
        # 'iothreads' may be an invalid number in negative tests
        logging.debug("Can't convert %s to integer type", iothreads)

    # Save original configuration
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if vm.is_alive():
            vm.destroy()

        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                raise exceptions.TestSkipError("The current libvirt version"
                                               " doesn't support '%s' option"
                                               % item)

        # Set iothreads first
        if iothreadids:
            ids_xml = vm_xml.VMIothreadidsXML()
            ids_xml.iothread = iothreadids.split()
            vmxml.iothreadids = ids_xml
        if iothreadpins:
            cputune_xml = vm_xml.VMCPUTuneXML()
            io_pins = []
            for pins in iothreadpins.split():
                thread, cpu = pins.split(':')
                io_pins.append({"iothread": thread,
                                "cpuset": cpu})
            cputune_xml.iothreadpins = io_pins
            vmxml.cputune = cputune_xml
        vmxml.iothreads = iothreads
        logging.debug("Pre-test xml is %s", vmxml)
        vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()

        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
        elif vm_ref == "uuid":
            dom_option = domuuid
        else:
            dom_option = vm_ref

        virsh_dargs = {"debug": True, "ignore_status": True}
        if "yes" == params.get("readonly", "no"):
            virsh_dargs.update({"readonly": True})
        ret = virsh.iothreadpin(dom_option, iothread_id, cpuset,
                                options, **virsh_dargs)
        libvirt.check_exit_status(ret, status_error)

        if not status_error:
            # Check domainxml
            iothread_info = get_xmlinfo(vm_name, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            # for/else: the else branch only runs when no matching
            # iothreadpin entry was found in the domain xml.
            for info in iothread_info:
                if (info["iothread"] == iothread_id and
                        info["cpuset"] == cpuset):
                    # Found the iothreadpin in domain xml
                    break
            else:
                # BUG FIX: the message args were previously passed
                # logging-style to the exception constructor and never
                # interpolated; use explicit %-formatting instead.
                raise exceptions.TestFail(
                    "Failed to add iothread %s in domain xml" % iothread_id)

            # Check iothreadinfo by virsh command
            iothread_info = libvirt.get_iothreadsinfo(dom_option, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            if (iothread_id not in iothread_info or
                    iothread_info[iothread_id] != cpuset):
                raise exceptions.TestFail("Failed to add iothreadpins %s"
                                          % iothread_id)

    finally:
        # Cleanup
        if vm.is_alive():
            vm.destroy()
        orig_config_xml.sync()
def run(test, params, env):
    """
    Test the command virsh vcpucount

    (1) Iterate perform setvcpus operation with four valid options.
    (2) Iterate call virsh vcpucount with given options.
    (3) Check whether the virsh vcpucount works as expected.
    (4) Recover test environment.

    The test works for domain state as "shut off" or "running"; it checks
    the vcpucount result after vcpu hotplug using setvcpus.

    For setvcpus, include four valid options:
      --config
      --config --maximum
      --live
      --guest

    For vcpucount options, restrict up to 2 options together, upstream
    libvirt support more options combinations now (e.g. 3 options together
    or single --maximum option), for backward support, only following
    options are checked:
      None
      --config --active
      --config --maximum
      --live --active
      --live --maximum
      --current --active
      --current --maximum
      --guest

    :param test: test object
    :param params: test parameter dictionary
    :param env: test environment object holding the VM
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # The inactive XML dump is kept so reset_env() can restore the guest.
    xml_file = params.get("vcpucount_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    pre_vm_state = params.get("vcpucount_pre_vm_state")
    options = params.get("vcpucount_options")
    status_error = params.get("status_error")
    maxvcpu = int(params.get("vcpucount_maxvcpu", "4"))
    curvcpu = int(params.get("vcpucount_current", "1"))
    sockets = int(params.get("sockets", "1"))
    cores = int(params.get("cores", "4"))
    threads = int(params.get("threads", "1"))
    # Target count for the hotplug: one more "thread-group" of vcpus.
    livevcpu = curvcpu + threads
    set_option = ["--config", "--config --maximum", "--live", "--guest"]

    # Early death
    # 1.1 More than two options not supported
    if len(options.split()) > 2:
        test.cancel("Options exceeds 2 is not supported")

    # 1.2 Check for all options
    option_list = options.split(" ")
    for item in option_list:
        if virsh.has_command_help_match("vcpucount", item) is None:
            test.cancel("The current libvirt "
                        "version doesn't support "
                        "'%s' option" % item)
    # 1.3 Check for vcpu values: topology product must equal the maximum.
    if (sockets and cores and threads):
        if int(maxvcpu) != int(sockets) * int(cores) * int(threads):
            test.cancel("Invalid topology definition, VM will not start")

    try:
        # Prepare domain (reset_domain is a sibling helper in this module)
        reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                     sockets, cores, threads, ("--guest" in options))

        # Perform guest vcpu hotplug: one pass per setvcpus option.
        for idx in range(len(set_option)):
            # Remove topology for maximum config
            # https://bugzilla.redhat.com/show_bug.cgi?id=1426220
            if idx == 1:
                del_topology(vm, pre_vm_state)
            # Hotplug domain vcpu
            result = virsh.setvcpus(vm_name, livevcpu, set_option[idx],
                                    ignore_status=True, debug=True)
            setvcpus_status = result.exit_status

            # Call virsh vcpucount with option
            result = virsh.vcpucount(vm_name, options, ignore_status=True,
                                     debug=True)
            output = result.stdout.strip()
            vcpucount_status = result.exit_status

            if "--guest" in options:
                # --guest needs the qemu guest agent; bail out if absent.
                if result.stderr.count("doesn't support option") or \
                   result.stderr.count("command guest-get-vcpus has "
                                       "not been found"):
                    reset_env(vm_name, xml_file)
                    test.fail("Option %s is not supported" % options)

            # Reset domain back to the baseline vcpu layout for next pass
            reset_domain(vm, pre_vm_state, maxvcpu, curvcpu,
                         sockets, cores, threads, ("--guest" in options))

            # Check result
            if status_error == "yes":
                if vcpucount_status == 0:
                    reset_env(vm_name, xml_file)
                    test.fail("Run successfully with wrong command!")
                else:
                    logging.info("Run failed as expected")
            else:
                if vcpucount_status != 0:
                    reset_env(vm_name, xml_file)
                    test.fail("Run command failed with options %s" %
                              options)
                elif setvcpus_status == 0:
                    # Expected counts depend on which setvcpus option ran
                    # (idx) and on the initial domain state.
                    if pre_vm_state == "shut off":
                        if idx == 0:
                            # --config: current config raised to livevcpu
                            expect_out = [maxvcpu, livevcpu]
                            chk_output_shutoff(output, expect_out, options)
                        elif idx == 1:
                            # --config --maximum: maximum becomes livevcpu
                            expect_out = [livevcpu, curvcpu]
                            chk_output_shutoff(output, expect_out, options)
                        else:
                            # --live/--guest cannot succeed on a shut-off
                            # domain, so success here is a test failure.
                            reset_env(vm_name, xml_file)
                            test.fail("setvcpus should failed")
                    else:
                        if idx == 0:
                            expect_out = [maxvcpu, maxvcpu, livevcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options)
                        elif idx == 1:
                            expect_out = [livevcpu, maxvcpu, curvcpu,
                                          curvcpu, curvcpu]
                            chk_output_running(output, expect_out, options)
                        elif idx == 2:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          livevcpu, livevcpu]
                            chk_output_running(output, expect_out, options)
                        else:
                            expect_out = [maxvcpu, maxvcpu, curvcpu,
                                          curvcpu, livevcpu]
                            chk_output_running(output, expect_out, options)
                else:
                    # setvcpus failed: counts must still match the baseline.
                    if pre_vm_state == "shut off":
                        expect_out = [maxvcpu, curvcpu]
                        chk_output_shutoff(output, expect_out, options)
                    else:
                        expect_out = [maxvcpu, maxvcpu,
                                      curvcpu, curvcpu, curvcpu]
                        chk_output_running(output, expect_out, options)
    finally:
        # Recover env
        reset_env(vm_name, xml_file)