def run(test, params, env):
    """
    Test for virt-top, a top-like tool for virtual machines.
    """
    # Install the virt-top package if it is missing.
    software_mgr = software_manager.SoftwareManager()
    if not software_mgr.check_installed('virt-top'):
        logging.info('Installing virt-top package:')
        software_mgr.install('virt-top')

    # Get the full path of the virt-top command.
    try:
        VIRT_TOP = path.find_command("virt-top")
    except path.CmdNotFoundError as info:
        raise exceptions.TestSkipError("No virt-top command found - %s" % info)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("options", "")

    id_result = virsh.domid(vm_name)
    if id_result.exit_status:
        raise exceptions.TestError("Get domid failed.")
    domid = id_result.stdout.strip()

    if "--stream" in options:
        cmd = "%s %s 1>%s" % (VIRT_TOP, options, output_path)
    else:
        cmd = "%s %s" % (VIRT_TOP, options)
    # Wrap with timeout so the interactive tool ends automatically.
    cmd = "timeout 10 %s" % cmd
    cmd_result = process.run(cmd, ignore_status=True, shell=True)

    if not status_error:
        # Read and analyse the output of virt-top.
        success = False
        with open(output_path) as output_file:
            lines = output_file.readlines()
        for line in lines:
            if line.count(vm_name):
                sub_string = line.split()
                if domid == sub_string[0].strip():
                    success = True
                    break
        if not success:
            raise exceptions.TestFail("Command virt-top exited successfully, "
                                      "but the expected domid was not found")
    else:
        if cmd_result.exit_status != 2:
            raise exceptions.TestFail("Command virt-top exited successfully "
                                      "with an invalid option: %s"
                                      % cmd_result.stdout_text)
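# Illustrative sketch (an addition, not part of the test above): the
# --stream branch matches a domain row of virt-top's batch output by its
# leading domid column. The sample row below is hypothetical, not captured
# from a real virt-top run.
def find_domid_in_stream(lines, vm_name, domid):
    """Return True if a virt-top output row for vm_name starts with domid."""
    for line in lines:
        if vm_name in line:
            columns = line.split()
            if columns and columns[0].strip() == domid:
                return True
    return False


sample_lines = ["virt-top 12:00:00 - hypothetical header row",
                " 2 R 0 0 1.0 0.5 1:00.00 avocado-vt-vm1"]
assert find_domid_in_stream(sample_lines, "avocado-vt-vm1", "2")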
def run(test, params, env):
    """
    Test for virt-top, a top-like tool for virtual machines.
    """
    # Get the full path of the virt-top command.
    try:
        VIRT_TOP = os_dep.command("virt-top")
    except ValueError:
        raise error.TestNAError("No virt-top command found.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("options", "")

    id_result = virsh.domid(vm_name)
    if id_result.exit_status:
        raise error.TestNAError("Get domid failed.")
    domid = id_result.stdout.strip()

    if "--stream" in options:
        cmd = "%s %s 1>%s" % (VIRT_TOP, options, output_path)
    else:
        cmd = "%s %s" % (VIRT_TOP, options)
    # Wrap with timeout so the interactive tool ends automatically.
    cmd = "timeout 10 %s" % cmd
    cmd_result = utils.run(cmd, ignore_status=True)

    if not status_error:
        # Read and analyse the output of virt-top.
        success = False
        output_file = open(output_path)
        lines = output_file.readlines()
        output_file.close()
        for line in lines:
            if line.count(vm_name):
                sub_string = line.split()
                if domid == sub_string[0].strip():
                    success = True
                    break
        if not success:
            raise error.TestFail("Command virt-top exited successfully, "
                                 "but the expected domid was not found")
    else:
        if cmd_result.exit_status != 2:
            raise error.TestFail("Command virt-top exited successfully "
                                 "with an invalid option: %s"
                                 % cmd_result.stdout)
def run(test, params, env):
    """
    Test virsh domdisplay command, which returns the graphic URL.

    This test covers the vnc and spice types, in both readonly and
    readwrite mode. With the --include-password option, the password
    must also be checked in the result.
    """
    if not virsh.has_help_command('domdisplay'):
        test.cancel("This version of libvirt doesn't support "
                    "domdisplay test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Back up the domain xml for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "qemu.conf.bk")

    if "--type" in options:
        if not libvirt_version.version_compare(1, 2, 6):
            test.cancel("--type option is not supported in this"
                        " libvirt version.")
        elif "vnc" in options and graphic != "vnc" or \
                "spice" in options and graphic != "spice":
            status_error = True

    def restart_libvirtd():
        # make the modification take effect
        libvirtd_instance = utils_libvirtd.Libvirtd()
        libvirtd_instance.restart()

    def clean_ssl_env():
        """
        Clean up the ssl spice connection settings first
        """
        # modify qemu.conf
        with open(qemu_conf, "r") as f_obj:
            cont = f_obj.read()

        # remove the existing settings
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write the remaining content back to the original file
        with open(qemu_conf, "w") as f_obj:
            f_obj.write(left_cont)

    def prepare_ssl_env():
        """
        Prepare for an ssl spice connection
        """
        # modify qemu.conf
        clean_ssl_env()
        # Append the ssl spice configuration
        with open(qemu_conf, "a") as f_obj:
            f_obj.write("spice_tls = 1\n")
            f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")

        # Generate a CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)
        os.chmod('/etc/pki/libvirt-spice/server-key.pem', 0o644)
        os.chmod('/etc/pki/libvirt-spice/ca-key.pem', 0o644)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        # Back up qemu.conf in tmp_file
        shutil.copyfile(qemu_conf, tmp_file)
        if is_ssl:
            prepare_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            clean_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, graphic)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do the test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                test.fail("Fail to get domain display info. Error: "
                          "%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error: %s.", result.stderr.strip())
                return
        elif status_error:
            test.fail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # The result differs depending on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get the active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphics = vmxml_act.devices.by_device_tag('graphics')
        for graph in graphics:
            if graph.type_name == graphic:
                graphic_act = graph
                port = graph.port

        # Build the expected result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options == "--include-password" and passwd is not None:
            # have --include-password and have a password in the xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if expect == output:
            logging.info("Get correct display: %s", output)
        else:
            test.fail("Expect %s, but get %s" % (expect, output))
    finally:
        # qemu.conf recovery
        shutil.move(tmp_file, qemu_conf)
        restart_libvirtd()
        # Domain xml recovery
        vmxml_backup.sync()
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service was stopped,
      start the libvirtd service.)
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status

    # recover the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # cleanup
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
def run(test, params, env):
    """
    Test command: virsh domid.

    The command returns basic information about the domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domid operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    if vm.is_alive() and params.get("start_vm") == "no":
        vm.destroy()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_ref = params.get("domid_vm_ref")
    extra = params.get("domid_extra", "")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")

    def remote_test(params, vm_name):
        """
        Test the remote case.
        """
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", "")
        status = 0
        output = ""
        err = ""
        try:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters "
                                        "not changed from default values.")
            uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domid %s" % (uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            if status != 0:
                err = output
            session.close()
        except process.CmdError:
            status = 1
            output = ""
            err = "remote test failed"
        return status, output, err

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        result = virsh.domid(vm_ref, ignore_status=True)
        status = result.exit_status
        output = result.stdout.strip()
        err = result.stderr.strip()
    else:
        status, output, err = remote_test(params, vm_name)

    # recover the libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0 or err == "":
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
def run(test, params, env):
    """
    Test virsh domdisplay command, which returns the graphic URL.

    This test covers the vnc and spice types, in both readonly and
    readwrite mode. With the --include-password option, the password
    must also be checked in the result.
    """
    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Back up the domain xml for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Prepare for an ssl spice connection
        """
        # modify qemu.conf
        f_obj = open(qemu_conf, "r")
        cont = f_obj.read()
        f_obj.close()

        # remove the existing settings
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write the remaining content back, then append the ssl settings
        f_obj = open(qemu_conf, "w")
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
        f_obj.close()

        # make the modification take effect
        utils_libvirtd.libvirtd_restart()

        # Generate a CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        if is_ssl:
            # Back up qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            if not graphic_count:
                Graphics.add_graphic(vm_name, passwd, graphic)
            # Only change the graphic type and password
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do the test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. "
                                     "Error: %s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error: %s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # The result differs depending on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get the active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Build the expected result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-password and have a password in the xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if expect == output:
            logging.info("Get correct display: %s", output)
        else:
            raise error.TestFail("Expect %s, but get %s" % (expect, output))
    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guests with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device.
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """

    def generate_expected_process_option(expected_results):
        """
        Generate the expected virtiofsd process options
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:

        1.Mount the dir in the guest;
        2.Write a file in the guest;
        3.Check that the md5sum values are the same in guests and host;
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount the virtiofs dir in the guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = ("dd if=/dev/urandom of=%s bs=1M count=512 "
                           "oflag=direct" % filename_guest)
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" % filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum values are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch virtiofsd externally

        :param source_dir: the dir shared on the host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path, ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Make sure the socket is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False, shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstest generic/531. Create a stress test script that
        creates a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run the stress script in the guest

        :param session: guest session
        :param script_path: The path of the script in the guest
        """
        # Raise the open-file limit to allow many unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in the guest

        :param vm: the vm whose filesystem should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir,
                            ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check that no filesystem device remains in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether the virtiofs can still be mounted in the vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (
            mount_dir, fs_dev.target['dir'], mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail("Mount virtiofs should fail after hotunplugging "
                      "the device: %s" % output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define the filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = ['path', 'cache_mode', 'xattr',
                               'lock_posix', 'flock']
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = ['accessmode', 'driver', 'source', 'target',
                              'binary']
                fsdev_values = [accessmode, driver, source, target,
                                binary_dict]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict,
                                                        launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start the guests with the virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + \
                    extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell \
                    if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index], fs_devs[0].xml,
                                          flagstr='--config', debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                    vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                if stdio_handler_file:
                    qemu_config = LibvirtQemuConfig()
                    qemu_config.stdio_handler = "file"
                    utils_libvirtd.Libvirtd().restart()
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index], fs_dev.xml,
                                              ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0], ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"],
                                           "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run the guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.setDaemon(True)
                stress_script_thread.start()
                # Create a lot of unlinked files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                cmd = ("virt-xml %s --edit --qemu-commandline '\\-foo'"
                       % vm_names[0])
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug(virsh.dumpxml(vm_names[0]))
                if cmd_result.exit_status:
                    test.error("virt-xml edit guest failed: %s" % cmd_result)
                result = virsh.start(vm_names[0], ignore_status=True,
                                     debug=True)
                if error_msg_start:
                    expected_fails_msg.append(error_msg_start)
                utils_test.libvirt.check_result(
                    result, expected_fails=expected_fails_msg)
                if not libvirt_version.version_compare(6, 10, 0):
                    # Because of bug #1897105, fixed in libvirt-6.10.0;
                    # before that version the env must be recovered manually.
                    cmd = "pkill virtiofsd"
                    process.run(cmd, shell=True)
                if not vm.is_alive():
                    # Restore the vm and check that it can start successfully
                    vmxml_virtio_backup.sync()
                    virsh.start(vm_names[0], ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = ("var/lib/libvirt/qemu/domain-" + domid +
                              '-' + vm_names[0])
                if result.exit_status:
                    test.fail("Get domid failed.")
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = domain_dir + alias + '-fs.pid'
                    expected_sock = alias + '-fs.sock'
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if not (status1 and status2):
                        test.fail("The socket and pid file is not as "
                                  "expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            cmd = ('lsof /var/log/libvirt/qemu/%s-%s-'
                                   'virtiofsd.log' % (vm.name, alias))
                            output = process.run(
                                cmd).stdout_text.splitlines()
                            for item in output[1:]:
                                if stdio_handler_file:
                                    if item.split()[0] != "virtiofsd":
                                        test.fail(
                                            "When setting stdio_handler as "
                                            "file, the command to write the "
                                            "log should be virtiofsd!")
                                else:
                                    if item.split()[0] != "virtlogd":
                                        test.fail(
                                            "When setting stdio_handler as "
                                            "logd, the command to write the "
                                            "log should be virtlogd!")
                            ret = virsh.detach_device_alias(
                                vm.name, alias, ignore_status=True,
                                debug=True, wait_for_event=True)
                        else:
                            ret = virsh.detach_device(vm.name, fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True,
                                                      wait_for_event=True)
                        libvirt.check_exit_status(ret, status_error)
                        check_filesystem_in_guest(vm, fs_dev)
                    check_detached_xml(vm)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf %s' % ('/var/tmp/' + str(dir_prefix) +
                                       str(index)), ignore_status=False)
        process.run('rm -rf %s' % source_socket, ignore_status=False,
                    shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path, ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
        if stdio_handler_file:
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
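# For reference (an addition, not an API contract): in the "auto" launch
# mode the fsdev_dict handed to libvirt_device_utils.create_fs_xml above
# has the following shape, shown here with the defaults from the params.
fsdev_dict_example = {
    'accessmode': 'passthrough',
    'driver': {'type': 'virtiofs', 'queue': 512},
    'source': {'dir': '/var/tmp/mount_tag0'},  # host directory to share
    'target': {'dir': 'mount_tag0'},           # mount tag seen by the guest
    'binary': {
        'path': '/usr/libexec/virtiofsd',
        'cache_mode': 'none',
        'xattr': 'on',
        'lock_posix': 'on',
        'flock': 'on',
    },
}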
def run_virsh_domid(test, params, env):
    """
    Test command: virsh domid.

    The command returns basic information about the domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domid operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    if vm.is_alive() and params.get("start_vm") == "no":
        vm.destroy()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_ref = params.get("domid_vm_ref")
    extra = params.get("domid_extra", "")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")

    def remote_test(params, vm_name):
        """
        Test the remote case.
        """
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", "")
        status = 0
        output = ""
        err = ""
        try:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters "
                                        "not changed from default values.")
            uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domid %s" % (uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            if status != 0:
                err = output
            session.close()
        except error.CmdError:
            status = 1
            output = ""
            err = "remote test failed"
        return status, output, err

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        result = virsh.domid(vm_ref, ignore_status=True)
        status = result.exit_status
        output = result.stdout.strip()
        err = result.stderr.strip()
    else:
        status, output, err = remote_test(params, vm_name)

    # recover the libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0 or err == "":
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a running domain, so it can be
    restarted from the same state at a later time.
    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])

    # define a helper function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        # This time the vm should not be in the list
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        # This time the vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()
    libvirtd = params.get("managedsave_libvirtd", "on")

    # run test case
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or \
            vm_ref == "managedsave_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        vm_ref = "%s %s" % (vm_name, params.get("managedsave_extra_parame"))

    # stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    # Ignore exceptions with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    # recover the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave',
                                                r'\s+--running\s+'):
                # Older libvirt does not have the --running parameter
                raise error.TestNAError("Older libvirt does not handle "
                                        "arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        vm_recover_check(vm_name)
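# Illustrative sketch (an addition, not part of the test above): a natural
# follow-up check after a successful managedsave is that the managed-save
# image exists on disk. The directory below is the qemu driver's usual
# default and may differ across distributions, so treat it as an assumption.
import os


def managedsave_image_exists(vm_name,
                             save_dir="/var/lib/libvirt/qemu/save"):
    """Return True if a managed-save image exists for vm_name."""
    return os.path.exists(os.path.join(save_dir, "%s.save" % vm_name))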
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guests with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device.
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """

    def generate_expected_process_option(expected_results):
        """
        Generate the expected virtiofsd process options
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:

        1.Mount the dir in the guest;
        2.Write a file in the guest;
        3.Check that the md5sum values are the same in guests and host;
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                session.cmd('rm -rf %s' % fs_dev.source['dir'],
                            ignore_all_errors=False)
                session.cmd('mkdir -p %s' % fs_dev.source['dir'])
                logging.debug("mount the virtiofs dir in the guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   fs_dev.source['dir'])
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename = fs_dev.source['dir'] + '/' + vm.name
                    cmd = ("dd if=/dev/urandom of=%s bs=1M count=512 "
                           "oflag=direct" % filename)
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename)[1].strip()
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" % filename).stdout_text.strip()
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum values are not the same in guests and host")

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    source_dir_prefix = params.get("source_dir_prefix", "/dir")
    target_prefix = params.get("target_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_results = ""
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    try:
        # Define the filesystem device xml
        for index in range(fs_num):
            fsdev_keys = ['accessmode', 'driver', 'source', 'target',
                          'binary']
            accessmode = "passthrough"
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = str(source_dir_prefix) + str(index)
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = target_prefix + str(index)
            source = {'dir': source_dir}
            target = {'dir': target_dir}
            binary_keys = ['path', 'cache_mode', 'xattr', 'lock_posix',
                           'flock']
            binary_values = [path, cache_mode, xattr, lock_posix, flock]
            binary_dict = dict(zip(binary_keys, binary_values))
            fsdev_values = [accessmode, driver, source, target, binary_dict]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start the guests with the virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            hp_obj = test_setup.HugePageConfig(params)
            host_hp_size = hp_obj.get_hugepage_size()
            huge_pages_num += vmxml.max_mem // host_hp_size + 128
            utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if coldplug:
                ret = virsh.attach_device(vm_names[index], fs_devs[0].xml,
                                          flagstr='--config', debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                for fs in fs_devs:
                    vmxml.add_device(fs)
                vmxml.sync()
                logging.debug(vmxml)
            result = virsh.start(vm_names[index], debug=True)
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            cmd = 'ps aux | grep virtiofsd | head -n 1'
            utils_test.libvirt.check_cmd_output(cmd,
                                                content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0], ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = ("var/lib/libvirt/qemu/domain-" + domid +
                              '-' + vm_names[0])
                if result.exit_status:
                    test.fail("Get domid failed.")
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = domain_dir + alias + '-fs.pid'
                    expected_sock = alias + '-fs.sock'
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if not (status1 and status2):
                        test.fail("The socket and pid file is not as "
                                  "expected")
    finally:
        for vm in vms:
            if vm.is_alive():
                session = vm.wait_for_login()
                for fs_dev in fs_devs:
                    mount_dir = fs_dev.source['dir']
                    logging.debug(mount_dir)
                    session.cmd('umount -f %s' % mount_dir,
                                ignore_all_errors=True)
                    session.cmd('rm -rf %s' % mount_dir,
                                ignore_all_errors=True)
                session.close()
                vm.destroy(gracefully=False)
        for index in range(fs_num):
            process.run('rm -rf %s' % (source_dir_prefix + str(index)),
                        ignore_status=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
def run(test, params, env):
    """
    Test virsh domdisplay command, which returns the graphic URL.

    This test covers the vnc and spice types, in both readonly and
    readwrite mode. With the --include-password option, the password
    must also be checked in the result.
    """
    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Back up the domain xml for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Prepare for an ssl spice connection
        """
        # modify qemu.conf
        f_obj = open(qemu_conf, "r")
        cont = f_obj.read()
        f_obj.close()

        # remove the existing settings
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write the remaining content back, then append the ssl settings
        f_obj = open(qemu_conf, "w")
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
        f_obj.close()

        # make the modification take effect
        utils_libvirtd.libvirtd_restart()

        # Generate a CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        if is_ssl:
            # Back up qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            Graphics.del_graphic(vm_name)
            Graphics.add_ssl_spice_graphic(vm_name, passwd)
        else:
            # Only change the graphic type and password
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()
        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do the test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. "
                                     "Error: %s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error: %s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # The result differs depending on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get the active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Build the expected result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-password and have a password in the xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if expect == output:
            logging.info("Get correct display: %s", output)
        else:
            raise error.TestFail("Expect %s, but get %s" % (expect, output))
    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service was stopped,
      start the libvirtd service.)
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status

    # recover the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # cleanup
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")