def recover(self, params=None):
    """
    Recover test environment
    """
    if self.cpu_status:
        cpu.offline(self.cpu_num)
    else:
        cpu.online(self.cpu_num)
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    if os.path.exists(tmp_c_file):
        os.remove(tmp_c_file)
    if os.path.exists(tmp_exe_file):
        os.remove(tmp_exe_file)
    if 'memory_pid' in params:
        pid = int(params.get('memory_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        process.run("swapon -a", shell=True)
    if 'cpu_pid' in params:
        pid = int(params.get('cpu_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        tmp_sh_file = params.get("tmp_sh_file")
        if os.path.exists(tmp_sh_file):
            os.remove(tmp_sh_file)
    virsh.destroy(self.vm_name)
    if len(self.snp_list) < len(self.current_snp_list):
        self.diff_snp_list = list(
            set(self.current_snp_list) - set(self.snp_list))
        for item in self.diff_snp_list:
            virsh.snapshot_delete(self.vm_name, item)
    remove_machine_cgroup()
def _generate_backstore_attribute(params):
    """
    Create one disk with backingStore attribute by creating snapshot

    :param params: one dict to wrap up parameters
    """
    device_target = params.get("virt_disk_device_target")
    top_file_image_name = params.get("top_file_image_name")
    second_file_image_name = params.get("second_file_image_name")
    tmp_blkpull_path.append(top_file_image_name)
    tmp_blkpull_path.append(second_file_image_name)
    backing_chain_list.append(top_file_image_name)
    if vm.is_dead():
        vm.start()
    snapshot_tmp_name = "blockpull_tmp_snap"
    options = " %s --disk-only --diskspec %s,file=%s" % (
        snapshot_tmp_name, 'vda', second_file_image_name)
    options += " --diskspec %s,file=%s" % (device_target, top_file_image_name)
    virsh.snapshot_create_as(vm_name, options,
                             ignore_status=False, debug=True)
    vm.destroy()
    virsh.snapshot_delete(vm_name, snapshot_tmp_name, "--metadata",
                          ignore_status=False, debug=True)
    vmxml_dir = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("backstore prepare readiness :\n%s", vmxml_dir)
def cleanup_snapshots(vm, snap_del_disks=None):
    """
    Clean up snapshots

    :param vm: VM instance
    :param snap_del_disks: list containing snapshot files
    """
    snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm.name
    result_output = process.run(snapshot_list_cmd,
                                ignore_status=False,
                                shell=True).stdout_text
    for line in result_output.rsplit("\n"):
        strip_line = line.strip()
        if strip_line and "|" not in strip_line:
            if '+-' in strip_line:
                strip_line = strip_line.split()[-1]
            virsh.snapshot_delete(vm.name, strip_line, "--metadata",
                                  ignore_status=False, debug=True)
    # delete actual snapshot files if exists
    if snap_del_disks:
        for disk in snap_del_disks:
            if os.path.exists(disk):
                os.remove(disk)
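# Hedged illustration, not part of the test suite: the parsing loop above assumes
# that `virsh snapshot-list --tree` prints a tree drawn with "|" connector lines
# and "+-" branch markers. The sample output and snapshot names below are made up
# purely to show which lines the loop keeps.
def _demo_parse_snapshot_tree(tree_output):
    names = []
    for line in tree_output.split("\n"):
        strip_line = line.strip()
        # Skip blank lines and the "|" connector lines of the tree drawing.
        if strip_line and "|" not in strip_line:
            # Branch entries look like "+- <name>"; keep only the name.
            if '+-' in strip_line:
                strip_line = strip_line.split()[-1]
            names.append(strip_line)
    return names


_sample = ("base_snap\n"
           "  |\n"
           "  +- snap_a\n"
           "  |\n"
           "  +- snap_b\n")
# Prints: ['base_snap', 'snap_a', 'snap_b']
print(_demo_parse_snapshot_tree(_sample))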
def recover(self, params=None):
    """
    Recover test environment
    """
    cpu_enable = True if self.cpu_status else False
    utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    if os.path.exists(tmp_c_file):
        os.remove(tmp_c_file)
    if os.path.exists(tmp_exe_file):
        os.remove(tmp_exe_file)
    if 'memory_pid' in params:
        pid = int(params.get('memory_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        process.run("swapon -a", shell=True)
    if 'cpu_pid' in params:
        pid = int(params.get('cpu_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        tmp_sh_file = params.get("tmp_sh_file")
        if os.path.exists(tmp_sh_file):
            os.remove(tmp_sh_file)
    virsh.destroy(self.vm_name)
    if len(self.snp_list) < len(self.current_snp_list):
        self.diff_snp_list = list(set(self.current_snp_list) -
                                  set(self.snp_list))
        for item in self.diff_snp_list:
            virsh.snapshot_delete(self.vm_name, item)
    remove_machine_cgroup()
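# Hedged sketch, assuming (not shown in this file) that utils_misc.safe_kill()
# behaves roughly like the helper below: send a signal to a pid and report
# success or failure instead of raising, which is what the recover() methods
# above rely on when the stress process may already have exited.
import os
import signal


def demo_safe_kill(pid, sig=signal.SIGKILL):
    """Send `sig` to `pid`; return True on success, False otherwise."""
    try:
        os.kill(pid, sig)
        return True
    except OSError:
        # The process is already gone or we lack permission; during cleanup
        # this is treated as non-fatal.
        return False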
def clean_up_snapshots(vm_name, snapshot_list=[]):
    """
    Do recovery after snapshot

    :param vm_name: Name of domain
    :param snapshot_list: The list of snapshot names you want to remove
    """
    if not snapshot_list:
        # Get all snapshot names from virsh snapshot-list
        snapshot_list = virsh.snapshot_list(vm_name)

        # Get snapshot disk path
        for snap_name in snapshot_list:
            # Delete the useless disk snapshot file if it exists
            snap_xml = virsh.snapshot_dumpxml(vm_name,
                                              snap_name).stdout.strip()
            xtf_xml = xml_utils.XMLTreeFile(snap_xml)
            disks_path = xtf_xml.findall('disks/disk/source')
            for disk in disks_path:
                os.system('rm -f %s' % disk.get('file'))
            # Delete snapshots of vm
            virsh.snapshot_delete(vm_name, snap_name)
    else:
        # Get snapshot disk path from domain xml because
        # there is no snapshot info with the name
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile
        disk_path = dom_xml.find('devices/disk/source').get('file')
        for name in snapshot_list:
            snap_disk_path = disk_path.split(".")[0] + "." + name
            os.system('rm -f %s' % snap_disk_path)
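# Hedged illustration with hypothetical values: the else branch above derives an
# external snapshot file name by replacing everything after the first "." in the
# base image path with the snapshot name, so it only works for paths whose
# directories contain no dots.
def _demo_snapshot_disk_paths(disk_path, snapshot_list):
    return [disk_path.split(".")[0] + "." + name for name in snapshot_list]


# Prints: ['/var/lib/libvirt/images/guest.snap1', '/var/lib/libvirt/images/guest.snap2']
print(_demo_snapshot_disk_paths("/var/lib/libvirt/images/guest.qcow2",
                                ["snap1", "snap2"]))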
def recover(self, params=None):
    """
    Recover test environment
    """
    cpu_enable = True if self.cpu_status else False
    utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
    tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
    tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
    if os.path.exists(tmp_c_file):
        os.remove(tmp_c_file)
    if os.path.exists(tmp_exe_file):
        os.remove(tmp_exe_file)
    if 'memory_pid' in params:
        pid = int(params.get('memory_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        utils.run("swapon -a")
    if 'cpu_pid' in params:
        pid = int(params.get('cpu_pid'))
        utils_misc.safe_kill(pid, signal.SIGKILL)
        tmp_sh_file = params.get("tmp_sh_file")
        if os.path.exists(tmp_sh_file):
            os.remove(tmp_sh_file)
    virsh.destroy(self.vm_name)
    if len(self.snp_list) < len(self.current_snp_list):
        self.diff_snp_list = list(set(self.current_snp_list) -
                                  set(self.snp_list))
        for item in self.diff_snp_list:
            virsh.snapshot_delete(self.vm_name, item)
    remove_machine_cgroup()
def remove_snapshots(vm):
    remove_failed = 0
    snaps = virsh.snapshot_list(vm)
    for snap in snaps:
        try:
            virsh.snapshot_delete(vm, snap)
        except error.CmdError:
            remove_failed = remove_failed + 1
    return remove_failed
def teardown_commit_top_to_base():
    """
    Clean data.
    """
    LOG.info('Start cleaning up.')
    for ss in test_obj.snap_name_list:
        virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True)
    for sp in test_obj.snap_path_list:
        process.run('rm -rf %s' % sp)
    bkxml.sync()
    libvirt.setup_or_cleanup_iscsi(is_setup=False)
def remove_snapshots(vm):
    remove_failed = 0
    snaps = virsh.snapshot_list(vm)
    for snap in snaps:
        try:
            virsh.snapshot_delete(vm, snap)
        except error.CmdError:
            logging.debug("Can not remove snapshot %s.", snap)
            remove_failed = remove_failed + 1
    return remove_failed
def backingchain_common_teardown(self):
    """
    Clean up all newly created snapshots
    """
    LOG.info('Start cleaning up.')
    for ss in self.snap_name_list:
        virsh.snapshot_delete(self.vm.name, '%s --metadata' % ss, debug=True)
    for sp in self.snap_path_list:
        process.run('rm -f %s' % sp)
    # Clean up the leftover snapshot file of the first disk that was
    # created along with the new disk
    image_path = os.path.dirname(self.original_disk_source)
    if image_path != '':
        for sf in os.listdir(image_path):
            if 'snap' in sf:
                process.run('rm -f %s/%s' % (image_path, sf))
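# Hedged illustration with made-up file names: the loop above removes every file
# in the image directory whose name contains "snap". The throwaway-directory
# snippet below reproduces only that filtering step.
import os
import tempfile

_image_path = tempfile.mkdtemp()
for _name in ("base.qcow2", "base.snap1", "overlay.snap2"):
    open(os.path.join(_image_path, _name), "w").close()
# Prints (order may vary): ['base.snap1', 'overlay.snap2']
print(sorted(sf for sf in os.listdir(_image_path) if 'snap' in sf))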
def do_blockcommit_pivot_repeatedly():
    """
    Validate bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1857735
    """
    # Make an external snapshot, pivot and delete the snapshot file repeatedly.
    tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2"
    block_target = 'vda'
    for count in range(0, 5):
        options = "%s " % tmp_snapshot_name
        options += "--disk-only --atomic"
        disk_external = os.path.join(tmp_dir, tmp_snapshot_name)
        options += " --diskspec %s,snapshot=external,file=%s" % (block_target,
                                                                 disk_external)
        virsh.snapshot_create_as(vm_name, options,
                                 ignore_status=False, debug=True)
        virsh.blockcommit(vm_name, block_target, " --active --pivot ",
                          ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata")
        libvirt.delete_local_disk('file', disk_external)
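# Hedged illustration with made-up values: each iteration above builds the
# snapshot_create_as option string as below before handing it to virsh; the
# same overlay file is then committed back with --pivot and deleted again.
import os


def _demo_snapshot_create_options(snap_name, block_target, tmp_dir):
    options = "%s " % snap_name
    options += "--disk-only --atomic"
    disk_external = os.path.join(tmp_dir, snap_name)
    options += " --diskspec %s,snapshot=external,file=%s" % (block_target,
                                                             disk_external)
    return options


# Prints: external_snapshot_repeated.qcow2 --disk-only --atomic --diskspec vda,snapshot=external,file=/tmp/external_snapshot_repeated.qcow2
print(_demo_snapshot_create_options("external_snapshot_repeated.qcow2",
                                    "vda", "/tmp"))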
def test_with_label(vm, params, test):
    """
    Test nvdimm with label setting

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    :raises: test.fail if checkpoints fail
    """
    test_str = params.get('test_str')
    test_file = params.get('test_file')
    vm_name = params.get('main_vm')
    vm_session = vm.wait_for_login()
    # Create a file on the nvdimm device.
    create_file_within_nvdimm_disk(vm_session, test_file, test_str, test,
                                   block_size=4096)

    # Reboot the guest and remount the nvdimm device in the guest.
    # Check that the file foo-label still exists.
    vm_session.close()
    virsh.reboot(vm_name, debug=True)
    vm_session = vm.wait_for_login()
    vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
    if test_str not in vm_session.cmd('cat /mnt/foo-label '):
        test.fail('"%s" should be in output' % test_str)
    vm_session.close()

    if params.get('check_life_cycle', 'no') == 'yes':
        virsh.managedsave(vm_name, ignore_status=False, debug=True)
        vm.start()
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()

        vm_s1 = vm_name + ".s1"
        virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.restore(vm_s1, ignore_status=False, debug=True)
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()

        virsh.snapshot_create_as(vm_name, vm_s1, ignore_status=False,
                                 debug=True)
        virsh.snapshot_revert(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, vm_s1, ignore_status=False, debug=True)
                                      flagstr=attach_option)
                os.remove(disks_xml[i].xml)
                libvirt.check_exit_status(ret)

        # Check disks in VM after hotunplug.
        if check_patitions_hotunplug:
            if not check_vm_partitions(devices, device_targets, False):
                raise error.TestFail("See device in VM after hotunplug")

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        virsh.undefine(vm_name)
        virsh.define(vm_xml_file)
        os.remove(vm_xml_file)

        # Delete tmp files/disks.
        if qemu_conf_bak:
            shutil.copy(qemu_conf_bak, "/etc/libvirt/qemu.conf")
            os.remove(qemu_conf_bak)
            utils_libvirtd.libvirtd_restart()
def run(test, params, env): """ Test the tpm virtual devices 1. prepare a guest with different tpm devices 2. check whether the guest can be started 3. check the xml and qemu cmd line, even swtpm for vtpm 4. check tpm usage in guest os """ # Tpm passthrough supported since libvirt 1.0.5. if not libvirt_version.version_compare(1, 0, 5): test.cancel("Tpm device is not supported " "on current libvirt version.") # Tpm passthrough supported since qemu 2.12.0-49. if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False): test.cancel("Tpm device is not supported " "on current qemu version.") tpm_model = params.get("tpm_model") backend_type = params.get("backend_type") backend_version = params.get("backend_version") device_path = params.get("device_path") tpm_num = int(params.get("tpm_num", 1)) # After first start of vm with vtpm, do operations, check it still works vm_operate = params.get("vm_operate") # Sub-operation(e.g.domrename) under vm_operate(e.g.restart) vm_oprt = params.get("vm_oprt") secret_uuid = params.get("secret_uuid") secret_value = params.get("secret_value") # Change encryption state: from plain to encrypted, or reverse. encrypt_change = params.get("encrypt_change") secret_uuid = params.get("secret_uuid") prepare_secret = ("yes" == params.get("prepare_secret", "no")) remove_dev = ("yes" == params.get("remove_dev", "no")) multi_vms = ("yes" == params.get("multi_vms", "no")) # Remove swtpm state file rm_statefile = ("yes" == params.get("rm_statefile", "no")) test_suite = ("yes" == params.get("test_suite", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) no_backend = ("yes" == params.get("no_backend", "no")) status_error = ("yes" == params.get("status_error", "no")) err_msg = params.get("xml_errmsg", "") loader = params.get("loader", "") nvram = params.get("nvram", "") uefi_disk_url = params.get("uefi_disk_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2") # Check tpm chip on host for passthrough testing if backend_type == "passthrough": dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True) logging.debug("dmesg info about tpm:\n %s", dmesg_info) dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info) if dmesg_error: test.cancel(dmesg_error.group()) else: # Try to check host tpm chip version tpm_v = None if re.search("2.0 TPM", dmesg_info): tpm_v = "2.0" if not utils_package.package_install("tpm2-tools"): # package_install() return 'True' if succeed test.error("Failed to install tpm2-tools on host") else: if re.search("1.2 TPM", dmesg_info): tpm_v = "1.2" # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first if not utils_package.package_install("tpm-tools"): test.error("Failed to install tpm-tools on host") # Check host env for vtpm testing elif backend_type == "emulator": if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False): test.cancel("vtpm(emulator backend) is not supported " "on current qemu version.") # Install swtpm pkgs on host for vtpm emulation if not utils_package.package_install("swtpm*"): test.error("Failed to install swtpm swtpm-tools on host") def replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} 
vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk) vm_names = params.get("vms").split() vm_name = vm_names[0] vm = env.get_vm(vm_name) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() os_xml = getattr(vm_xml, "os") host_arch = platform.machine() if backend_type == "emulator" and host_arch == 'x86_64': if not utils_package.package_install("OVMF"): test.error("Failed to install OVMF or edk2-ovmf pkgs on host") if os_xml.xmltreefile.find('nvram') is None: replace_os_disk(vm_xml, vm_name, nvram) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy() vm2 = None if multi_vms: if len(vm_names) > 1: vm2_name = vm_names[1] vm2 = env.get_vm(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) vm2_xml_backup = vm2_xml.copy() else: # Clone additional vms if needed try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to install virt-install on host") vm2_name = "vm2_" + utils_misc.generate_random_string(5) ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360, debug=True) if ret_clone.exit_status: test.error("Need more than one domains, but error occured when virt-clone.") vm2 = vm.clone(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) if vm2.is_alive(): vm2.destroy() service_mgr = service.ServiceManager() def check_dumpxml(vm_name): """ Check whether the added devices are shown in the guest xml :param vm_name: current vm name """ logging.info("------Checking guest dumpxml------") if tpm_model: pattern = '<tpm model="%s">' % tpm_model else: # The default tpm model is "tpm-tis" pattern = '<tpm model="tpm-tis">' # Check tpm model xml_after_adding_device = VMXML.new_from_dumpxml(vm_name) logging.debug("xml after add tpm dev is %s", xml_after_adding_device) if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s tpm device xml " "in the guest xml file." % tpm_model) # Check backend type pattern = '<backend type="%s"' % backend_type if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend type xml for tpm dev " "in the guest xml file." % backend_type) # Check backend version if backend_version: check_ver = backend_version if backend_version != 'none' else '2.0' pattern = '"emulator" version="%s"' % check_ver if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend version xml for tpm dev " "in the guest xml file." % check_ver) # Check device path if backend_type == "passthrough": pattern = '<device path="/dev/tpm0"' if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s device path xml for tpm dev " "in the guest xml file." 
% device_path) # Check encryption secret if prepare_secret: pattern = '<encryption secret="%s" />' % encryption_uuid if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s secret uuid xml for tpm dev " "in the guest xml file." % encryption_uuid) logging.info('------PASS on guest dumpxml check------') def check_qemu_cmd_line(vm, vm_name, domid): """ Check whether the added devices are shown in the qemu cmd line :param vm: current vm :param vm_name: current vm name :param domid: domain id for checking vtpm socket file """ logging.info("------Checking qemu cmd line------") if not vm.get_pid(): test.fail('VM pid file missing.') with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Qemu cmd line info:\n %s", cmdline) # Check tpm model pattern_list = ["-device.%s" % tpm_model] # Check backend type if backend_type == "passthrough": dev_num = re.search(r"\d+", device_path).group() backend_segment = "id=tpm-tpm%s" % dev_num else: # emulator backend backend_segment = "id=tpm-tpm0,chardev=chrtpm" pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment)) # Check chardev socket for vtpm if backend_type == "emulator": pattern_list.append("-chardev.socket,id=chrtpm," "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)) for pattern in pattern_list: if not re.search(pattern, cmdline): if not remove_dev: test.fail("Can not find the %s for tpm device " "in qemu cmd line." % pattern) elif remove_dev: test.fail("%s still exists after remove vtpm and restart" % pattern) logging.info("------PASS on qemu cmd line check------") def check_swtpm(domid, domuuid, vm_name): """ Check swtpm cmdline and files for vtpm. :param domid: domain id for checking vtpm files :param domuuid: domain uuid for checking vtpm state file :param vm_name: current vm name """ logging.info("------Checking swtpm cmdline and files------") # Check swtpm cmdline swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name) if not swtpm_pid: if not remove_dev: test.fail('swtpm pid file missing.') else: return elif remove_dev: test.fail('swtpm pid file still exists after remove vtpm and restart') with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Swtpm cmd line info:\n %s", cmdline) pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"] if prepare_secret: pattern_list.extend(["--key", "--migration-key"]) for pattern in pattern_list: if not re.search(pattern, cmdline): test.fail("Can not find the %s for tpm device " "in swtpm cmd line." 
% pattern) # Check swtpm files file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)] file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid) file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name) file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name)) for swtpm_file in file_list: if not os.path.exists(swtpm_file): test.fail("Swtpm file: %s does not exist" % swtpm_file) logging.info("------PASS on Swtpm cmdline and files check------") def get_tpm2_tools_cmd(session=None): """ Get tpm2-tools pkg version and return corresponding getrandom cmd :session: guest console session :return: tpm2_getrandom cmd usage """ cmd = 'rpm -q tpm2-tools' get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text v_tools_list = get_v_tools.strip().split('-') if session: logging.debug("The tpm2-tools version is %s", v_tools_list[2]) v_tools = int(v_tools_list[2].split('.')[0]) return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex" def get_host_tpm_bef(tpm_v): """ Test host tpm function and identify its real version before passthrough Since sometimes dmesg info doesn't include tpm msg, need use tpm-tool or tpm2-tools to try the function. :param tpm_v: host tpm version get from dmesg info :return: host tpm version """ logging.info("------Checking host tpm device before passthrough------") # Try tcsd tool for suspected tpm1.2 chip on host tpm_real_v = tpm_v if tpm_v != "2.0": if not service_mgr.start('tcsd'): # service_mgr.start() return 'True' if succeed if tpm_v == "1.2": test.fail("Host tcsd.serivce start failed") else: # Means tpm_v got nothing from dmesg, log failure here and # go to next 'if' to try tpm2.0 tools. logging.info("Host tcsd.serivce start failed") else: tpm_real_v = "1.2" logging.info("Host tpm version info:") result = process.run("tpm_version", ignore_status=False) logging.debug("[host]# tpm_version\n %s", result.stdout) time.sleep(2) service_mgr.stop('tcsd') if tpm_v != "1.2": # Try tpm2.0 tools if not utils_package.package_install("tpm2-tools"): test.error("Failed to install tpm2-tools on host") tpm2_getrandom_cmd = get_tpm2_tools_cmd() if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.cancel("Both tcsd and tpm2-tools can not work, " "pls check your host tpm version and test env.") else: tpm_real_v = "2.0" logging.info("------PASS on host tpm device check------") return tpm_real_v def test_host_tpm_aft(tpm_real_v): """ Test host tpm function after passthrough :param tpm_real_v: host tpm real version indentified from testing """ logging.info("------Checking host tpm device after passthrough------") if tpm_real_v == "1.2": if service_mgr.start('tcsd'): time.sleep(2) service_mgr.stop('tcsd') test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") elif tpm_real_v == "2.0": tpm2_getrandom_cmd = get_tpm2_tools_cmd() if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") logging.info("------PASS on host tpm device check------") def test_guest_tpm(expect_version, session, expect_fail): """ Test tpm function in guest :param expect_version: guest tpm version, as host version, or emulator specified :param session: Guest session to be tested :param expect_fail: guest tpm is expectedly fail to work """ logging.info("------Checking 
guest tpm device work------") if expect_version == "1.2": # Install tpm-tools and test by tcsd method if not utils_package.package_install(["tpm-tools"], session, 360): test.error("Failed to install tpm-tools package in guest") else: status, output = session.cmd_status_output("systemctl start tcsd") logging.debug("Command output: %s", output) if status: if expect_fail: test.cancel("tpm-crb passthrough only works with host tpm2.0, " "but your host tpm version is 1.2") else: test.fail("Failed to start tcsd.service in guest") else: dev_output = session.cmd_output("ls /dev/|grep tpm") logging.debug("Command output: %s", dev_output) status, output = session.cmd_status_output("tpm_version") logging.debug("Command output: %s", output) if status: test.fail("Guest tpm can not work") else: # If expect_version is tpm2.0, install and test by tpm2-tools if not utils_package.package_install(["tpm2-tools"], session, 360): test.error("Failed to install tpm2-tools package in guest") else: tpm2_getrandom_cmd = get_tpm2_tools_cmd(session) status1, output1 = session.cmd_status_output("ls /dev/|grep tpm") logging.debug("Command output: %s", output1) status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd) logging.debug("Command output: %s", output2) if status1 or status2: if not expect_fail: test.fail("Guest tpm can not work") else: d_status, d_output = session.cmd_status_output("date") if d_status: test.fail("Guest OS doesn't work well") logging.debug("Command output: %s", d_output) elif expect_fail: test.fail("Expect fail but guest tpm still works") logging.info("------PASS on guest tpm device work check------") def run_test_suite_in_guest(session): """ Run kernel test suite for guest tpm. :param session: Guest session to be tested """ logging.info("------Checking kernel test suite for guest tpm------") boot_info = session.cmd('uname -r').strip().split('.') kernel_version = '.'.join(boot_info[:2]) # Download test suite per current guest kernel version parent_path = "https://cdn.kernel.org/pub/linux/kernel" if float(kernel_version) < 5.3: major_version = "5" file_version = "5.3" else: major_version = boot_info[0] file_version = kernel_version src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version) download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz") output = session.cmd_output(download_cmd, timeout=480) logging.debug("Command output: %s", output) # Install neccessary pkgs to build test suite if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360): test.fail("Failed to install specified pkgs in guest OS.") # Unzip the downloaded test suite status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root") if status: test.fail("Uzip failed: %s" % output) # Specify using python2 to run the test suite per supporting test_path = "/root/linux-%s/tools/testing/selftests" % file_version sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path output = session.cmd_output(sed_cmd) logging.debug("Command output: %s", output) # Build and and run the .sh files of test suite status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360) logging.debug("Command output: %s", output) if status: test.fail("Failed to run test suite in guest OS.") for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): test.fail("test suite check failed.") 
logging.info("------PASS on kernel test suite check------") def reuse_by_vm2(tpm_dev): """ Try to add same tpm to a second guest, when it's being used by one guest. :param tpm_dev: tpm device to be added into guest xml """ logging.info("------Trying to add same tpm to a second domain------") vm2_xml.remove_all_device_by_type('tpm') vm2_xml.add_device(tpm_dev) vm2_xml.sync() ret = virsh.start(vm2_name, ignore_status=True, debug=True) if backend_type == "passthrough": if ret.exit_status: logging.info("Expected failure when try to passthrough a tpm" " that being used by another guest") return test.fail("Reuse a passthroughed tpm should not succeed.") elif ret.exit_status: # emulator backend test.fail("Vtpm for each guest should not interfere with each other") try: tpm_real_v = None sec_uuids = [] new_name = "" virsh_dargs = {"debug": True, "ignore_status": False} vm_xml.remove_all_device_by_type('tpm') tpm_dev = Tpm() if tpm_model: tpm_dev.tpm_model = tpm_model if not no_backend: backend = tpm_dev.Backend() if backend_type != 'none': backend.backend_type = backend_type if backend_type == "passthrough": tpm_real_v = get_host_tpm_bef(tpm_v) logging.debug("The host tpm real version is %s", tpm_real_v) if device_path: backend.device_path = device_path if backend_type == "emulator": if backend_version != 'none': backend.backend_version = backend_version if prepare_secret: auth_sec_dict = {"sec_ephemeral": "no", "sec_private": "yes", "sec_desc": "sample vTPM secret", "sec_usage": "vtpm", "sec_name": "VTPM_example"} encryption_uuid = libvirt.create_secret(auth_sec_dict) if secret_value != 'none': virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(encryption_uuid) if encrypt_change != 'encrpt': # plain_to_encrypt will not add encryption on first start if secret_uuid == 'invalid': encryption_uuid = encryption_uuid[:-1] backend.encryption_secret = encryption_uuid if secret_uuid == "change": auth_sec_dict["sec_desc"] = "sample2 vTPM secret" auth_sec_dict["sec_name"] = "VTPM_example2" new_encryption_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(new_encryption_uuid) if secret_uuid == 'nonexist': backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tpm_dev.backend = backend logging.debug("tpm dev xml to add is:\n %s", tpm_dev) for num in range(tpm_num): vm_xml.add_device(tpm_dev, True) ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True) expected_match = "" if not err_msg: expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml) libvirt.check_result(ret, err_msg, "", False, expected_match) if err_msg: # Stop test when get expected failure return if vm_operate != "restart": check_dumpxml(vm_name) # For default model, no need start guest to test if tpm_model: expect_fail = False try: vm.start() except VMStartError as detail: if secret_value == 'none' or secret_uuid == 'nonexist': logging.debug("Expected failure: %s", detail) return else: test.fail(detail) domuuid = vm.get_uuid() if vm_operate or restart_libvirtd: # Make sure OS works before vm operate or restart libvirtd session = vm.wait_for_login() test_guest_tpm("2.0", session, False) session.close() if restart_libvirtd: utils_libvirtd.libvirtd_restart() swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid if vm_operate == "resume": virsh.suspend(vm_name, **virsh_dargs) time.sleep(3) virsh.resume(vm_name, **virsh_dargs) elif vm_operate == "snapshot": 
virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs) elif vm_operate in ["restart", "create"]: vm.destroy() if vm_operate == "create": virsh.undefine(vm_name, options="--nvram", **virsh_dargs) if os.path.exists(swtpm_statedir): test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir) virsh.create(vm_xml.xml, **virsh_dargs) else: if vm_oprt == "domrename": new_name = "vm_" + utils_misc.generate_random_string(5) virsh.domrename(vm_name, new_name, **virsh_dargs) new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) vm = new_vm vm_name = new_name elif secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) elif not restart_libvirtd: # remove_dev or do other vm operations during restart vm_xml.remove_all_device_by_type('tpm') if secret_uuid == "change" or encrypt_change: # Change secret uuid, or change encrytion state:from plain to encrypted, or on the contrary if encrypt_change == 'plain': # Change from encrypted state to plain:redefine a tpm dev without encryption tpm_dev = Tpm() tpm_dev.tpm_model = tpm_model backend = tpm_dev.Backend() backend.backend_type = backend_type backend.backend_version = backend_version else: # Use a new secret's uuid if secret_uuid == "change": encryption_uuid = new_encryption_uuid backend.encryption_secret = encryption_uuid tpm_dev.backend = backend logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev) vm_xml.add_device(tpm_dev, True) if encrypt_change in ['encrpt', 'plain']: # Avoid sync() undefine removing the state file vm_xml.define() else: vm_xml.sync() if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return if not remove_dev: check_dumpxml(vm_name) elif vm_operate == 'managedsave': virsh.managedsave(vm_name, **virsh_dargs) time.sleep(5) if secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return domid = vm.get_id() check_qemu_cmd_line(vm, vm_name, domid) if backend_type == "passthrough": if tpm_real_v == "1.2" and tpm_model == "tpm-crb": expect_fail = True expect_version = tpm_real_v test_host_tpm_aft(tpm_real_v) else: # emulator backend if remove_dev: expect_fail = True expect_version = backend_version check_swtpm(domid, domuuid, vm_name) session = vm.wait_for_login() if test_suite: run_test_suite_in_guest(session) else: test_guest_tpm(expect_version, session, expect_fail) session.close() if multi_vms: reuse_by_vm2(tpm_dev) if backend_type != "passthrough": #emulator backend check_dumpxml(vm2_name) domid = vm2.get_id() domuuid = vm2.get_uuid() check_qemu_cmd_line(vm2, vm2_name, domid) check_swtpm(domid, domuuid, vm2_name) session = vm2.wait_for_login() test_guest_tpm(backend_version, session, expect_fail) session.close() finally: # Remove renamed domain if it exists if new_name: virsh.remove_domain(new_name, 
"--nvram", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name) # Remove snapshot if exists if vm_operate == "snapshot": snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") if os.path.exists("/tmp/testvm_sp1"): os.remove("/tmp/testvm_sp1") # Clear guest os if test_suite: session = vm.wait_for_login() logging.info("Removing dir /root/linux-*") output = session.cmd_output("rm -rf /root/linux-*") logging.debug("Command output:\n %s", output) session.close() if vm_operate == "create": vm.define(vm_xml.xml) vm_xml_backup.sync(options="--nvram --managed-save") # Remove swtpm log file in case of impact on later runs if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name) for sec_uuid in set(sec_uuids): virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True) if vm2: if len(vm_names) > 1: vm2_xml_backup.sync(options="--nvram") else: virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
def run(test, params, env):
    """
    Test virsh snapshot command when the disk is of various types.

    (1). Init the variables from params.
    (2). Create an image of the specified format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variables for the snapshot xml file.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                               extra="--persistent --subdriver %s"
                                     % image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot='%s' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(
                        tmp_dir, "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s."
                                         % snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail("Success to create snapshot in negative case\n"
                                 "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)
        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail(
                "Revert snapshot failed. %s" % revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command succeeded, but VM is not "
                                     "paused after reverting with --paused"
                                     " option.")
        # Login to vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()
    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: test.cancel("%s value '%s' is not a number." % (i, params.get(i))) # False positive - img_val was filled in the for loop above. # pylint: disable=E0632 img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vm_os_xml = vmxml.os # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in list(disks.values()): disk_path = disk['source'] label = check_ownership(disk_path) if label: backup_labels_of_disks[disk_path] = label try: if vm_os_xml.nvram: nvram_path = vm_os_xml.nvram if not os.path.exists(nvram_path): # Need libvirt automatically generate the path vm.start() vm.destroy(gracefully=False) label = check_ownership(nvram_path) if label: backup_labels_of_disks[nvram_path] = label except xcepts.LibvirtXMLNotFoundError: logging.debug("vm xml don't have nvram element") # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for file_path in list(backup_labels_of_disks.keys()): if qemu_user == "root": os.chown(file_path, 0, 0) elif qemu_user == "qemu": os.chown(file_path, 107, 107) else: process.run('chown %s %s' % (qemu_user, file_path), shell=True) # Set selinux of host. if backup_sestatus == "disabled": test.cancel("SELinux is in Disabled mode." 
"It must be Enabled to" "run this test") utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s", qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s", export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.cancel("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.fail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug("attached image ownership on nfs server before " "start: %s", img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug("attached image ownership on nfs server after" " start: %s", img_label_after) if status_error: test.fail('Test succeeded in negative case.') except virt_vm.VMStartError as e: # Starting VM failed. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: test.fail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in list(disks_snap.values()): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except process.CmdError: test.fail("Detach disk 'vdf' from VM %s failed." 
% vm.name) finally: # clean up vm.destroy() qemu_conf.restore() for path, label in list(backup_labels_of_disks.items()): label_list = label.split(":") os.chown(path, int(label_list[0]), int(label_list[1])) if snapshot_name: backup_xml.sync("--snapshots-metadata") else: backup_xml.sync() for i in disk_snap_path: if i and os.path.exists(i): os.unlink(i) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except test.fail as detail: logging.error(str(detail)) utils_selinux.set_status(backup_sestatus) libvirtd.restart()
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts( secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = 
params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format( mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files. if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secret secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
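# The test above ties three pieces together: a libvirt secret carrying the
# cephx key, a <disk type='network'> element that references that secret, and
# an attach-device call that plugs the rbd-backed disk into the guest.  The
# sketch below restates that flow in isolation, driving the plain virsh CLI
# instead of the avocado-vt wrappers.  It is illustrative only: the file
# paths, the generated UUID, the monitor port and the helper name are
# assumptions rather than values taken from the test configuration above, and
# the cephx key is expected to be base64-encoded as ceph provides it.
import subprocess
import uuid


def attach_rbd_disk_sketch(vm_name, mon_host, rbd_image, auth_user, ceph_key,
                           target_dev="vdb"):
    """Define a cephx secret and attach an rbd-backed disk (sketch only)."""
    secret_uuid = str(uuid.uuid4())
    secret_xml = ("<secret ephemeral='no' private='no'>"
                  "<uuid>%s</uuid>"
                  "<usage type='ceph'><name>client.%s secret</name></usage>"
                  "</secret>" % (secret_uuid, auth_user))
    with open("/tmp/rbd_secret.xml", "w") as xml_file:
        xml_file.write(secret_xml)
    # Register the secret, then store the base64 cephx key in it.
    subprocess.run(["virsh", "secret-define", "/tmp/rbd_secret.xml"], check=True)
    subprocess.run(["virsh", "secret-set-value", secret_uuid, ceph_key],
                   check=True)
    # Network disk element pointing at the rbd image; <auth> refers back to
    # the secret defined above.
    disk_xml = ("<disk type='network' device='disk'>"
                "<driver name='qemu' type='raw'/>"
                "<auth username='%s'><secret type='ceph' uuid='%s'/></auth>"
                "<source protocol='rbd' name='%s'>"
                "<host name='%s' port='6789'/></source>"
                "<target dev='%s' bus='virtio'/>"
                "</disk>" % (auth_user, secret_uuid, rbd_image, mon_host,
                             target_dev))
    with open("/tmp/rbd_disk.xml", "w") as xml_file:
        xml_file.write(disk_xml)
    subprocess.run(["virsh", "attach-device", vm_name, "/tmp/rbd_disk.xml",
                    "--live"], check=True)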
def run(test, params, env): """ Test rng device options. 1.Prepare test environment, destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) def modify_rng_xml(dparams, sync=True): """ Modify interface xml options """ rng_model = dparams.get("rng_model", "virtio") rng_rate = dparams.get("rng_rate") backend_model = dparams.get("backend_model", "random") backend_type = dparams.get("backend_type") backend_dev = dparams.get("backend_dev", "") backend_source_list = dparams.get("backend_source", "").split() backend_protocol = dparams.get("backend_protocol") vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) rng_xml = rng.Rng() rng_xml.rng_model = rng_model if rng_rate: rng_xml.rate = ast.literal_eval(rng_rate) backend = rng.Rng.Backend() backend.backend_model = backend_model if backend_type: backend.backend_type = backend_type if backend_dev: backend.backend_dev = backend_dev if backend_source_list: source_list = [ast.literal_eval(source) for source in backend_source_list] backend.source = source_list if backend_protocol: backend.backend_protocol = backend_protocol rng_xml.backend = backend logging.debug("Rng xml: %s", rng_xml) if sync: vmxml.add_device(rng_xml) vmxml.xmltreefile.write() vmxml.sync() else: status = libvirt.exec_virsh_edit( vm_name, [(r":/<devices>/s/$/%s" % re.findall(r"<rng.*<\/rng>", str(rng_xml), re.M )[0].replace("/", "\/"))]) if not status: test.fail("Failed to edit vm xml") def check_qemu_cmd(dparams): """ Verify qemu-kvm command line. """ rng_model = dparams.get("rng_model", "virtio") rng_rate = dparams.get("rng_rate") backend_type = dparams.get("backend_type") backend_source_list = dparams.get("backend_source", "").split() cmd = ("ps -ef | grep %s | grep -v grep" % vm_name) chardev = src_host = src_port = None if backend_type == "tcp": chardev = "socket" elif backend_type == "udp": chardev = "udp" for bc_source in backend_source_list: source = ast.literal_eval(bc_source) if "mode" in source and source['mode'] == "connect": src_host = source['host'] src_port = source['service'] if chardev and src_host and src_port: cmd += (" | grep 'chardev %s,.*host=%s,port=%s'" % (chardev, src_host, src_port)) if rng_model == "virtio": cmd += (" | grep 'device virtio-rng-pci'") if rng_rate: rate = ast.literal_eval(rng_rate) cmd += (" | grep 'max-bytes=%s,period=%s'" % (rate['bytes'], rate['period'])) if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Cann't see rng option" " in command line") def check_host(): """ Check random device on host """ backend_dev = params.get("backend_dev") if backend_dev: cmd = "lsof |grep %s" % backend_dev ret = process.run(cmd, ignore_status=True, shell=True) if ret.exit_status or not ret.stdout.count("qemu"): test.fail("Failed to check random device" " on host, command output: %s" % ret.stdout) def check_snapshot(bgjob=None): """ Do snapshot operation and check the results """ snapshot_name1 = "snap.s1" snapshot_name2 = "snap.s2" if not snapshot_vm_running: vm.destroy(gracefully=False) ret = virsh.snapshot_create_as(vm_name, snapshot_name1) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name) if snapshot_name not in snap_lists: test.fail("Snapshot %s doesn't exist" % snapshot_name) if snapshot_vm_running: options = "--force" else: options = "" ret = virsh.snapshot_revert( vm_name, ("%s %s" % (snapshot_name, options))) libvirt.check_exit_status(ret) ret = 
virsh.dumpxml(vm_name) if ret.stdout.count("<rng model="): test.fail("Found rng device in xml") if snapshot_with_rng: if vm.is_alive(): vm.destroy(gracefully=False) if bgjob: bgjob.kill_func() modify_rng_xml(params, False) # Start the domain before disk-only snapshot if vm.is_dead(): # Add random server if params.get("backend_type") == "tcp": cmd = "cat /dev/random | nc -4 -l localhost 1024" bgjob = utils.AsyncJob(cmd) vm.start() vm.wait_for_login().close() err_msgs = ("live disk snapshot not supported" " with this QEMU binary") ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot_name2) if ret.exit_status: if ret.stderr.count(err_msgs): test.skip(err_msgs) else: test.fail("Failed to create external snapshot") snap_lists = virsh.snapshot_list(vm_name) if snapshot_name2 not in snap_lists: test.fail("Failed to check snapshot list") ret = virsh.domblklist(vm_name) if not ret.stdout.count(snapshot_name2): test.fail("Failed to find snapshot disk") def check_guest(session): """ Check random device on guest """ rng_files = ( "/sys/devices/virtual/misc/hw_random/rng_available", "/sys/devices/virtual/misc/hw_random/rng_current") rng_avail = session.cmd_output("cat %s" % rng_files[0], timeout=600).strip() rng_currt = session.cmd_output("cat %s" % rng_files[1], timeout=600).strip() logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt) if not rng_currt.count("virtio") or rng_currt not in rng_avail: test.fail("Failed to check rng file on guest") # Read the random device cmd = ("dd if=/dev/hwrng of=rng.test count=100" " && rm -f rng.test") ret, output = session.cmd_status_output(cmd, timeout=600) if ret: test.fail("Failed to read the random device") rng_rate = params.get("rng_rate") if rng_rate: rate_bytes, rate_period = ast.literal_eval(rng_rate).values() rate_conf = float(rate_bytes) / (float(rate_period)/1000) ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s", output, re.M) if not ret: test.fail("Can't find rate from output") rate_real = float(ret.group(1)) / float(ret.group(2)) logging.debug("Find rate: %s, config rate: %s", rate_real, rate_conf) if rate_real > rate_conf * 1.2: test.fail("The rate of reading exceed" " the limitation of configuration") if device_num > 1: rng_dev = rng_avail.split() if len(rng_dev) != device_num: test.skip("Multiple virtio-rng devices are not" " supported on this guest kernel. " "Bug: https://bugzilla.redhat.com/" "show_bug.cgi?id=915335") session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1])) # Read the random device if session.cmd_status(cmd, timeout=120): test.fail("Failed to read the random device") start_error = "yes" == params.get("start_error", "no") test_host = "yes" == params.get("test_host", "no") test_guest = "yes" == params.get("test_guest", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") snapshot_vm_running = "yes" == params.get("snapshot_vm_running", "no") snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no") snapshot_name = params.get("snapshot_name") device_num = int(params.get("device_num", 1)) if device_num > 1 and not libvirt_version.version_compare(1, 2, 7): test.skip("Multiple virtio-rng devices not " "supported on this libvirt version") # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Try to install rng-tools on host, it can speed up random rate # if installation failed, ignore the error and continue the test if utils_package.package_install(["rng-tools"]): rngd_conf = "/etc/sysconfig/rngd" rngd_srv = "/usr/lib/systemd/system/rngd.service" if os.path.exists(rngd_conf): # For rhel6 host, add extraoptions with open(rngd_conf, 'w') as f_rng: f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"') elif os.path.exists(rngd_srv): # For rhel7 host, modify start options rngd_srv_conf = "/etc/systemd/system/rngd.service" if not os.path.exists(rngd_srv_conf): shutil.copy(rngd_srv, rngd_srv_conf) process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd" " -f -r /dev/urandom -o /dev/random#' %s" % rngd_srv_conf, shell=True) process.run('systemctl daemon-reload') process.run("service rngd start") # Build the xml and run test. try: bgjob = None # Take snapshot if needed if snapshot_name: if snapshot_vm_running: vm.start() vm.wait_for_login().close() ret = virsh.snapshot_create_as(vm_name, snapshot_name) libvirt.check_exit_status(ret) # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Build vm xml. dparams = {} if device_num > 1: for i in xrange(device_num): dparams[i] = {"rng_model": params.get( "rng_model_%s" % i, "virtio")} dparams[i].update({"backend_model": params.get( "backend_model_%s" % i, "random")}) bk_type = params.get("backend_type_%s" % i) if bk_type: dparams[i].update({"backend_type": bk_type}) bk_dev = params.get("backend_dev_%s" % i) if bk_dev: dparams[i].update({"backend_dev": bk_dev}) bk_src = params.get("backend_source_%s" % i) if bk_src: dparams[i].update({"backend_source": bk_src}) bk_pro = params.get("backend_protocol_%s" % i) if bk_pro: dparams[i].update({"backend_protocol": bk_pro}) modify_rng_xml(dparams[i], False) else: modify_rng_xml(params, not test_snapshot) try: # Add random server if params.get("backend_type") == "tcp": cmd = "cat /dev/random | nc -4 -l localhost 1024" bgjob = utils.AsyncJob(cmd) # Start the VM. vm.start() if start_error: test.fail("VM started unexpectedly") if test_qemu_cmd: if device_num > 1: for i in xrange(device_num): check_qemu_cmd(dparams[i]) else: check_qemu_cmd(params) if test_host: check_host() session = vm.wait_for_login() if test_guest: check_guest(session) session.close() if test_snapshot: check_snapshot(bgjob) except virt_vm.VMStartError as details: logging.info(str(details)) if not start_error: test.fail('VM failed to start, ' 'please refer to https://bugzilla.' 'redhat.com/show_bug.cgi?id=1220252:' '\n%s' % details) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snapshot in snapshot_lists: virsh.snapshot_delete(vm_name, snapshot, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if bgjob: bgjob.kill_func()
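# check_guest() above derives the permitted /dev/hwrng read rate from the
# <rate> element of the virtio-rng device: `bytes` per `period` milliseconds
# works out to bytes / (period / 1000) bytes per second, and the measured dd
# throughput is allowed a 20% margin on top of that.  The helper below is a
# self-contained sketch of that calculation together with the rng XML shape it
# corresponds to; the element layout follows the libvirt domain schema, while
# the default values are illustrative rather than taken from the test
# configuration.
def virtio_rng_xml_and_rate(rate_bytes=1024, rate_period=1000,
                            backend_dev="/dev/urandom"):
    """Return (rng_xml, max_bytes_per_second) for a rate-limited virtio-rng."""
    rng_xml = ("<rng model='virtio'>"
               "<rate bytes='%d' period='%d'/>"
               "<backend model='random'>%s</backend>"
               "</rng>" % (rate_bytes, rate_period, backend_dev))
    # The period attribute is in milliseconds, so normalise to seconds first.
    max_bytes_per_second = float(rate_bytes) / (float(rate_period) / 1000)
    return rng_xml, max_bytes_per_second


# Example: 1024 bytes every 500 ms allows at most 2048 bytes/s; the guest-side
# check would only fail if dd measured more than 1.2 times that rate.
assert virtio_rng_xml_and_rate(1024, 500)[1] == 2048.0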
def run(test, params, env): """ 1. prepare a fc lun with one of following methods - create a scsi pool&vol - create a vhba 2. prepare the virtual disk xml, as one of following - source = /dev/disk/by-path - source = /dev/mapper/mpathX - source = pool&vol format 3. start a vm with above disk as vdb 4. create disk-only snapshot of vdb 5. check the snapshot-list and snapshot file's existence 6. mount vdb and touch file to it 7. revert the snapshot and check file's existence 8. delete snapshot 9. cleanup env. """ vm_name = params.get("main_vm", "avocado-vt-vm1") wwpn = params.get("wwpn", "WWPN_EXAMPLE") wwnn = params.get("wwnn", "WWNN_EXAMPLE") disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "file") disk_size = params.get("disk_size", "100M") device_target = params.get("device_target", "vdb") driver_name = params.get("driver_name", "qemu") driver_type = params.get("driver_type", "raw") target_bus = params.get("target_bus", "virtio") vd_format = params.get("vd_format", "") snapshot_dir = params.get("snapshot_dir", "/tmp") snapshot_name = params.get("snapshot_name", "s1") pool_name = params.get("pool_name", "") pool_target = params.get("pool_target", "/dev") snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no") new_vhbas = [] current_vhbas = [] new_vhba = [] path_to_blk = "" lun_sl = [] new_disk = "" pool_ins = None old_mpath_conf = "" mpath_conf_path = "/etc/multipath.conf" original_mpath_conf_exist = os.path.exists(mpath_conf_path) vm = env.get_vm(vm_name) online_hbas = utils_npiv.find_hbas("hba") if not online_hbas: raise exceptions.TestSkipError("There is no online hba cards.") old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, replace_existing=True) first_online_hba = online_hbas[0] old_vhbas = utils_npiv.find_hbas("vhba") if vm.is_dead(): vm.start() session = vm.wait_for_login() virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) old_disks = virt_vm.get_disks() if vm.is_alive(): vm.destroy(gracefully=False) if pool_name: pool_ins = libvirt_storage.StoragePool() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() try: # prepare a fc lun if vd_format in ['scsi_vol']: if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba, pool_target) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT) if not utils_npiv.is_vhbas_added(old_vhbas): raise exceptions.TestFail("vHBA not successfully created") current_vhbas = utils_npiv.find_hbas("vhba") new_vhba = list(set(current_vhbas).difference( set(old_vhbas)))[0] new_vhbas.append(new_vhba) new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("block device not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for( lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail("lun symbolic links not found under " "/dev/disk/by-path/ for blk dev %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) elif vd_format in ['mpath', 'by_path']: old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml( {"nodedev_parent": first_online_hba, "scsi_wwnn": wwnn, 
"scsi_wwpn": wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) if vd_format == "mpath": utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_TIMEOUT*5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) path_to_blk = "/dev/mapper/" + new_mpath_devs[0] elif vd_format == "by_path": new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("blk dev not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for( lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail("lun symbolic links not found in " "/dev/disk/by-path/ for %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) else: pass else: raise exceptions.TestSkipError("Not provided how to pass" "virtual disk to VM.") # create qcow2 file on the block device with specified size if path_to_blk: cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size) try: process.run(cmd, shell=True) except process.cmdError as detail: raise exceptions.TestFail("Fail to create qcow2 on blk dev: %s", detail) else: raise exceptions.TestFail("Don't have a vaild path to blk dev.") # prepare disk xml if "vol" in vd_format: vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_TIMEOUT*3) test_vol = list(vol_list.keys())[0] disk_params = {'type_name': disk_type, 'target_dev': device_target, 'target_bus': target_bus, 'source_pool': pool_name, 'source_volume': test_vol, 'driver_type': driver_type} else: disk_params = {'type_name': disk_type, 'device': disk_device, 'driver_name': driver_name, 'driver_type': driver_type, 'source_file': path_to_blk, 'target_dev': device_target, 'target_bus': target_bus} if vm.is_alive(): vm.destroy(gracefully=False) new_disk = disk.Disk() new_disk.xml = open(utlv.create_disk_xml(disk_params)).read() # start vm with the virtual disk vmxml.devices = vmxml.devices.append(new_disk) vmxml.sync() vm.start() session = vm.wait_for_login() cur_disks = virt_vm.get_disks() mount_disk = "".join(list(set(old_disks) ^ set(cur_disks))) # mkfs and mount disk in vm, create a file on that disk. if not mount_disk: logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks) raise exceptions.TestFail("No new disk found in vm.") mkfs_and_mount(session, mount_disk) create_file_in_vm(session, "/mnt/before_snapshot.txt", "before") # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path if snapshot_disk_only: vm_blks = list(vm.get_disk_devices().keys()) options = "%s --disk-only" % snapshot_name for vm_blk in vm_blks: snapshot_file = snapshot_dir + "/" + vm_blk + "." 
+ snapshot_name if os.path.exists(snapshot_file): os.remove(snapshot_file) options = options + " --diskspec %s,file=%s" % (vm_blk, snapshot_file) else: options = snapshot_name utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options)) # check virsh snapshot-list logging.debug("Running: snapshot-list %s", vm_name) snapshot_list = virsh.snapshot_list(vm_name) logging.debug("snapshot list is: %s", snapshot_list) if not snapshot_list: raise exceptions.TestFail("snapshots not found after creation.") # snapshot-revert doesn't support external snapshot for now. so # only check this with internal snapshot. if not snapshot_disk_only: create_file_in_vm(session, "/mnt/after_snapshot.txt", "after") logging.debug("Running: snapshot-revert %s %s", vm_name, snapshot_name) utlv.check_exit_status(virsh.snapshot_revert(vm_name, snapshot_name)) session = vm.wait_for_login() file_existence, file_content = get_file_in_vm(session, "/mnt/after_snapshot.txt") logging.debug("file exist = %s, file content = %s", file_existence, file_content) if file_existence: raise exceptions.TestFail("The file created " "after snapshot still exists.") file_existence, file_content = get_file_in_vm(session, "/mnt/before_snapshot.txt") logging.debug("file eixst = %s, file content = %s", file_existence, file_content) if ((not file_existence) or (file_content.strip() != "before")): raise exceptions.TestFail("The file created " "before snapshot is lost.") # delete snapshots # if diskonly, delete --metadata and remove files # if not diskonly, delete snapshot if snapshot_disk_only: options = "--metadata" else: options = "" for snap in snapshot_list: logging.debug("deleting snapshot %s with options %s", snap, options) result = virsh.snapshot_delete(vm_name, snap, options) logging.debug("result of snapshot-delete: %s", result.stdout.strip()) if snapshot_disk_only: vm_blks = list(vm.get_disk_devices().keys()) for vm_blk in vm_blks: snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap if os.path.exists(snapshot_file): os.remove(snapshot_file) snapshot_list = virsh.snapshot_list(vm_name) if snapshot_list: raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list) except Exception as detail: raise exceptions.TestFail("exception happens: %s", detail) finally: logging.debug("Start to clean up env...") vmxml_backup.sync() if pool_ins and pool_ins.pool_exists(pool_name): virsh.pool_destroy(pool_name) for new_vhba in new_vhbas: virsh.nodedev_destroy(new_vhba) utils_npiv.restart_multipathd() if old_mpath_conf: utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, conf_content=old_mpath_conf, replace_existing=True) if not original_mpath_conf_exist and os.path.exists(mpath_conf_path): os.remove(mpath_conf_path)
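# The disk-only branch above builds one --diskspec per guest block device so
# that every disk gets an explicit external snapshot file under snapshot_dir.
# The small helper below restates that option-string construction on its own;
# the function name and example values are illustrative, while the option
# format mirrors what the test passes to virsh.snapshot_create_as().
import os


def build_disk_only_snapshot_options(snapshot_name, vm_blks, snapshot_dir="/tmp"):
    """Return snapshot-create-as options for an external disk-only snapshot."""
    options = "%s --disk-only" % snapshot_name
    for vm_blk in vm_blks:
        # One external snapshot file per block device, named <dev>.<snapshot>.
        snapshot_file = os.path.join(snapshot_dir,
                                     "%s.%s" % (vm_blk, snapshot_name))
        options += " --diskspec %s,file=%s" % (vm_blk, snapshot_file)
    return options


# For a guest with vda and vdb this yields:
#   s1 --disk-only --diskspec vda,file=/tmp/vda.s1 --diskspec vdb,file=/tmp/vdb.s1
# which corresponds to: virsh snapshot-create-as <vm> s1 --disk-only --diskspec ...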
def run(test, params, env): """ Integration test of backup and backing_chain. Steps: 1. create a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. create some data on vdb 5. start a pull mode incremental backup 6. repeat steps 4 and 5 for several rounds 7. before the last round of backup job, do a blockcommit/pull/copy 8. check the full/incremental backup file data """ def run_blk_cmd(): """ Run blockcommit/blockpull/blockcopy command. """ def run_blockpull(): """ Run blockpull command. """ if from_to == "mid_to_top": cmd_option = ("--base {0}[{1}] --wait").format( original_disk_target, middle_layer1_index) elif from_to == "base_to_top": cmd_option = ("--base {0}[{1}] --wait").format( original_disk_target, base_layer_index) virsh.blockpull(vm_name, original_disk_target, cmd_option, debug=True, ignore_status=False) def run_blockcommit(): """ Run blockcommit command. """ if from_to == "top_to_base": # Do blockcommit from top layer to base layer cmd_option = ( "--top {0}[{1}] --base {0}[{2}] --active --pivot " "--wait".format(original_disk_target, top_layer_index, base_layer_index)) elif from_to == "mid_to_mid": # Do blockcommit from middle layer to another middle layer if len(indice) < 4: test.fail( "At least 4 layers are required for the test 'mid_to_mid'") cmd_option = ("--top {0}[{1}] --base {0}[{2}] " "--wait".format(original_disk_target, middle_layer1_index, middle_layer2_index)) elif from_to == "top_to_mid": # Do blockcommit from top layer to middle layer cmd_option = ( "--top {0}[{1}] --base {0}[{2}] --active --pivot " "--wait".format(original_disk_target, top_layer_index, middle_layer1_index)) elif from_to == "mid_to_base": # Do blockcommit from middle layer to base layer cmd_option = ("--top {0}[{1}] --base {0}[{2}] " "--wait".format(original_disk_target, middle_layer1_index, base_layer_index)) virsh.blockcommit(vm_name, original_disk_target, cmd_option, debug=True, ignore_status=False) def run_blockcopy(): """ Run blockcopy command.
""" copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2") cmd_option = "--wait --verbose --transient-job --pivot" if blockcopy_method == "shallow_copy": cmd_option += " --shallow" if blockcopy_reuse == "reuse_external": cmd_option += " --reuse-external" if blockcopy_method == "shallow_copy": create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s" create_img_cmd %= (backend_img, copy_dest) else: create_img_cmd = "qemu-img create -f qcow2 %s %s" create_img_cmd %= (copy_dest, original_disk_size) process.run(create_img_cmd, shell=True, ignore_status=False) virsh.blockcopy(vm_name, original_disk_target, copy_dest, cmd_option, debug=True, ignore_status=False) # Get disk backing store indice info in vm disk xml cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk") cur_test_disk_xml = '' for disk_xml in cur_disk_xmls: if disk_xml.target['dev'] == original_disk_target: cur_test_disk_xml = disk_xml logging.debug("Current disk xml for %s is:\n %s", original_disk_target, cur_test_disk_xml) break indice = re.findall(r".*index=['|\"](\d+)['|\"].*", str(cur_test_disk_xml)) logging.debug("backing store indice for %s is: %s", original_disk_target, indice) if len(indice) < 3: test.fail("At least 3 layers required for the test.") top_layer_index = indice[0] middle_layer1_index = indice[1] middle_layer2_index = indice[-2] base_layer_index = indice[-1] logging.debug( "Following backing store will be used: %s", "top:%s; middle_1: %s, middle_2:%s, base: %s" % (top_layer_index, middle_layer1_index, middle_layer2_index, base_layer_index)) # Start the block command if blockcommand == "blockpull": run_blockpull() if blockcommand == "blockcommit": run_blockcommit() if blockcommand == "blockcopy": run_blockcopy() def create_shutoff_snapshot(original_img, snapshot_img): """ Create shutoff snapshot, which means the disk snapshot is not controlled by libvirt, but created directly by qemu command. :param original_img: The image we will take shutoff snapshot for. :param snapshot_img: The newly created shutoff snapshot image. 
""" cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img) img_info = process.run(cmd, shell=True, ignore_status=False).stdout_text json_data = json.loads(img_info) cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format( original_img, snapshot_img) process.run(cmd, shell=True, ignore_status=False) try: bitmaps = json_data['format-specific']['data']['bitmaps'] for bitmap in bitmaps: bitmap_flags = bitmap['flags'] bitmap_name = bitmap['name'] if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags: cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format( snapshot_img, bitmap_name) process.run(cmd, shell=True, ignore_status=False) except Exception as bitmap_error: logging.debug("Cannot add bitmap to new image, skip it: %s", bitmap_error) # Cancel the test if libvirt version is too low if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") # vm's origianl disk config original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") # pull mode backup config scratch_type = params.get("scratch_type", "file") nbd_protocol = params.get("nbd_protocol", "tcp") nbd_tcp_port = params.get("nbd_tcp_port", "10809") # test config backup_rounds = int(params.get("backup_rounds", 4)) shutoff_snapshot = "yes" == params.get("shutoff_snapshot") blockcommand = params.get("blockcommand") from_to = params.get("from_to") blockcopy_method = params.get("blockcopy_method") blockcopy_reuse = params.get("blockcopy_reuse") backup_error = "yes" == params.get("backup_error") tmp_dir = data_dir.get_tmp_dir() try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() disks_not_tested = list(vmxml.get_disk_all().keys()) logging.debug("Not tested disks are: %s", disks_not_tested) utils_backup.enable_inc_backup_for_vm(vm) # Destroy vm before test if vm.is_alive(): vm.destroy(gracefully=False) # Prepare the disk to be backuped. 
disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "%s_image.qcow2" % original_disk_target disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target else: logging.cancel("The disk type '%s' not supported in this script.", original_disk_type) disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_file_list = [] snapshot_list = [] cur_disk_xml = disk_xml cur_disk_path = disk_path cur_disk_params = disk_params backend_img = "" for backup_index in range(backup_rounds): # Do external snapshot if shutoff_snapshot: virsh.detach_disk(vm.name, original_disk_target, extra="--persistent", ignore_status=False, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index) shutoff_snapshot_path = os.path.join(tmp_dir, shutoff_snapshot_name) create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path) cur_disk_params["source_file"] = shutoff_snapshot_path cur_disk_xml = libvirt.create_disk_xml(cur_disk_params) virsh.attach_device(vm.name, cur_disk_xml, flagstr="--config", ignore_status=False, debug=True) vm.start() vm.wait_for_login().close() cur_disk_path = shutoff_snapshot_path else: snapshot_name = "snap_%s" % str(backup_index) snapshot_option = "" snapshot_file_name = os.path.join(tmp_dir, snapshot_name) for disk_name in disks_not_tested: snapshot_option += "--diskspec %s,snapshot=no " % disk_name snapshot_option += "--diskspec %s,file=%s" % ( original_disk_target, snapshot_file_name) virsh.snapshot_create_as(vm_name, "%s --disk-only %s" % (snapshot_name, snapshot_option), debug=True) snapshot_list.append(snapshot_name) # Prepare backup xml backup_params = {"backup_mode": "pull"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {"name": "localhost", "port": nbd_tcp_port} backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Prepare nbd scratch file/dev params scratch_params = {"attrs": {}} scratch_file_name = "scratch_file_%s" % backup_index scratch_file_path = os.path.join(tmp_dir, scratch_file_name) scratch_params["attrs"]["file"] = scratch_file_path logging.debug("scratch_params: %s", scratch_params) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) 
backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) backup_file_path = os.path.join( tmp_dir, "backup_file_%s.qcow2" % str(backup_index)) backup_file_list.append(backup_file_path) nbd_params = { "nbd_protocol": nbd_protocol, "nbd_hostname": "localhost", "nbd_export": nbd_export_name, "nbd_tcp_port": nbd_tcp_port } if not is_incremental: # Do full backup utils_backup.pull_full_backup_to_file(nbd_params, backup_file_path) logging.debug("Full backup to: %s", backup_file_path) else: # Do incremental backup utils_backup.pull_incremental_backup_to_file( nbd_params, backup_file_path, nbd_bitmap_name, original_disk_size) virsh.domjobabort(vm_name, debug=True) # Start to run the blockcommit/blockpull cmd before the last round # of backup job, this is to test if the block command will keep the # dirty bitmap data. 
if backup_index == backup_rounds - 2: run_blk_cmd() cur_disk_path = vm.get_blk_devices( )[original_disk_target]['source'] if backup_index == backup_rounds - 3: backend_img = vm.get_blk_devices( )[original_disk_target]['source'] # Get current active image for the test disk vm_disks = vm.get_blk_devices() current_active_image = vm_disks[original_disk_target]['source'] logging.debug("The current active image for '%s' is '%s'", original_disk_target, current_active_image) for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True, ignore_status=False) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % ( current_active_image, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_file in backup_file_list: if not utils_backup.cmp_backup_data(original_data_file, backup_file): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (current_active_image, backup_file)) else: logging.debug("'%s' contains correct backup data", backup_file) except utils_backup.BackupBeginError as details: if backup_error: logging.debug("Backup failed as expected.") else: test.fail(details) finally: # Remove checkpoints' metadata again to make sure vm has no checkpoints if "checkpoint_list" in locals(): for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, options="--metadata") # Remove snapshots if "snapshot_list" in locals(): for snapshot_name in snapshot_list: virsh.snapshot_delete(vm_name, "%s --metadata" % snapshot_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() for file_name in os.listdir(tmp_dir): file_path = os.path.join(tmp_dir, file_name) if 'env' not in file_path: if os.path.isfile(file_path): os.remove(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path)
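# In the backup rounds above, the backup_params/cp_params dictionaries are
# turned into XML by utils_backup.create_backup_xml() and
# utils_backup.create_checkpoint_xml().  For readers who do not have those
# helpers in front of them, the sketch below shows roughly what a pull-mode
# <domainbackup> document and its companion <domaincheckpoint> look like for
# one round of this test.  The element layout follows the libvirt
# formatbackup/formatcheckpoint schema; the concrete port, scratch path and
# names are illustrative stand-ins for the parameters used above.
def pull_backup_and_checkpoint_xml(round_index, target="vdb", port="10809",
                                   scratch="/tmp/scratch_file_0"):
    """Return (backup_xml, checkpoint_xml) strings for one backup round."""
    incremental = ""
    if round_index > 0:
        # Incremental rounds reference the checkpoint of the previous round.
        incremental = ("<incremental>checkpoint_%d</incremental>"
                       % (round_index - 1))
    backup_xml = ("<domainbackup mode='pull'>"
                  "%s"
                  "<server transport='tcp' name='localhost' port='%s'/>"
                  "<disks>"
                  "<disk name='%s' backup='yes' type='file'>"
                  "<scratch file='%s'/></disk>"
                  "</disks>"
                  "</domainbackup>" % (incremental, port, target, scratch))
    checkpoint_xml = ("<domaincheckpoint>"
                      "<name>checkpoint_%d</name>"
                      "<disks><disk name='%s' checkpoint='bitmap'/></disks>"
                      "</domaincheckpoint>" % (round_index, target))
    return backup_xml, checkpoint_xml


# virsh backup-begin <vm> <backup.xml> <checkpoint.xml> then exports the target
# disk over NBD on the configured port, which is what the test's
# pull_full_backup_to_file/pull_incremental_backup_to_file calls consume.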
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Global variable to store max/current memory, # it may change after attach/detach new_max_mem = None new_cur_mem = None def consume_vm_mem(size=1000, timeout=360): """ To consume guest memory, default size is 1000M """ session = vm.wait_for_login() # Mount tmpfs on /mnt and write to a file on it, # it is the memory operation sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs " "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M" " count={0}".format(size)) session.cmd(sh_cmd, timeout=timeout) session.close() def mount_hugepages(page_size): """ To mount hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc """ if page_size == 4: perm = "" else: perm = "pagesize=%dK" % page_size tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_status: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm) def setup_hugepages(page_size=2048, shp_num=2000): """ To setup hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc :param shp_num: number of hugepage, string type """ mount_hugepages(page_size) utils_memory.set_num_huge_pages(shp_num) config.hugetlbfs_mount = ["/dev/hugepages"] utils_libvirtd.libvirtd_restart() def restore_hugepages(page_size=4): """ To recover hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc """ mount_hugepages(page_size) config.restore() utils_libvirtd.libvirtd_restart() def check_qemu_cmd(max_mem_rt, tg_size): """ Check qemu command line options. :param max_mem_rt: size of max memory :param tg_size: Target hotplug memory size :return: None """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if max_mem_rt: cmd += (" | grep 'slots=%s,maxmem=%sk'" % (max_mem_slots, max_mem_rt)) if tg_size: size = int(tg_size) * 1024 cmd_str = 'memdimm.\|memory-backend-ram,id=ram-node.' cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s" % (cmd_str, size)) if pg_size: cmd += ",host-nodes=%s" % node_mask if numa_memnode: for node in numa_memnode: if ('nodeset' in node and node['nodeset'] in node_mask): cmd += ",policy=%s" % node['mode'] cmd += ".*pc-dimm,node=%s" % tg_node if mem_addr: cmd += (".*slot=%s,addr=%s" % (mem_addr['slot'], int(mem_addr['base'], 16))) cmd += "'" # Run the command result = process.run(cmd, shell=True, verbose=True, ignore_status=True) if result.exit_status: test.fail('Qemu command check fail.') def check_guest_meminfo(old_mem, check_option): """ Check meminfo on guest. """ assert old_mem is not None session = vm.wait_for_login() # Hot-plugged memory should be online by udev rules udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules" udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",' ' ATTR{state}=="offline", ATTR{state}="online"') cmd = ("grep memory %s || echo '%s' >> %s" % (udev_file, udev_rules, udev_file)) session.cmd(cmd) # Wait a while for new memory to be detected. 
utils_misc.wait_for( lambda: vm.get_totalmem_sys(online) != int(old_mem), 30, first=20.0) new_mem = vm.get_totalmem_sys(online) session.close() logging.debug("Memtotal on guest: %s", new_mem) no_of_times = 1 if at_times: no_of_times = at_times if check_option == "attach": if new_mem != int(old_mem) + (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "attach memory device") if check_option == "detach": if new_mem != int(old_mem) - (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "detach memory device") def check_dom_xml(at_mem=False, dt_mem=False): """ Check domain xml options. """ # Global variable to store max/current memory global new_max_mem global new_cur_mem if attach_option.count("config"): dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) else: dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) try: xml_max_mem_rt = int(dom_xml.max_mem_rt) xml_max_mem = int(dom_xml.max_mem) xml_cur_mem = int(dom_xml.current_mem) assert int(max_mem_rt) == xml_max_mem_rt # Check attached/detached memory if at_mem: if at_times: assert int(max_mem) + (int(tg_size) * at_times) == xml_max_mem else: assert int(max_mem) + int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory if at_times: assert int(cur_mem) + (int(tg_size) * at_times) == xml_cur_mem else: assert int(cur_mem) + int(tg_size) == xml_cur_mem new_max_mem = xml_max_mem new_cur_mem = xml_cur_mem mem_dev = dom_xml.get_devices("memory") memory_devices = 1 if at_times: memory_devices = at_times if len(mem_dev) != memory_devices: test.fail("Found wrong number of memory device") assert int(tg_size) == int(mem_dev[0].target.size) assert int(tg_node) == int(mem_dev[0].target.node) elif dt_mem: if at_times: assert int(new_max_mem) - (int(tg_size) * at_times) == xml_max_mem assert int(new_cur_mem) - (int(tg_size) * at_times) == xml_cur_mem else: assert int(new_max_mem) - int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory assert int(new_cur_mem) - int(tg_size) == xml_cur_mem except AssertionError: utils_misc.log_last_traceback() test.fail("Found unmatched memory setting from domain xml") def check_mem_align(): """ Check if set memory align to 256 """ dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) dom_mem = {} dom_mem['maxMemory'] = int(dom_xml.max_mem_rt) dom_mem['memory'] = int(dom_xml.memory) dom_mem['currentMemory'] = int(dom_xml.current_mem) cpuxml = dom_xml.cpu numa_cell = cpuxml.numa_cell dom_mem['numacellMemory'] = int(numa_cell[0]['memory']) sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell]) attached_mem = dom_xml.get_devices(device_type='memory')[0] dom_mem['attached_mem'] = attached_mem.target.size all_align = True for key in dom_mem: logging.info('%-20s:%15d', key, dom_mem[key]) if dom_mem[key] % 256: logging.error('%s not align to 256', key) all_align = False if not all_align: test.fail('Memory not align to 256') if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']: logging.info('Check Pass: Memory is equal to (all numa memory + memory device)') else: test.fail('Memory is not equal to (all numa memory + memory device)') return dom_mem def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check 
vm status vm.wait_for_login().close() def add_device(dev_xml, attach, at_error=False): """ Add memory device by attachment or modify domain xml. """ if attach: ret = virsh.attach_device(vm_name, dev_xml.xml, flagstr=attach_option, debug=True) libvirt.check_exit_status(ret, at_error) else: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if numa_cells: del vmxml.max_mem del vmxml.current_mem vmxml.add_device(dev_xml) vmxml.sync() def modify_domain_xml(): """ Modify domain xml and define it. """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) mem_unit = params.get("mem_unit", "KiB") vcpu = params.get("vcpu", "4") if max_mem_rt: vmxml.max_mem_rt = int(max_mem_rt) vmxml.max_mem_rt_slots = max_mem_slots vmxml.max_mem_rt_unit = mem_unit if memory_val: vmxml.memory = int(memory_val) if vcpu: vmxml.vcpu = int(vcpu) vcpu_placement = params.get("vcpu_placement", "static") vmxml.placement = vcpu_placement if numa_memnode: vmxml.numa_memory = {} vmxml.numa_memnode = numa_memnode else: try: del vmxml.numa_memory del vmxml.numa_memnode except Exception: # Not exists pass if numa_cells: cells = [ast.literal_eval(x) for x in numa_cells] # Rounding the numa memory values if align_mem_values: for cell in range(cells.__len__()): memory_value = str(utils_numeric.align_value( cells[cell]["memory"], align_to_value)) cells[cell]["memory"] = memory_value cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" cpu_mode = params.get("cpu_mode") model_fallback = params.get("model_fallback") if cpu_mode: cpu_xml.mode = cpu_mode if model_fallback: cpu_xml.fallback = model_fallback cpu_xml.numa_cell = cells vmxml.cpu = cpu_xml # Delete memory and currentMemory tag, # libvirt will fill it automatically del vmxml.max_mem del vmxml.current_mem # hugepages setting if huge_pages: membacking = vm_xml.VMMemBackingXML() hugepages = vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(huge_pages)): pagexml = hugepages.PageXML() pagexml.update(huge_pages[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml: %s", vmxml) vmxml.sync() pre_vm_state = params.get("pre_vm_state", "running") attach_device = "yes" == params.get("attach_device", "no") detach_device = "yes" == params.get("detach_device", "no") attach_error = "yes" == params.get("attach_error", "no") start_error = "yes" == params.get("start_error", "no") detach_error = "yes" == params.get("detach_error", "no") maxmem_error = "yes" == params.get("maxmem_error", "no") attach_option = params.get("attach_option", "") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_managedsave = "yes" == params.get("test_managedsave", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_mem_binding = "yes" == params.get("test_mem_binding", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") add_mem_device = "yes" == params.get("add_mem_device", "no") test_dom_xml = "yes" == params.get("test_dom_xml", "no") max_mem = params.get("max_mem") max_mem_rt = params.get("max_mem_rt") max_mem_slots = params.get("max_mem_slots", "16") memory_val = params.get('memory_val', '') mem_align = 'yes' == params.get('mem_align', 'no') hot_plug = 'yes' == params.get('hot_plug', 'no') cur_mem = params.get("current_mem") numa_cells = params.get("numa_cells", "").split() set_max_mem = params.get("set_max_mem") align_mem_values = "yes" == params.get("align_mem_values", "no") align_to_value = int(params.get("align_to_value", "65536")) hot_reboot 
= "yes" == params.get("hot_reboot", "no") rand_reboot = "yes" == params.get("rand_reboot", "no") guest_known_unplug_errors = [] guest_known_unplug_errors.append(params.get("guest_known_unplug_errors")) host_known_unplug_errors = [] host_known_unplug_errors.append(params.get("host_known_unplug_errors")) # params for attached device mem_model = params.get("mem_model", "dimm") tg_size = params.get("tg_size") tg_sizeunit = params.get("tg_sizeunit", 'KiB') tg_node = params.get("tg_node", 0) pg_size = params.get("page_size") pg_unit = params.get("page_unit", "KiB") node_mask = params.get("node_mask", "0") mem_addr = ast.literal_eval(params.get("memory_addr", "{}")) huge_pages = [ast.literal_eval(x) for x in params.get("huge_pages", "").split()] numa_memnode = [ast.literal_eval(x) for x in params.get("numa_memnode", "").split()] at_times = int(params.get("attach_times", 1)) online = params.get("mem_online", "no") config = utils_config.LibvirtQemuConfig() setup_hugepages_flag = params.get("setup_hugepages") if (setup_hugepages_flag == "yes"): setup_hugepages(int(pg_size)) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not libvirt_version.version_compare(1, 2, 14): test.cancel("Memory hotplug not supported in current libvirt version.") if 'align_256m' in params.get('name', ''): arch = platform.machine() if arch.lower() != 'ppc64le': test.cancel('This case is for ppc64le only.') if align_mem_values: # Rounding the following values to 'align' max_mem = utils_numeric.align_value(max_mem, align_to_value) max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value) cur_mem = utils_numeric.align_value(cur_mem, align_to_value) tg_size = utils_numeric.align_value(tg_size, align_to_value) try: # Drop caches first for host has enough memory drop_caches() # Destroy domain first if vm.is_alive(): vm.destroy(gracefully=False) modify_domain_xml() # Start the domain any way if attach memory device old_mem_total = None if attach_device: vm.start() session = vm.wait_for_login() old_mem_total = vm.get_totalmem_sys(online) logging.debug("Memtotal on guest: %s", old_mem_total) session.close() dev_xml = None # To attach the memory device. if add_mem_device and not hot_plug: at_times = int(params.get("attach_times", 1)) dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model) randvar = 0 rand_value = random.randint(15, 25) logging.debug("reboots at %s", rand_value) for x in xrange(at_times): # If any error excepted, command error status should be # checked in the last time randvar = randvar + 1 logging.debug("attaching device count = %s", x) if x == at_times - 1: add_device(dev_xml, attach_device, attach_error) else: add_device(dev_xml, attach_device) if hot_reboot: vm.reboot() vm.wait_for_login() if rand_reboot and randvar == rand_value: randvar = 0 rand_value = random.randint(15, 25) logging.debug("reboots at %s", rand_value) vm.reboot() vm.wait_for_login() # Check domain xml after attach device. 
if test_dom_xml: check_dom_xml(at_mem=attach_device) # Set domain state if pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Cann't create the domain") elif vm.is_dead(): try: vm.start() vm.wait_for_login().close() except virt_vm.VMStartError as detail: if start_error: pass else: except_msg = "memory hotplug isn't supported by this QEMU binary" if except_msg in detail.reason: test.cancel(detail) test.fail(detail) # Set memory operation if set_max_mem: max_mem_option = params.get("max_mem_option", "") ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option) libvirt.check_exit_status(ret, maxmem_error) # Hotplug memory device if add_mem_device and hot_plug: process.run('ps -ef|grep qemu', shell=True, verbose=True) session = vm.wait_for_login() original_mem = vm.get_totalmem_sys() dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model) add_device(dev_xml, True) mem_after = vm.get_totalmem_sys() params['delta'] = mem_after - original_mem # Check domain xml after start the domain. if test_dom_xml: check_dom_xml(at_mem=attach_device) if mem_align: dom_mem = check_mem_align() check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem']) if hot_plug and params['delta'] != dom_mem['attached_mem']: test.fail('Memory after attach not equal to original mem + attached mem') # Check qemu command line if test_qemu_cmd: check_qemu_cmd(max_mem_rt, tg_size) # Check guest meminfo after attachment if (attach_device and not attach_option.count("config") and not any([attach_error, start_error])): check_guest_meminfo(old_mem_total, check_option="attach") # Consuming memory on guest, # to verify memory changes by numastat if test_mem_binding: pid = vm.get_pid() old_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", old_numastat) consume_vm_mem() new_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", new_numastat) # Only check total memory which is the last element if float(new_numastat[-1]) - float(old_numastat[-1]) < 0: test.fail("Numa memory can't be consumed on guest") # Run managedsave command to check domain xml. 
if test_managedsave: ret = virsh.managedsave(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) vm.start() vm.wait_for_login().close() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Run save and restore command to check domain xml if test_save_restore: check_save_restore() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check domain xml after restarting libvirtd if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Detach the memory device unplug_failed_with_known_error = False if detach_device: if not dev_xml: dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model) for x in xrange(at_times): ret = virsh.detach_device(vm_name, dev_xml.xml, flagstr=attach_option) if ret.stderr and host_known_unplug_errors: for known_error in host_known_unplug_errors: if (known_error[0] == known_error[-1]) and \ known_error.startswith(("'")): known_error = known_error[1:-1] if known_error in ret.stderr: unplug_failed_with_known_error = True logging.debug("Known error occured in Host, while" " hot unplug: %s", known_error) if unplug_failed_with_known_error: break try: libvirt.check_exit_status(ret, detach_error) except Exception as detail: dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=5) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if os.path.exists(dmesg_file): with open(dmesg_file, 'r') as f: flag = re.findall( r'memory memory\d+?: Offline failed', f.read()) if not flag: # The attached memory is used by vm, and it could not be unplugged # The result is expected os.remove(dmesg_file) test.fail(detail) unplug_failed_with_known_error = True os.remove(dmesg_file) # Check whether a known error occured or not dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=4) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if guest_known_unplug_errors and os.path.exists(dmesg_file): for known_error in guest_known_unplug_errors: if (known_error[0] == known_error[-1]) and \ known_error.startswith(("'")): known_error = known_error[1:-1] with open(dmesg_file, 'r') as f: if known_error in f.read(): unplug_failed_with_known_error = True logging.debug("Known error occured, while hot unplug" ": %s", known_error) if test_dom_xml and not unplug_failed_with_known_error: check_dom_xml(dt_mem=detach_device) # Remove dmesg temp file if os.path.exists(dmesg_file): os.remove(dmesg_file) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") if (setup_hugepages_flag == "yes"): restore_hugepages() vmxml_backup.sync()
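The attach path in the test above builds the memory device through utils_hotplug.create_mem_xml and then drives virsh attach-device. For reference, a minimal sketch of an equivalent dimm device attached with plain virsh; the domain name and the 512 MiB size are placeholders, and the sketch assumes the domain already carries maxMemory/slots and a guest NUMA topology with node 0.

import subprocess
import tempfile

# A 512 MiB dimm targeted at guest NUMA node 0; <maxMemory> with slots must
# already be present in the domain XML for the hotplug to be accepted.
DIMM_XML = """
<memory model='dimm'>
  <target>
    <size unit='KiB'>524288</size>
    <node>0</node>
  </target>
</memory>
"""


def attach_dimm(domain="avocado-vt-vm1"):
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as xml_file:
        xml_file.write(DIMM_XML)
        path = xml_file.name
    # --live hotplugs into the running guest only; add --config to persist
    subprocess.run(["virsh", "attach-device", domain, path, "--live"], check=True)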
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"] if snapshot_with_pool: if pool_type not in supported_pool_list: raise error.TestNAError("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed": raise error.TestNAError("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir tmp_dir = data_dir.get_tmp_dir() snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"]) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. 
pv = libvirt_storage.PoolVolume(pool_name) vols = pv.list_volumes().keys() if vols: vol_name = vols[0] else: raise error.TestNAError("No volume in pool: %s", pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = utils.run(cmd, ignore_status=True) if cmd_result.exit_status: raise error.TestNAError("Failed to format volume, %s" % cmd_result.stdout.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format # Do the attach action. out = utils.run("qemu-img info %s" % img_path) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. if snapshot_from_xml: snapshot_name = "snapshot_test" lines = ["<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n"] if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snapshot_external_disk.append(memory_external) lines.append("<memory snapshot=\'%s\' file='%s'/>\n" % (snapshot_memory, memory_external)) else: lines.append("<memory snapshot='%s'/>\n" % snapshot_memory) # Add all disks into xml file. disks = vm.get_disk_devices().values() lines.append("<disks>\n") for disk in disks: lines.append("<disk name='%s' snapshot='%s'>\n" % (disk['source'], snapshot_disk)) if snapshot_disk == "external": snap_path = "%s.snap" % os.path.basename(disk['source']) disk_external = os.path.join(tmp_dir, snap_path) snapshot_external_disk.append(disk_external) lines.append("<source file='%s'/>\n" % disk_external) lines.append("</disk>\n") lines.append("</disks>\n") lines.append("</domainsnapshot>") snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() logging.debug("The xml content for snapshot create is:") with open(snapshot_xml_path, 'r') as fin: logging.debug(fin.read()) options += " --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search("live disk snapshot not supported with this QEMU binary", out_err): raise error.TestNAError(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. 
Handle it as SKIP # for now. This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search("internal snapshot of a running VM" + " must include the memory state", out_err): raise error.TestNAError("Check Bug #1083345, %s" % out_err) raise error.TestFail("Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: lines = ["<domainsnapshot>\n", "<description>Snapshot Test</description>\n", "<state>running</state>\n", "<creationTime>%s</creationTime>" % snapshot_name, "</domainsnapshot>"] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() logging.debug("The xml content for snapshot create is:") with open(snapshot_xml_path, 'r') as fin: logging.debug(fin.read()) options += "--redefine %s --current" % snapshot_xml_path if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." % snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: raise error.TestFail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: raise error.TestFail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # As commit d410e6f for libvirt 1.2.3, attempts to revert external # snapshots will FAIL with an error "revert to external snapshot # not supported yet". Thus, let's check for that and handle as a # SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if libvirt_version.version_compare(1, 2, 3): if re.search("revert to external snapshot not supported yet", revert_result.stderr): raise error.TestNAError(revert_result.stderr.strip()) else: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail("Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. 
session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: raise error.TestFail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." raise error.TestFail(err_msg) else: if os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except error.TestFail, detail: logging.error(str(detail))
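The handwritten <domainsnapshot> XML in the test above covers both internal and external disk snapshots. A minimal sketch of the external, disk-only variant driven through plain virsh; the domain name, target device and overlay path are placeholders.

import subprocess
import tempfile

SNAP_XML = """
<domainsnapshot>
  <name>snapshot_test</name>
  <description>Snapshot Test</description>
  <disks>
    <disk name='vda' snapshot='external'>
      <source file='/var/tmp/vda.snapshot_test.qcow2'/>
    </disk>
  </disks>
</domainsnapshot>
"""


def create_external_disk_snapshot(domain="avocado-vt-vm1"):
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as xml_file:
        xml_file.write(SNAP_XML)
        path = xml_file.name
    # --disk-only skips the memory image, so the snapshot is only crash-consistent
    subprocess.run(["virsh", "snapshot-create", domain, "--xmlfile", path,
                    "--disk-only"], check=True)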
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no") # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image", "emulated-image") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") export_options = params.get("export_options", "rw,no_root_squash") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in list(params.keys()): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"] if snapshot_with_pool: if pool_type not in supported_pool_list: test.cancel("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") no_memory_snap = "yes" == params.get("no_memory_snap", "no") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed" or image_format == "qed": test.cancel("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_source_protocol == 'gluster': test.cancel("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" 
"bug_id=1017289,1032370") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if replace_vm_disk: utlv.set_vm_disk(vm, params, tmp_dir) if multi_gluster_disks: new_params = params.copy() new_params["pool_name"] = "gluster-pool2" new_params["vol_name"] = "gluster-vol2" new_params["disk_target"] = "vdf" new_params["image_convert"] = 'no' utlv.set_vm_disk(vm, new_params, tmp_dir) if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], source_name=vol_name, export_options=export_options) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. pv = libvirt_storage.PoolVolume(pool_name) vols = list(pv.list_volumes().keys()) if vols: vol_name = vols[0] else: test.cancel("No volume in pool: %s" % pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: test.cancel("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.cancel("Failed to format volume, %s" % cmd_result.stdout_text.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format if not multi_gluster_disks: # Do the attach action. out = process.run("qemu-img info %s" % img_path, shell=True) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.cancel("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. if snapshot_from_xml: snap_xml = libvirt_xml.SnapshotXML() snapshot_name = "snapshot_test" snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test" if not no_memory_snap: if "--disk-only" not in options: if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snap_xml.mem_snap_type = snapshot_memory snap_xml.mem_file = memory_external snapshot_external_disk.append(memory_external) else: snap_xml.mem_snap_type = snapshot_memory # Add all disks into xml file. 
vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') # Remove non-storage disk such as 'cdrom' for disk in disks: if disk.device != 'disk': disks.remove(disk) new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = snapshot_disk disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr if snapshot_disk == 'external': new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: new_file = "%s.snap" % disk_xml.source.attrs['file'] snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif 'name' in disk_xml.source.attrs: new_name = "%s.snap" % disk_xml.source.attrs['name'] new_attrs.update({'name': new_name}) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs and disk_xml.type_name == 'block'): # Use local file as external snapshot target for block type. # As block device will be treat as raw format by default, # it's not fit for external disk snapshot target. A work # around solution is use qemu-img again with the target. disk_xml.type_name = 'file' del new_attrs['dev'] new_file = "%s/blk_src_file.snap" % tmp_dir snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) else: del disk_xml.source new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options += " --xmlfile %s " % snapshot_xml_path if vm_state == "shut off": vm.destroy(gracefully=False) snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search("live disk snapshot not supported with this " "QEMU binary", out_err): test.cancel(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. Handle it as SKIP # for now. This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search("internal snapshot of a running VM" + " must include the memory state", out_err): test.cancel("Check Bug #1083345, %s" % out_err) test.fail("Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if status_error: return else: test.fail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: snap_xml = libvirt_xml.SnapshotXML() new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name, snapshot_name) # update an element new_snap.creation_time = snapshot_name snapshot_xml_path = new_snap.xml options += "--redefine %s --current" % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: test.fail("Failed to create snapshot --current." "Error:%s." 
% snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: test.fail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: test.fail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # As only internal snapshot revert works now, let's only do revert # with internal, and move the all skip external cases back to pass. # After external also supported, just move the following code back. if snapshot_disk == 'internal': # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # Attempts to revert external snapshots will FAIL with an error # "revert to external disk snapshot not supported yet" or "revert # to external snapshot not supported yet" since d410e6f. Thus, # let's check for that and handle as a SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if re.search("revert to external \w* ?snapshot not supported yet", revert_result.stderr): test.cancel(revert_result.stderr.strip()) else: test.fail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): test.fail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: test.fail("Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: test.fail("Tmp file exists, revert failed.") # Close the session. session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: test.fail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): test.fail("Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." 
test.fail(err_msg) else: if os.path.exists(snap_xml_path): test.fail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: if vm.is_alive(): vm.destroy(gracefully=False) virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") libvirtd = utils_libvirtd.Libvirtd() if disk_source_protocol == 'gluster': utlv.setup_or_cleanup_gluster(False, vol_name, brick_path) if multi_gluster_disks: brick_path = os.path.join(tmp_dir, "gluster-pool2") utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path) libvirtd.restart() if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name=vol_name) except exceptions.TestFail as detail: libvirtd.restart() logging.error(str(detail))
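The paused-revert branch above expects the guest to stay paused after snapshot-revert --paused and resumes it afterwards. A minimal sketch of that verification with plain virsh; the domain and snapshot names are placeholders.

import subprocess


def revert_paused_and_resume(domain="avocado-vt-vm1", snapshot="snapshot_test"):
    # Revert and ask libvirt to leave the guest paused afterwards
    subprocess.run(["virsh", "snapshot-revert", domain, snapshot, "--paused"],
                   check=True)
    state = subprocess.run(["virsh", "domstate", domain], check=True,
                           capture_output=True, text=True).stdout.strip()
    if state != "paused":
        raise RuntimeError("expected a paused guest after revert, got '%s'" % state)
    subprocess.run(["virsh", "resume", domain], check=True)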
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Global variable to store max/current memory, # it may change after attach/detach new_max_mem = None new_cur_mem = None def get_vm_memtotal(session): """ Get guest total memory """ proc_meminfo = session.cmd_output("cat /proc/meminfo") # verify format and units are expected return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B', proc_meminfo).group(1)) def consume_vm_mem(size=1000, timeout=360): """ To consume guest memory, default size is 1000M """ session = vm.wait_for_login() # Mount tmpfs on /mnt and write to a file on it, # it is the memory operation sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs " "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M" " count={0}".format(size)) session.cmd(sh_cmd, timeout=timeout) session.close() def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if max_mem_rt: cmd += (" | grep 'slots=%s,maxmem=%sk'" % (max_mem_slots, max_mem_rt)) if tg_size: size = int(tg_size) * 1024 cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s" % size) if pg_size: cmd += ",host-nodes=%s" % node_mask if numa_memnode: for node in numa_memnode: if ('nodeset' in node and node['nodeset'] in node_mask): cmd += ",policy=%s" % node['mode'] cmd += ".*pc-dimm,node=%s" % tg_node if mem_addr: cmd += (".*slot=%s,addr=%s" % (mem_addr['slot'], int(mem_addr['base'], 16))) cmd += "'" # Run the command utils.run(cmd) def check_guest_meminfo(old_mem): """ Check meminfo on guest. """ assert old_mem is not None session = vm.wait_for_login() # Hot-plugged memory should be online by udev rules udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules" udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",' ' ATTR{state}=="offline", ATTR{state}="online"') cmd = ("grep memory %s || echo '%s' >> %s" % (udev_file, udev_rules, udev_file)) session.cmd(cmd) # Wait a while for new memory to be detected. utils_misc.wait_for( lambda: get_vm_memtotal(session) != int(old_mem), 5) new_mem = get_vm_memtotal(session) session.close() logging.debug("Memtotal on guest: %s", new_mem) if new_mem != int(old_mem) + int(tg_size): raise error.TestFail("Total memory on guest couldn't" " changed after attach memory " "device") def check_dom_xml(at_mem=False, dt_mem=False): """ Check domain xml options. 
""" # Global variable to store max/current memory global new_max_mem global new_cur_mem if attach_option.count("config"): dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) else: dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) try: xml_max_mem_rt = int(dom_xml.max_mem_rt) xml_max_mem = int(dom_xml.max_mem) xml_cur_mem = int(dom_xml.current_mem) assert int(max_mem_rt) == xml_max_mem_rt # Check attached/detached memory if at_mem: assert int(max_mem) + int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory #assert int(cur_mem) + int(tg_size) == xml_cur_mem new_max_mem = xml_max_mem new_cur_mem = xml_cur_mem mem_dev = dom_xml.get_devices("memory") if len(mem_dev) != 1: raise error.TestFail("Found wrong number of" " memory device") assert int(tg_size) == int(mem_dev[0].target.size) assert int(tg_node) == int(mem_dev[0].target.node) elif dt_mem: assert int(new_max_mem) - int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory #assert int(new_cur_mem) - int(tg_size) == xml_cur_mem except AssertionError: utils.log_last_traceback() raise error.TestFail("Found unmatched memory setting" " from domain xml") def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(test.tmpdir, "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def create_mem_xml(): """ Create memory device xml. """ mem_xml = memory.Memory() mem_model = params.get("mem_model", "dimm") mem_xml.mem_model = mem_model if tg_size: tg_xml = memory.Memory.Target() tg_xml.size = int(tg_size) tg_xml.size_unit = tg_sizeunit tg_xml.node = int(tg_node) mem_xml.target = tg_xml if pg_size: src_xml = memory.Memory.Source() src_xml.pagesize = int(pg_size) src_xml.pagesize_unit = pg_unit src_xml.nodemask = node_mask mem_xml.source = src_xml if mem_addr: mem_xml.address = mem_xml.new_mem_address( **{"attrs": mem_addr}) logging.debug("Memory device xml: %s", mem_xml) return mem_xml.copy() def add_device(dev_xml, at_error=False): """ Add memory device by attachment or modify domain xml. """ if attach_device: ret = virsh.attach_device(vm_name, dev_xml.xml, flagstr=attach_option) libvirt.check_exit_status(ret, at_error) else: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if numa_cells: del vmxml.max_mem del vmxml.current_mem vmxml.add_device(dev_xml) vmxml.sync() def modify_domain_xml(): """ Modify domain xml and define it. 
""" vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) mem_unit = params.get("mem_unit", "KiB") vcpu = params.get("vcpu", "4") if max_mem_rt: vmxml.max_mem_rt = int(max_mem_rt) vmxml.max_mem_rt_slots = max_mem_slots vmxml.max_mem_rt_unit = mem_unit if vcpu: vmxml.vcpu = int(vcpu) vcpu_placement = params.get("vcpu_placement", "static") vmxml.placement = vcpu_placement if numa_memnode: vmxml.numa_memory = {} vmxml.numa_memnode = numa_memnode else: try: del vmxml.numa_memory del vmxml.numa_memnode except: # Not exists pass if numa_cells: cells = [ast.literal_eval(x) for x in numa_cells] cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" cpu_mode = params.get("cpu_mode") model_fallback = params.get("model_fallback") if cpu_mode: cpu_xml.mode = cpu_mode if model_fallback: cpu_xml.fallback = model_fallback cpu_xml.numa_cell = cells vmxml.cpu = cpu_xml # Delete memory and currentMemory tag, # libvirt will fill it automatically del vmxml.max_mem del vmxml.current_mem # hugepages setting if huge_pages: membacking = vm_xml.VMMemBackingXML() hugepages = vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(huge_pages)): pagexml = hugepages.PageXML() pagexml.update(huge_pages[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml: %s", vmxml) vmxml.sync() pre_vm_state = params.get("pre_vm_state", "running") attach_device = "yes" == params.get("attach_device", "no") detach_device = "yes" == params.get("detach_device", "no") attach_error = "yes" == params.get("attach_error", "no") start_error = "yes" == params.get("start_error", "no") detach_error = "yes" == params.get("detach_error", "no") maxmem_error = "yes" == params.get("maxmem_error", "no") attach_option = params.get("attach_option", "") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_managedsave = "yes" == params.get("test_managedsave", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_mem_binding = "yes" == params.get("test_mem_binding", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") add_mem_device = "yes" == params.get("add_mem_device", "no") test_dom_xml = "yes" == params.get("test_dom_xml", "no") max_mem = params.get("max_mem") max_mem_rt = params.get("max_mem_rt") max_mem_slots = params.get("max_mem_slots", "16") #cur_mem = params.get("current_mem") numa_cells = params.get("numa_cells", "").split() set_max_mem = params.get("set_max_mem") # params for attached device tg_size = params.get("tg_size") tg_sizeunit = params.get("tg_sizeunit", 'KiB') tg_node = params.get("tg_node", 0) pg_size = params.get("page_size") pg_unit = params.get("page_unit", "KiB") node_mask = params.get("node_mask", "0") mem_addr = ast.literal_eval(params.get("memory_addr", "{}")) huge_pages = [ast.literal_eval(x) for x in params.get("huge_pages", "").split()] numa_memnode = [ast.literal_eval(x) for x in params.get("numa_memnode", "").split()] # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Drop caches first for host has enough memory drop_caches() # Destroy domain first if vm.is_alive(): vm.destroy(gracefully=False) modify_domain_xml() # Start the domain any way if attach memory device old_mem_total = None if attach_device: vm.start() session = vm.wait_for_login() old_mem_total = get_vm_memtotal(session) logging.debug("Memtotal on guest: %s", old_mem_total) session.close() dev_xml = None # To attach the memory device. 
if add_mem_device: at_times = int(params.get("attach_times", 1)) dev_xml = create_mem_xml() for x in xrange(at_times): # If any error excepted, command error status should be # checked in the last time if x == at_times - 1: add_device(dev_xml, attach_error) else: add_device(dev_xml) # Check domain xml after attach device. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Set domain state if pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() raise error.TestFail("Cann't create the domain") elif vm.is_dead(): try: vm.start() vm.wait_for_login().close() except virt_vm.VMStartError: if start_error: pass else: raise error.TestFail("VM Failed to start" " for some reason!") # Set memory operation if set_max_mem: max_mem_option = params.get("max_mem_option", "") ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option) libvirt.check_exit_status(ret, maxmem_error) # Check domain xml after start the domain. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check guest meminfo after attachment if (attach_device and not attach_option.count("config") and not any([attach_error, start_error])): check_guest_meminfo(old_mem_total) # Consuming memory on guest, # to verify memory changes by numastat if test_mem_binding: pid = vm.get_pid() old_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", old_numastat) consume_vm_mem() new_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", new_numastat) # Only check total memory which is the last element if float(new_numastat[-1]) - float(old_numastat[-1]) < 0: raise error.TestFail("Numa memory can't be consumed" " on guest") # Run managedsave command to check domain xml. if test_managedsave: ret = virsh.managedsave(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) vm.start() vm.wait_for_login().close() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Run save and restore command to check domain xml if test_save_restore: check_save_restore() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check domain xml after restarting libvirtd if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Detach the memory device if detach_device: if not dev_xml: dev_xml = create_mem_xml() ret = virsh.detach_device(vm_name, dev_xml.xml, flagstr=attach_option) libvirt.check_exit_status(ret, detach_error) if test_dom_xml: check_dom_xml(dt_mem=detach_device) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync()
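get_vm_memtotal above derives the guest memory from /proc/meminfo, and the hotplug check simply compares that value before and after attaching the dimm. A standalone sketch of the same parsing and comparison, assuming the usual "MemTotal: <n> kB" line format.

import re


def mem_total_kb(meminfo_text):
    """Return MemTotal in KiB parsed from the contents of /proc/meminfo."""
    match = re.search(r"^MemTotal:\s+(\d+)\s+kB", meminfo_text, re.MULTILINE)
    if match is None:
        raise ValueError("MemTotal line not found")
    return int(match.group(1))


def hotplug_grew_memory(before_kb, after_kb, dimm_kb):
    # Once the new memory blocks are brought online inside the guest,
    # the attached dimm should show up one-to-one in MemTotal.
    return after_kb == before_kb + dimm_kb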
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" + " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" + " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: 
vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML disk_params = { 'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly } disk_params_src = {} if disk_type == "network": disk_params_src = { 'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/1", 'source_host_name': disk_src_host, 'source_host_port': disk_src_port } elif disk_type == "volume": disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode } else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = { 'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target } disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") disk_xml_f = open(disk_xml) disk_xml_content = disk_xml_f.read() disk_xml_f.close() logging.debug("Attach disk by XML: %s", disk_xml_content) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) pass else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep 
%s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError) as e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
def run(test, params, env): """ Test for virt-xml-validate """ # Get the full path of virt-xml-validate command. try: VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate") except ValueError: raise error.TestNAError("virt-xml-validate command not found on host.") vm_name = params.get("main_vm", "virt-tests-vm1") net_name = params.get("net_dumpxml_name", "default") pool_name = params.get("pool_dumpxml_name", "default") schema = params.get("schema", "domain") output = params.get("output_file", "output") output_path = os.path.join(data_dir.get_tmp_dir(), output) valid_schemas = [ 'domain', 'domainsnapshot', 'network', 'storagepool', 'storagevol', 'nodedev', 'capability', 'nwfilter', 'secret', 'interface' ] if schema not in valid_schemas: raise error.TestFail("Invalid schema specified: %s" % schema) virsh_dargs = {'ignore_status': True, 'debug': True} if schema == "domainsnapshot": domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs) elif schema == "network": network_validate(net_name, file=output_path, **virsh_dargs) elif schema == "storagepool": storagepool_validate(pool_name, file=output_path, **virsh_dargs) elif schema == "storagevol": storagevol_validate(pool_name, file=output_path, **virsh_dargs) elif schema == "nodedev": nodedev_validate(file=output_path, **virsh_dargs) elif schema == "capability": capability_validate(file=output_path, **virsh_dargs) elif schema == "nwfilter": nwfilter_validate(file=output_path, **virsh_dargs) elif schema == "secret": secret_validate(file=output_path, **virsh_dargs) elif schema == "interface": interface_validate(file=output_path, **virsh_dargs) else: # domain virsh.dumpxml(vm_name, to_file=output_path) cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema) cmd_result = utils.run(cmd, ignore_status=True) # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snapshot in snapshot_lists: virsh.snapshot_delete(vm_name, snapshot, "--metadata") if cmd_result.exit_status: raise error.TestFail("virt-xml-validate command failed.\n" "Detail: %s." % cmd_result) if cmd_result.stdout.count("fail"): raise error.TestFail("XML failed to validate.\n" "Detail: %s." % cmd_result)
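# Illustrative sketch (not part of the test above): the core of the check is just
# dumping an XML document and running virt-xml-validate against it with a schema
# name. Assumes virsh and virt-xml-validate are installed; the domain name and
# output path below are placeholder examples, not values taken from the test
# configuration.
import subprocess

def validate_domain_xml(vm_name="virt-tests-vm1", xml_path="/tmp/domain.xml"):
    # Dump the domain definition to a file.
    with open(xml_path, "w") as xml_file:
        subprocess.run(["virsh", "dumpxml", vm_name], stdout=xml_file, check=True)
    # A non-zero exit status or the word "fail" in stdout means validation failed.
    result = subprocess.run(["virt-xml-validate", xml_path, "domain"],
                            capture_output=True, text=True)
    return result.returncode == 0 and "fail" not in result.stdout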
def run_virsh_snapshot_disk(test, params, env): """ Test the virsh snapshot command with all kinds of disk types. (1). Init the variables from params. (2). Create an image in the specified format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). Cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) # Get a tmp_dir. tmp_dir = test.tmpdir # Create an image. params['image_name'] = "snapshot_test" params['image_format'] = image_format image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) # Do the attach action. virsh.attach_disk(vm_name, source=img_path, target="vdf", extra="--persistent --subdriver %s" % image_format) # Init snapshot_name snapshot_name = None try: # Create snapshot. if snapshot_from_xml: snapshot_name = "snapshot_test" lines = [ "<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n", "<memory snapshot=\'internal\'/>\n", "</domainsnapshot>" ] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() snapshot_result = virsh.snapshot_create( vm_name, ("--xmlfile %s" % snapshot_xml_path)) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) else: snapshot_result = virsh.snapshot_create(vm_name) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search(r"\d+", snapshot_result.stdout.strip()).group(0) # Touch a file in VM. session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() status, output = session.cmd_status_output("touch %s" % tmp_file_path) if status: raise error.TestFail("Touch file in vm failed. %s" % output) session.close() # Destroy vm for snapshot revert. virsh.destroy(vm_name) # Revert snapshot. revert_result = virsh.snapshot_revert(vm_name, snapshot_name) if revert_result.exit_status: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if not vm.is_alive(): raise error.TestFail("Revert snapshot failed.") # Log in to the VM. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") image.remove() if snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name)
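# Illustrative sketch (not part of the test above): building the same
# <domainsnapshot> document with xml.etree.ElementTree instead of hand-written
# string concatenation. The snapshot name and description are arbitrary examples.
import xml.etree.ElementTree as ET

def build_snapshot_xml(name="snapshot_test", description="Snapshot Test"):
    root = ET.Element("domainsnapshot")
    ET.SubElement(root, "name").text = name
    ET.SubElement(root, "description").text = description
    # snapshot='internal' keeps the memory state inside the disk image.
    ET.SubElement(root, "memory", {"snapshot": "internal"})
    return ET.tostring(root, encoding="unicode")

# Example: build_snapshot_xml() returns a string that can be written to a file
# and passed to "virsh snapshot-create --xmlfile <file>".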
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to a domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create an iscsi network disk XML 3. Attach the disk with the XML file and check the disk inside the VM 4. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocal = params.get("disk_source_protocal", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk is not supported in" + " the current libvirt version.") # Back up VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocal secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Failed to get newly created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setting up # CHAP authentication when exporting the iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.hostname = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Failed to get volume name") # Create iscsi network disk XML 
disk_params = {'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly} disk_params_src = {} if disk_type == "network": disk_params_src = {'source_protocol': disk_src_protocal, 'source_name': iscsi_target + "/1", 'source_host_name': disk_src_host, 'source_host_port': disk_src_port} elif disk_type == "volume": disk_params_src = {'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode} else: raise error.TestNAError("Unsupported disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = {'auth_user': chap_user, 'secret_type': disk_src_protocal, 'secret_usage': secret_xml.target} disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") # Attach the iscsi network disk to domain logging.debug("Attach disk by XML: %s", open(disk_xml).read()) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, **virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: logging.error("Unsupported operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError) as e: 
logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
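# Illustrative sketch (not part of the test above): find_attach_disk() greps
# /proc/partitions inside the guest; the same check can be expressed directly in
# Python wherever /proc/partitions can be read. The target name "vdb" is only an
# example.
def disk_present(target="vdb", partitions_path="/proc/partitions"):
    with open(partitions_path) as proc_file:
        for line in proc_file:
            fields = line.split()
            # Data lines look like: major minor #blocks name
            if len(fields) == 4 and fields[0].isdigit() and fields[3] == target:
                return True
    return False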
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip( disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool(pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev] xml_dom_exp = ["source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)" .format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)} for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts(secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no") # Create /etc/ceph/ceph.conf file to suppress false 
warning error message. process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True) cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf" .format(mon_host)) process.run(cmd, ignore_status=True, shell=True) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append('unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append('unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict["chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'} san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port}) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = {"name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format} create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm(vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove /etc/ceph/ceph.conf file if exists. if os.path.exists('/etc/ceph/ceph.conf'): os.remove('/etc/ceph/ceph.conf') # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files. if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secret secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
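# Illustrative sketch (not part of the test above): clean_up_volume_snapshots()
# parses the tabular output of "rbd snap list"; here is that parsing step on its
# own. The column layout assumed below (SNAPID NAME ...) matches what the test
# expects, but real rbd releases may add or reorder columns.
def parse_rbd_snap_names(snap_list_output):
    names = []
    for line in snap_list_output.splitlines():
        fields = line.split()
        if not fields or fields[0] == "SNAPID":
            continue  # skip blank lines and the header row
        if len(fields) >= 2:
            names.append(fields[1])  # the second column is the snapshot name
    return names

# Example: parse_rbd_snap_names("SNAPID NAME SIZE\n4 snap1 2048M") returns ["snap1"].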
def run(test, params, env): """ Test rng device options. 1.Prepare test environment, destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) def check_rng_xml(xml_set, exists=True): """ Check rng xml in/not in domain xml :param xml_set: rng xml object for setting :param exists: Check xml exists or not in domain xml :return: boolean """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Get all current xml rng devices xml_devices = vmxml.devices rng_devices = xml_devices.by_device_tag("rng") logging.debug("rng_devices is %s", rng_devices) # check if xml attr same with checking try: rng_index = xml_devices.index(rng_devices[0]) xml_get = xml_devices[rng_index] if not exists: # should be detach device check return False except IndexError: if exists: # should be attach device check return False else: logging.info("Can not find rng xml as expected") return True def get_compare_values(xml_set, xml_get, rng_attr): """ Get set and get value to compare :param xml_set: seting xml object :param xml_get: getting xml object :param rng_attr: attribute of rng device :return: set and get value in xml """ try: set_value = xml_set[rng_attr] except xcepts.LibvirtXMLNotFoundError: set_value = None try: get_value = xml_get[rng_attr] except xcepts.LibvirtXMLNotFoundError: get_value = None logging.debug("get xml_set value(%s) is %s, get xml_get value is %s", rng_attr, set_value, get_value) return (set_value, get_value) match = True for rng_attr in xml_set.__slots__: set_value, get_value = get_compare_values(xml_set, xml_get, rng_attr) logging.debug("rng_attr=%s, set_value=%s, get_value=%s", rng_attr, set_value, get_value) if set_value and set_value != get_value: if rng_attr == 'backend': for bak_attr in xml_set.backend.__slots__: set_backend, get_backend = get_compare_values(xml_set.backend, xml_get.backend, bak_attr) if set_backend and set_backend != get_backend: if bak_attr == 'source': set_source = xml_set.backend.source get_source = xml_get.backend.source find = False for i in range(len(set_source)): for j in get_source: if set(set_source[i].items()).issubset(j.items()): find = True break if not find: logging.debug("set source(%s) not in get source(%s)", set_source[i], get_source) match = False break else: continue else: logging.debug("set backend(%s)- %s not equal to get backend-%s", rng_attr, set_backend, get_backend) match = False break else: continue if not match: break else: logging.debug("set value(%s)-%s not equal to get value-%s", rng_attr, set_value, get_value) match = False break else: continue if not match: break if match: logging.info("Find same rng xml as hotpluged") else: test.fail("Rng xml in VM not same with attached xml") return True def modify_rng_xml(dparams, sync=True, get_xml=False): """ Modify interface xml options :params dparams: parameters for organize xml :params sync: whether sync to domain xml, if get_xml is True, then sync will not take effect :params get_xml: whether get device xml :return: if get_xml=True, return xml file """ rng_model = dparams.get("rng_model", "virtio") rng_rate = dparams.get("rng_rate") backend_model = dparams.get("backend_model", "random") backend_type = dparams.get("backend_type") backend_dev = dparams.get("backend_dev", "") backend_source_list = dparams.get("backend_source", "").split() backend_protocol = dparams.get("backend_protocol") rng_alias = dparams.get("rng_alias") vmxml = 
vm_xml.VMXML.new_from_dumpxml(vm_name) rng_xml = rng.Rng() rng_xml.rng_model = rng_model if rng_rate: rng_xml.rate = ast.literal_eval(rng_rate) backend = rng.Rng.Backend() backend.backend_model = backend_model if backend_type: backend.backend_type = backend_type if backend_dev: backend.backend_dev = backend_dev if backend_source_list: source_list = [ast.literal_eval(source) for source in backend_source_list] backend.source = source_list if backend_protocol: backend.backend_protocol = backend_protocol rng_xml.backend = backend if detach_alias: rng_xml.alias = dict(name=rng_alias) logging.debug("Rng xml: %s", rng_xml) if get_xml: return rng_xml if sync: vmxml.add_device(rng_xml) vmxml.xmltreefile.write() vmxml.sync() else: status = libvirt.exec_virsh_edit( vm_name, [(r":/<devices>/s/$/%s" % re.findall(r"<rng.*<\/rng>", str(rng_xml), re.M )[0].replace("/", "\/"))]) if not status: test.fail("Failed to edit vm xml") def check_qemu_cmd(dparams): """ Verify qemu-kvm command line. """ rng_model = dparams.get("rng_model", "virtio") rng_rate = dparams.get("rng_rate") backend_type = dparams.get("backend_type") backend_source_list = dparams.get("backend_source", "").split() cmd = ("ps -ef | grep %s | grep -v grep" % vm_name) chardev = src_host = src_port = None if backend_type == "tcp": chardev = "socket" elif backend_type == "udp": chardev = "udp" for bc_source in backend_source_list: source = ast.literal_eval(bc_source) if "mode" in source and source['mode'] == "connect": src_host = source['host'] src_port = source['service'] if chardev and src_host and src_port: cmd += (" | grep 'chardev %s,.*host=%s,port=%s'" % (chardev, src_host, src_port)) if rng_model == "virtio": cmd += (" | grep 'device %s'" % dparams.get("rng_device")) if rng_rate: rate = ast.literal_eval(rng_rate) cmd += (" | grep 'max-bytes=%s,period=%s'" % (rate['bytes'], rate['period'])) if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't see rng option" " in command line") def check_host(): """ Check random device on host """ backend_dev = params.get("backend_dev") if backend_dev: cmd = "lsof |grep %s" % backend_dev ret = process.run(cmd, ignore_status=True, shell=True) if ret.exit_status or not ret.stdout_text.count("qemu"): test.fail("Failed to check random device" " on host, command output: %s" % ret.stdout_text) def check_snapshot(bgjob=None): """ Do snapshot operation and check the results """ snapshot_name1 = "snap.s1" snapshot_name2 = "snap.s2" if not snapshot_vm_running: vm.destroy(gracefully=False) ret = virsh.snapshot_create_as(vm_name, snapshot_name1, debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name not in snap_lists: test.fail("Snapshot %s doesn't exist" % snapshot_name) if snapshot_vm_running: options = "--force" else: options = "" ret = virsh.snapshot_revert( vm_name, ("%s %s" % (snapshot_name, options)), debug=True) libvirt.check_exit_status(ret) ret = virsh.dumpxml(vm_name, debug=True) if ret.stdout.strip().count("<rng model="): test.fail("Found rng device in xml") if snapshot_with_rng: if vm.is_alive(): vm.destroy(gracefully=False) if bgjob: bgjob.kill_func() modify_rng_xml(params, False) # Start the domain before disk-only snapshot if vm.is_dead(): # Add random server if params.get("backend_type") == "tcp": cmd = "cat /dev/random | nc -4 -l localhost 1024" bgjob = utils_misc.AsyncJob(cmd) vm.start() vm.wait_for_login().close() err_msgs = ("live disk snapshot not supported" " with this QEMU binary") ret = 
virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot_name2, debug=True) if ret.exit_status: if ret.stderr.count(err_msgs): test.skip(err_msgs) else: test.fail("Failed to create external snapshot") snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name2 not in snap_lists: test.fail("Failed to check snapshot list") ret = virsh.domblklist(vm_name, debug=True) if not ret.stdout.strip().count(snapshot_name2): test.fail("Failed to find snapshot disk") def check_guest_dump(session, exists=True): """ Check guest with hexdump :param session: ssh session to guest :param exists: check rng device exists/not exists """ check_cmd = "hexdump /dev/hwrng" try: status = session.cmd_status(check_cmd, 5) if status != 0 and exists: test.fail("Fail to check hexdump in guest") elif not exists: logging.info("hexdump cmd failed as expected") except aexpect.exceptions.ShellTimeoutError: if not exists: test.fail("Still can find rng device in guest") else: logging.info("Hexdump do not fail with error") def check_guest(session, expect_fail=False): """ Check random device on guest :param session: ssh session to guest :param expect_fail: expect the dd cmd pass or fail """ rng_files = ( "/sys/devices/virtual/misc/hw_random/rng_available", "/sys/devices/virtual/misc/hw_random/rng_current") rng_avail = session.cmd_output("cat %s" % rng_files[0], timeout=timeout).strip() rng_currt = session.cmd_output("cat %s" % rng_files[1], timeout=timeout).strip() logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt) if not rng_currt.count("virtio") or rng_currt not in rng_avail: test.fail("Failed to check rng file on guest") # Read the random device rng_rate = params.get("rng_rate") # For rng rate test this command and return in a short time # but for other test it will hang cmd = ("dd if=/dev/hwrng of=rng.test count=100" " && rm -f rng.test") try: ret, output = session.cmd_status_output(cmd, timeout=timeout) if ret and expect_fail: logging.info("dd cmd failed as expected") elif ret: test.fail("Failed to read the random device") except aexpect.exceptions.ShellTimeoutError: logging.info("dd cmd timeout") # Close session as the current session still hang on last cmd session.close() session = vm.wait_for_login() if expect_fail: test.fail("Still can find rng device in guest") else: logging.info("dd cmd do not fail with error") # Check if file have data size = session.cmd_output("wc -c rng.test").split()[0] if int(size) > 0: logging.info("/dev/hwrng is not empty, size %s", size) else: test.fail("/dev/hwrng is empty") finally: session.cmd("rm -f rng.test") if rng_rate: rate_bytes, rate_period = list(ast.literal_eval(rng_rate).values()) rate_conf = float(rate_bytes) / (float(rate_period)/1000) ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s", output, re.M) if not ret: test.fail("Can't find rate from output") rate_real = float(ret.group(1)) / float(ret.group(2)) logging.debug("Find rate: %s, config rate: %s", rate_real, rate_conf) if rate_real > rate_conf * 1.2: test.fail("The rate of reading exceed" " the limitation of configuration") if device_num > 1: rng_dev = rng_avail.split() if len(rng_dev) != device_num: test.cancel("Multiple virtio-rng devices are not" " supported on this guest kernel. 
" "Bug: https://bugzilla.redhat.com/" "show_bug.cgi?id=915335") session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1])) # Read the random device if session.cmd_status(cmd, timeout=timeout): test.fail("Failed to read the random device") def get_rng_device(guest_arch, rng_model): """ Return the expected rng device in qemu cmd :param guest_arch: e.g. x86_64 :param rng_model: the value for //rng@model, e.g. "virtio" :return: expected device type in qemu cmd """ if "virtio" in rng_model: return "virtio-rng-pci" if "s390x" not in guest_arch else "virtio-rng-ccw" else: test.fail("Unknown rng model %s" % rng_model) start_error = "yes" == params.get("start_error", "no") status_error = "yes" == params.get("status_error", "no") test_host = "yes" == params.get("test_host", "no") test_guest = "yes" == params.get("test_guest", "no") test_guest_dump = "yes" == params.get("test_guest_dump", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") snapshot_vm_running = "yes" == params.get("snapshot_vm_running", "no") snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no") snapshot_name = params.get("snapshot_name") device_num = int(params.get("device_num", 1)) detach_alias = "yes" == params.get("rng_detach_alias", "no") detach_alias_options = params.get("rng_detach_alias_options") attach_rng = "yes" == params.get("rng_attach_device", "no") attach_options = params.get("rng_attach_options", "") random_source = "yes" == params.get("rng_random_source", "yes") timeout = int(params.get("timeout", 600)) wait_timeout = int(params.get("wait_timeout", 60)) if device_num > 1 and not libvirt_version.version_compare(1, 2, 7): test.skip("Multiple virtio-rng devices not " "supported on this libvirt version") guest_arch = params.get("vm_arch_name", "x86_64") # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("vm xml is %s", vmxml_backup) # Try to install rng-tools on host, it can speed up random rate # if installation failed, ignore the error and continue the test if utils_package.package_install(["rng-tools"]): rngd_conf = "/etc/sysconfig/rngd" rngd_srv = "/usr/lib/systemd/system/rngd.service" if os.path.exists(rngd_conf): # For rhel6 host, add extraoptions with open(rngd_conf, 'w') as f_rng: f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"') elif os.path.exists(rngd_srv): # For rhel7 host, modify start options rngd_srv_conf = "/etc/systemd/system/rngd.service" if not os.path.exists(rngd_srv_conf): shutil.copy(rngd_srv, rngd_srv_conf) process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd" " -f -r /dev/urandom -o /dev/random#' %s" % rngd_srv_conf, shell=True) process.run('systemctl daemon-reload') process.run("service rngd start") # Build the xml and run test. try: bgjob = None # Prepare xml, make sure no extra rng dev. vmxml = vmxml_backup.copy() vmxml.remove_all_device_by_type('rng') vmxml.sync() logging.debug("Prepared vm xml without rng dev is %s", vmxml) # Take snapshot if needed if snapshot_name: if snapshot_vm_running: vm.start() vm.wait_for_login().close() ret = virsh.snapshot_create_as(vm_name, snapshot_name, debug=True) libvirt.check_exit_status(ret) # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Build vm xml. 
dparams = {} if device_num > 1: for i in xrange(device_num): rng_model = params.get("rng_model_%s" % i, "virtio") dparams[i] = {"rng_model": rng_model} dparams[i].update({"backend_model": params.get( "backend_model_%s" % i, "random")}) dparams[i].update({"rng_device": get_rng_device( guest_arch, rng_model)}) bk_type = params.get("backend_type_%s" % i) if bk_type: dparams[i].update({"backend_type": bk_type}) bk_dev = params.get("backend_dev_%s" % i) if bk_dev: dparams[i].update({"backend_dev": bk_dev}) bk_src = params.get("backend_source_%s" % i) if bk_src: dparams[i].update({"backend_source": bk_src}) bk_pro = params.get("backend_protocol_%s" % i) if bk_pro: dparams[i].update({"backend_protocol": bk_pro}) modify_rng_xml(dparams[i], False) else: params.update({"rng_device": get_rng_device( guest_arch, params.get("rng_model", "virtio"))}) if detach_alias: device_alias = "ua-" + str(uuid.uuid4()) params.update({"rng_alias": device_alias}) rng_xml = modify_rng_xml(params, not test_snapshot, attach_rng) try: # Add random server if random_source and params.get("backend_type") == "tcp": cmd = "cat /dev/random | nc -4 -l localhost 1024" bgjob = utils_misc.AsyncJob(cmd) vm.start() if attach_rng: ret = virsh.attach_device(vm_name, rng_xml.xml, flagstr=attach_options, debug=True, ignore_status=True) libvirt.check_exit_status(ret, status_error) if status_error: return if not check_rng_xml(rng_xml, True): test.fail("Can not find rng device in xml") else: # Start the VM. if start_error: test.fail("VM started unexpectedly") if test_qemu_cmd and not attach_rng: if device_num > 1: for i in xrange(device_num): check_qemu_cmd(dparams[i]) else: check_qemu_cmd(params) if test_host: check_host() session = vm.wait_for_login() if test_guest: check_guest(session) if test_guest_dump: check_guest_dump(session, True) if test_snapshot: check_snapshot(bgjob) if detach_alias: result = virsh.detach_device_alias(vm_name, device_alias, detach_alias_options, debug=True) if "--config" in detach_alias_options: vm.destroy() def have_rng_xml(): """ check if xml have rng item """ output = virsh.dumpxml(vm_name) return not output.stdout.strip().count("<rng model=") if utils_misc.wait_for(have_rng_xml, wait_timeout): logging.info("Cannot find rng device in xml after detach") else: test.fail("Found rng device in xml after detach") # Detach after attach if attach_rng: ret = virsh.detach_device(vm_name, rng_xml.xml, flagstr=attach_options, debug=True, ignore_status=True) libvirt.check_exit_status(ret, status_error) if utils_misc.wait_for(lambda: check_rng_xml(rng_xml, False), wait_timeout): logging.info("Find same rng xml as hotpluged") else: test.fail("Rng device still exists after detach!") if test_guest_dump: check_guest_dump(session, False) session.close() except virt_vm.VMStartError as details: logging.info(str(details)) if not start_error: test.fail('VM failed to start, ' 'please refer to https://bugzilla.' 'redhat.com/show_bug.cgi?id=1220252:' '\n%s' % details) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name, debug=True) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snapshot in snapshot_lists: virsh.snapshot_delete(vm_name, snapshot, "--metadata", debug=True) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() if bgjob: bgjob.kill_func()
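# The rng rate check above parses the dd summary line captured from the
# guest ("N bytes ... copied, T s") and compares bytes/second against the
# configured <rate> (bytes per period, period given in milliseconds).  A
# minimal standalone sketch of that calculation; the helper name and the
# 20% tolerance mirror the check above and are otherwise illustrative.
import re


def rate_within_limit(dd_output, rate_bytes, rate_period_ms, tolerance=1.2):
    """Return True if the dd transfer rate stays under the configured limit."""
    match = re.search(r"(\d+) bytes.*copied, (\d+\.\d+) s", dd_output, re.M)
    if not match:
        raise ValueError("Cannot find transfer rate in dd output")
    real_rate = float(match.group(1)) / float(match.group(2))
    configured_rate = float(rate_bytes) / (float(rate_period_ms) / 1000)
    return real_rate <= configured_rate * tolerance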
        logging.debug("file exist = %s, file content = %s",
                      file_existence, file_content)
        if (not file_existence) or (file_content.strip() != "before"):
            raise exceptions.TestFail("The file created "
                                      "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = get_vm_blks(vm_name)
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
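# In the teardown above, disk-only (external) snapshots are removed with
# "--metadata" and their overlay files are deleted by hand, while internal
# snapshots are deleted outright.  A minimal sketch of that decision; the
# helper name is illustrative, and virsh/os are the modules used throughout
# these tests.
import os

from virttest import virsh


def delete_test_snapshot(vm_name, snap_name, disk_only, overlay_files=()):
    options = "--metadata" if disk_only else ""
    virsh.snapshot_delete(vm_name, snap_name, options, debug=True)
    if disk_only:
        for path in overlay_files:
            if os.path.exists(path):
                os.remove(path)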
def run_virsh_snapshot_disk(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) # Get a tmp_dir. tmp_dir = test.tmpdir # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) # Do the attach action. virsh.attach_disk(vm_name, source=img_path, target="vdf", extra="--persistent --subdriver %s" % image_format) # Init snapshot_name snapshot_name = None try: # Create snapshot. if snapshot_from_xml: snapshot_name = "snapshot_test" lines = ["<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n", "<memory snapshot=\'internal\'/>\n", "</domainsnapshot>"] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() snapshot_result = virsh.snapshot_create(vm_name, ("--xmlfile %s" % snapshot_xml_path)) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) else: snapshot_result = virsh.snapshot_create(vm_name) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) # Touch a file in VM. session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() status, output = session.cmd_status_output("touch %s" % tmp_file_path) if status: raise error.TestFail("Touch file in vm failed. %s" % output) session.close() # Destroy vm for snapshot revert. virsh.destroy(vm_name) # Revert snapshot. revert_result = virsh.snapshot_revert(vm_name, snapshot_name) if revert_result.exit_status: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if not vm.is_alive(): raise error.TestFail("Revert snapshot failed.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") image.remove() if snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name)
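# The revert check above hinges on a file created *after* the snapshot:
# once the guest is reverted, that file must be gone, so a failing "cat"
# (non-zero status) is the success condition.  A minimal sketch of the same
# check over an established guest session (helper name is illustrative):
def file_gone_after_revert(session, path):
    status, _ = session.cmd_status_output("cat %s" % path)
    return status != 0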
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) vm_state = params.get("vm_state", "running") image_format = params.get("snapshot_image_format", "qcow2") snapshot_del_test = "yes" == params.get("snapshot_del_test", "no") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no") # Pool variables. snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image", "emulated-image") vol_format = params.get("vol_format") lazy_refcounts = "yes" == params.get("lazy_refcounts") options = params.get("snapshot_options", "") export_options = params.get("export_options", "rw,no_root_squash,fsid=0") # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts supported_pool_list = [ "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster" ] if snapshot_with_pool: if pool_type not in supported_pool_list: raise error.TestNAError("%s not in support list %s" % (pool_target, supported_pool_list)) # Do xml backup for final recovery vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") no_memory_snap = "yes" == params.get("no_memory_snap", "no") # Skip 'qed' cases for libvirt version greater than 1.1.0 if libvirt_version.version_compare(1, 1, 0): if vol_format == "qed" or image_format == "qed": raise error.TestNAError("QED support changed, check bug: " "https://bugzilla.redhat.com/show_bug.cgi" "?id=731570") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_source_protocol == 'gluster': raise error.TestNAError("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" 
"bug_id=1017289,1032370") # Init snapshot_name snapshot_name = None snapshot_external_disk = [] snapshot_xml_path = None del_status = None image = None pvt = None # Get a tmp dir snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name try: if replace_vm_disk: utlv.set_vm_disk(vm, params, tmp_dir) if multi_gluster_disks: new_params = params.copy() new_params["pool_name"] = "gluster-pool2" new_params["vol_name"] = "gluster-vol2" new_params["disk_target"] = "vdf" new_params["image_convert"] = 'no' utlv.set_vm_disk(vm, new_params, tmp_dir) if snapshot_with_pool: # Create dst pool for create attach vol img pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], source_name=vol_name, export_options=export_options) if pool_type in ["iscsi", "disk"]: # iscsi and disk pool did not support create volume in libvirt, # logical pool could use libvirt to create volume but volume # format is not supported and will be 'raw' as default. pv = libvirt_storage.PoolVolume(pool_name) vols = pv.list_volumes().keys() if vols: vol_name = vols[0] else: raise error.TestNAError("No volume in pool: %s" % pool_name) else: # Set volume xml file volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] # Run virsh_vol_create to create vol logging.debug("create volume from xml: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create(pool_name, vol_xml, ignore_status=True, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to create attach volume.") cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() if pool_type in ["logical", "iscsi", "disk"]: # Use qemu-img to format logical, iscsi and disk block device if vol_format != "raw": cmd = "qemu-img create -f %s %s 10M" % (vol_format, img_path) cmd_result = utils.run(cmd, ignore_status=True) if cmd_result.exit_status: raise error.TestNAError("Failed to format volume, %s" % cmd_result.stdout.strip()) extra = "--persistent --subdriver %s" % vol_format else: # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) extra = "--persistent --subdriver %s" % image_format if not multi_gluster_disks: # Do the attach action. out = utils.run("qemu-img info %s" % img_path) logging.debug("The img info is:\n%s" % out.stdout.strip()) result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Create snapshot. if snapshot_from_xml: snap_xml = libvirt_xml.SnapshotXML() snapshot_name = "snapshot_test" snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test" if not no_memory_snap: if "--disk-only" not in options: if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snap_xml.mem_snap_type = snapshot_memory snap_xml.mem_file = memory_external snapshot_external_disk.append(memory_external) else: snap_xml.mem_snap_type = snapshot_memory # Add all disks into xml file. 
vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = snapshot_disk disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr if snapshot_disk == 'external': new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): new_file = "%s.snap" % disk_xml.source.attrs['file'] snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif disk_xml.source.attrs.has_key('name'): new_name = "%s.snap" % disk_xml.source.attrs['name'] new_attrs.update({'name': new_name}) hosts = disk_xml.source.hosts elif (disk_xml.source.attrs.has_key('dev') and disk_xml.type_name == 'block'): # Use local file as external snapshot target for block type. # As block device will be treat as raw format by default, # it's not fit for external disk snapshot target. A work # around solution is use qemu-img again with the target. disk_xml.type_name = 'file' del new_attrs['dev'] new_file = "%s/blk_src_file.snap" % tmp_dir snapshot_external_disk.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) else: del disk_xml.source new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options += " --xmlfile %s " % snapshot_xml_path if vm_state == "shut off": vm.destroy(gracefully=False) snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) out_err = snapshot_result.stderr.strip() if snapshot_result.exit_status: if status_error: return else: if re.search( "live disk snapshot not supported with this " "QEMU binary", out_err): raise error.TestNAError(out_err) if libvirt_version.version_compare(1, 2, 5): # As commit d2e668e in 1.2.5, internal active snapshot # without memory state is rejected. Handle it as SKIP # for now. This could be supportted in future by bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1103063 if re.search( "internal snapshot of a running VM" + " must include the memory state", out_err): raise error.TestNAError("Check Bug #1083345, %s" % out_err) raise error.TestFail( "Failed to create snapshot. Error:%s." % out_err) else: snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: snap_xml = libvirt_xml.SnapshotXML() new_snap = snap_xml.new_from_snapshot_dumpxml( vm_name, snapshot_name) # update an element new_snap.creation_time = snapshot_name snapshot_xml_path = new_snap.xml options += "--redefine %s --current" % snapshot_xml_path snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." 
% snapshot_result.stderr.strip()) if status_error: if not snapshot_del_test: raise error.TestFail("Success to create snapshot in negative" " case\nDetail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path status, output = session.cmd_status_output(echo_cmd) logging.debug("The echo output in domain is: '%s'", output) if status: raise error.TestFail("'%s' run failed with '%s'" % (tmp_file_path, output)) status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("File created with content: '%s'", output) session.close() # As only internal snapshot revert works now, let's only do revert # with internal, and move the all skip external cases back to pass. # After external also supported, just move the following code back. if snapshot_disk == 'internal': # Destroy vm for snapshot revert. if not libvirt_version.version_compare(1, 2, 3): virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options, debug=True) if revert_result.exit_status: # Attempts to revert external snapshots will FAIL with an error # "revert to external disk snapshot not supported yet" or "revert # to external snapshot not supported yet" since d410e6f. Thus, # let's check for that and handle as a SKIP for now. Check bug: # https://bugzilla.redhat.com/show_bug.cgi?id=1071264 if re.search( "revert to external \w* ?snapshot not supported yet", revert_result.stderr): raise error.TestNAError(revert_result.stderr.strip()) else: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail( "Revert command successed, but VM is not " "paused after reverting with --paused" " option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) logging.debug("After revert cat file output='%s'", output) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() # Test delete snapshot without "--metadata", delete external disk # snapshot will fail for now. # Only do this when snapshot creat succeed which filtered in cfg file. if snapshot_del_test: if snapshot_name: del_result = virsh.snapshot_delete(vm_name, snapshot_name, debug=True, ignore_status=True) del_status = del_result.exit_status snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name if del_status: if not status_error: raise error.TestFail("Failed to delete snapshot.") else: if not os.path.exists(snap_xml_path): raise error.TestFail( "Snapshot xml file %s missing" % snap_xml_path) else: if status_error: err_msg = "Snapshot delete succeed but expect fail." 
raise error.TestFail(err_msg) else: if os.path.exists(snap_xml_path): raise error.TestFail("Snapshot xml file %s still" % snap_xml_path + " exist") finally: if vm.is_alive(): vm.destroy(gracefully=False) virsh.detach_disk(vm_name, target="vdf", extra="--persistent") if image: image.remove() if del_status and snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk) vmxml_backup.sync("--snapshots-metadata") libvirtd = utils_libvirtd.Libvirtd() if disk_source_protocol == 'gluster': utlv.setup_or_cleanup_gluster(False, vol_name, brick_path) if multi_gluster_disks: brick_path = os.path.join(tmp_dir, "gluster-pool2") utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path) libvirtd.restart() if snapshot_xml_path: if os.path.exists(snapshot_xml_path): os.unlink(snapshot_xml_path) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name=vol_name) except error.TestFail, detail: libvirtd.restart() logging.error(str(detail))
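# The delete checks above rely on the per-snapshot definition that libvirt
# keeps under /var/lib/libvirt/qemu/snapshot/<vm_name>/<snap_name>.xml
# (see snap_cfg_path): after a plain "snapshot-delete" without --metadata
# that file is expected to disappear, while a failed delete leaves it in
# place.  A minimal sketch of that existence check (helper name is
# illustrative):
import os


def snapshot_metadata_exists(vm_name, snap_name):
    xml_path = "/var/lib/libvirt/qemu/snapshot/%s/%s.xml" % (vm_name,
                                                             snap_name)
    return os.path.exists(xml_path)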
def run(test, params, env): """ Test vm backingchain, blockcopy """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = params.get('error_msg', '') case = params.get('case', '') blockcommand = params.get('blockcommand', '') blk_top = int(params.get('top', 0)) blk_base = int(params.get('base', 0)) opts = params.get('opts', '--verbose --wait') check_func = params.get('check_func', '') disk_type = params.get('disk_type', '') disk_src = params.get('disk_src', '') driver_type = params.get('driver_type', 'qcow2') vol_name = params.get('vol_name', 'vol_blockpull') pool_name = params.get('pool_name', '') brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name) vg_name = params.get('vg_name', 'HostVG') vol_size = params.get('vol_size', '10M') vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) bkxml = vmxml.copy() # List to collect paths to delete after test file_to_del = [] virsh_dargs = {'debug': True, 'ignore_status': False} try: all_disks = vmxml.get_disk_source(vm_name) if not all_disks: test.error('Not found any disk file in vm.') image_file = all_disks[0].find('source').get('file') logging.debug('Image file of vm: %s', image_file) # Get all dev of virtio disks to calculate the dev of new disk all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')] disk_dev = all_vdisks[-1].find('target').get('dev') new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1) # Setup iscsi target if disk_src == 'iscsi': disk_target = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target: %s', disk_target) # Setup lvm elif disk_src == 'lvm': # Stop multipathd to avoid vgcreate fail multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Setup iscsi target device_name = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target for lvm: %s', device_name) # Create logical device logical_device = device_name lv_utils.vg_create(vg_name, logical_device) vg_created = True # Create logical volume as backing store vol_bk, vol_disk = 'vol1', 'vol2' lv_utils.lv_create(vg_name, vol_bk, vol_size) disk_target = '/dev/%s/%s' % (vg_name, vol_bk) src_vol = '/dev/%s/%s' % (vg_name, vol_disk) # Setup gluster elif disk_src == 'gluster': host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, brick_path=brick_path, **params) logging.debug(host_ip) gluster_img = 'test.img' img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt" % (host_ip, vol_name, img_create_cmd), shell=True) disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img) else: test.error('Wrong disk source, unsupported by this test.') new_image = os.path.join(os.path.split(image_file)[0], 'test.img') params['snapshot_list'] = ['s%d' % i for i in range(1, 5)] if disk_src == 'lvm': new_image = src_vol if disk_type == 'block': new_image = disk_target for i in range(2, 6): lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size) snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)] else: file_to_del.append(new_image) snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']] cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image) if disk_type == 'block' and driver_type == 'raw': pass else: 
process.run(cmd_create_img, verbose=True, shell=True) info_new = utils_misc.get_image_info(new_image) logging.debug(info_new) # Create xml of new disk and add it to vmxml if disk_type: new_disk = Disk() new_disk.xml = libvirt.create_disk_xml({ 'type_name': disk_type, 'driver_type': driver_type, 'target_dev': new_dev, 'source_file': new_image }) logging.debug(new_disk.xml) vmxml.devices = vmxml.devices.append(new_disk) vmxml.xmltreefile.write() logging.debug(vmxml) vmxml.sync() vm.start() logging.debug(virsh.dumpxml(vm_name)) # Create backing chain for i in range(len(params['snapshot_list'])): virsh.snapshot_create_as( vm_name, '%s --disk-only --diskspec %s,file=%s,stype=%s' % (params['snapshot_list'][i], new_dev, snapshot_image_list[i], disk_type), **virsh_dargs ) # Get path of each snapshot file snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines() for line in snaps: if line.lstrip().startswith(('hd', 'sd', 'vd')): file_to_del.append(line.split()[-1]) qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1] if libvirt_storage.check_qemu_image_lock_support(): qemu_img_cmd += " -U" bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text if not disk_type == 'block': bc_chain = snapshot_image_list[::-1] + [new_image, disk_target] else: bc_chain = snapshot_image_list[::-1] + [new_image] bc_result = check_backingchain(bc_chain, bc_info) if not bc_result: test.fail('qemu-img info output of backing chain is not correct: %s' % bc_info) # Generate blockpull/blockcommit options virsh_blk_cmd = eval('virsh.%s' % blockcommand) if blockcommand == 'blockpull' and blk_base != 0: opts += '--base {dev}[{}]'.format(blk_base, dev=new_dev) elif blockcommand == 'blockcommit': opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else '' opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else '' opts += opt_top + opt_base + ' --active' if blk_top == 0 else '' # Do blockpull/blockcommit virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs) if blockcommand == 'blockcommit': virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("XML after %s: %s" % (blockcommand, vmxml)) # Check backing chain after blockpull/blockcommit check_bc_func_name = 'check_bc_%s' % check_func if check_bc_func_name in globals(): check_bc = eval(check_bc_func_name) if not callable(check_bc): logging.warning('Function "%s" is not callable.', check_bc_func_name) if not check_bc(blockcommand, vmxml, new_dev, bc_chain): test.fail('Backing chain check after %s failed' % blockcommand) else: logging.warning('Function "%s" is not implemented.', check_bc_func_name) virsh.dumpxml(vm_name, debug=True) # Check whether login is successful try: vm.wait_for_login().close() except Exception as e: test.fail('Vm login failed') finally: logging.info('Start cleaning up.') for ss in params.get('snapshot_list', []): virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True) bkxml.sync() for path in file_to_del: logging.debug('Remove %s', path) if os.path.exists(path): os.remove(path) if disk_src == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src == 'lvm': process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True) if 'vol_bk' in locals(): lv_utils.lv_remove(vg_name, vol_bk) if 'vg_created' in locals() and vg_created: lv_utils.vg_remove(vg_name) cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True, verbose=True).strip() 
if pv_name: process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True) libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src == 'gluster': gluster.setup_or_cleanup_gluster( is_setup=False, brick_path=brick_path, **params) if 'multipathd_status' in locals() and multipathd_status: multipathd.start()
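# check_backingchain() is not shown in this excerpt.  One straightforward
# way to implement it is to verify that every image of the expected chain
# appears, in order, in the "qemu-img info --backing-chain" output captured
# above; the sketch below assumes exactly that and nothing more.
def check_backingchain(expected_chain, qemu_img_info_output):
    pos = 0
    for image_path in expected_chain:
        idx = qemu_img_info_output.find(image_path, pos)
        if idx < 0:
            return False
        pos = idx + len(image_path)
    return True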
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Global variable to store max/current memory, # it may change after attach/detach new_max_mem = None new_cur_mem = None def consume_vm_mem(size=1000, timeout=360): """ To consume guest memory, default size is 1000M """ session = vm.wait_for_login() # Mount tmpfs on /mnt and write to a file on it, # it is the memory operation sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs " "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M" " count={0}".format(size)) session.cmd(sh_cmd, timeout=timeout) session.close() def mount_hugepages(page_size): """ To mount hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc """ if page_size == 4: perm = "" else: perm = "pagesize=%dK" % page_size tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_status: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm) def setup_hugepages(page_size=2048, shp_num=2000): """ To setup hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc :param shp_num: number of hugepage, string type """ mount_hugepages(page_size) utils_memory.set_num_huge_pages(shp_num) config.hugetlbfs_mount = ["/dev/hugepages"] utils_libvirtd.libvirtd_restart() def restore_hugepages(page_size=4): """ To recover hugepages :param page_size: unit is kB, it can be 4,2048,1048576,etc """ mount_hugepages(page_size) config.restore() utils_libvirtd.libvirtd_restart() def check_qemu_cmd(max_mem_rt, tg_size): """ Check qemu command line options. :param max_mem_rt: size of max memory :param tg_size: Target hotplug memory size :return: None """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if discard: if libvirt_version.version_compare(7, 3, 0): cmd = cmd + " | grep " + '\\"discard-data\\":true' else: cmd += " | grep 'discard-data=yes'" elif max_mem_rt: cmd += (" | grep 'slots=%s,maxmem=%sk'" % (max_mem_slots, max_mem_rt)) if tg_size: size = int(tg_size) * 1024 if huge_pages or discard or cold_plug_discard: cmd_str = 'memdimm.\|memory-backend-file,id=ram-node.' cmd += ( " | grep 'memory-backend-file,id=%s' | grep 'size=%s" % (cmd_str, size)) else: cmd_str = 'mem.\|memory-backend-ram,id=ram-node.' cmd += ( " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" % (cmd_str, size)) if pg_size: cmd += ",host-nodes=%s" % node_mask if numa_memnode: for node in numa_memnode: if ('nodeset' in node and node['nodeset'] in node_mask): cmd += ",policy=%s" % node['mode'] cmd += ".*pc-dimm,node=%s" % tg_node if mem_addr: cmd += (".*slot=%s" % (mem_addr['slot'])) cmd += "'" if cold_plug_discard: cmd += " | grep 'discard-data=yes'" # Run the command result = process.run(cmd, shell=True, verbose=True, ignore_status=True) if result.exit_status: test.fail('Qemu command check fail.') def check_guest_meminfo(old_mem, check_option): """ Check meminfo on guest. 
""" assert old_mem is not None session = vm.wait_for_login() # Hot-plugged memory should be online by udev rules udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules" udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",' ' ATTR{state}=="offline", ATTR{state}="online"') cmd = ("grep memory %s || echo '%s' >> %s" % (udev_file, udev_rules, udev_file)) session.cmd(cmd) # Wait a while for new memory to be detected. utils_misc.wait_for( lambda: vm.get_totalmem_sys(online) != int(old_mem), 30, first=20.0) new_mem = vm.get_totalmem_sys(online) session.close() logging.debug("Memtotal on guest: %s", new_mem) no_of_times = 1 if at_times: no_of_times = at_times if check_option == "attach": if new_mem != int(old_mem) + (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "attach memory device") if check_option == "detach": if new_mem != int(old_mem) - (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "detach memory device") def check_dom_xml(at_mem=False, dt_mem=False): """ Check domain xml options. """ # Global variable to store max/current memory global new_max_mem global new_cur_mem if attach_option.count("config"): dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) else: dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) try: xml_max_mem_rt = int(dom_xml.max_mem_rt) xml_max_mem = int(dom_xml.max_mem) xml_cur_mem = int(dom_xml.current_mem) assert int(max_mem_rt) == xml_max_mem_rt # Check attached/detached memory logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem) logging.info("detach_device is %s", detach_device) if at_mem: if at_times: assert int(max_mem) + (int(tg_size) * at_times) == xml_max_mem else: assert int(max_mem) + int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory if at_times: assert int(cur_mem) + (int(tg_size) * at_times) == xml_cur_mem else: assert int(cur_mem) + int(tg_size) == xml_cur_mem new_max_mem = xml_max_mem new_cur_mem = xml_cur_mem mem_dev = dom_xml.get_devices("memory") memory_devices = 1 if at_times: memory_devices = at_times if len(mem_dev) != memory_devices: test.fail("Found wrong number of memory device") assert int(tg_size) == int(mem_dev[0].target.size) assert int(tg_node) == int(mem_dev[0].target.node) elif dt_mem: if at_times: assert int(new_max_mem) - (int(tg_size) * at_times) == xml_max_mem assert int(new_cur_mem) - (int(tg_size) * at_times) == xml_cur_mem else: assert int(new_max_mem) - int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory assert int(new_cur_mem) - int(tg_size) == xml_cur_mem except AssertionError: utils_misc.log_last_traceback() test.fail("Found unmatched memory setting from domain xml") def check_mem_align(): """ Check if set memory align to 256 """ dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) dom_mem = {} dom_mem['maxMemory'] = int(dom_xml.max_mem_rt) dom_mem['memory'] = int(dom_xml.memory) dom_mem['currentMemory'] = int(dom_xml.current_mem) cpuxml = dom_xml.cpu numa_cell = cpuxml.numa_cell dom_mem['numacellMemory'] = int(numa_cell[0]['memory']) sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell]) attached_mem = dom_xml.get_devices(device_type='memory')[0] dom_mem['attached_mem'] = attached_mem.target.size all_align = True for key in dom_mem: logging.info('%-20s:%15d', key, dom_mem[key]) if dom_mem[key] % 262144: logging.error('%s not align to 256', key) if key == 'currentMemory': continue all_align = False if not all_align: test.fail('Memory not align to 256') if dom_mem['memory'] == 
sum_numa_mem + dom_mem['attached_mem']: logging.info( 'Check Pass: Memory is equal to (all numa memory + memory device)' ) else: test.fail( 'Memory is not equal to (all numa memory + memory device)') return dom_mem def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) def _wait_for_restore(): try: virsh.restore(save_file, debug=True, ignore_status=False) return True except Exception as e: logging.error(e) utils_misc.wait_for(_wait_for_restore, 30, step=5) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def add_device(dev_xml, attach, at_error=False): """ Add memory device by attachment or modify domain xml. """ if attach: ret = virsh.attach_device(vm_name, dev_xml.xml, flagstr=attach_option, debug=True) libvirt.check_exit_status(ret, at_error) else: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if numa_cells: del vmxml.max_mem del vmxml.current_mem vmxml.add_device(dev_xml) vmxml.sync() def modify_domain_xml(): """ Modify domain xml and define it. """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) mem_unit = params.get("mem_unit", "KiB") vcpu = params.get("vcpu", "4") if max_mem_rt: vmxml.max_mem_rt = int(max_mem_rt) vmxml.max_mem_rt_slots = max_mem_slots vmxml.max_mem_rt_unit = mem_unit if max_mem: vmxml.max_mem = int(max_mem) if cur_mem: vmxml.current_mem = int(cur_mem) if memory_val: vmxml.memory = int(memory_val) if vcpu: vmxml.vcpu = int(vcpu) vcpu_placement = params.get("vcpu_placement", "static") vmxml.placement = vcpu_placement if numa_memnode: vmxml.numa_memory = {} vmxml.numa_memnode = numa_memnode else: try: del vmxml.numa_memory del vmxml.numa_memnode except Exception: # Not exists pass if numa_cells: cells = [ast.literal_eval(x) for x in numa_cells] # Rounding the numa memory values if align_mem_values: for cell in range(cells.__len__()): memory_value = str( utils_numeric.align_value(cells[cell]["memory"], align_to_value)) cells[cell]["memory"] = memory_value cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>" cpu_mode = params.get("cpu_mode") model_fallback = params.get("model_fallback") if cpu_mode: cpu_xml.mode = cpu_mode if model_fallback: cpu_xml.fallback = model_fallback cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells) vmxml.cpu = cpu_xml # Delete memory and currentMemory tag, # libvirt will fill it automatically del vmxml.max_mem del vmxml.current_mem # hugepages setting if huge_pages or discard or cold_plug_discard: membacking = vm_xml.VMMemBackingXML() membacking.discard = True membacking.source = '' membacking.source_type = 'file' if huge_pages: hugepages = vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(huge_pages)): pagexml = hugepages.PageXML() pagexml.update(huge_pages[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml: %s", vmxml) vmxml.sync() pre_vm_state = params.get("pre_vm_state", "running") attach_device = "yes" == params.get("attach_device", "no") detach_device = "yes" == params.get("detach_device", "no") detach_alias = "yes" == params.get("detach_alias", "no") detach_alias_options = params.get("detach_alias_options") attach_error = "yes" == params.get("attach_error", "no") start_error = "yes" == params.get("start_error", "no") define_error = "yes" == params.get("define_error", "no") 
detach_error = "yes" == params.get("detach_error", "no") maxmem_error = "yes" == params.get("maxmem_error", "no") attach_option = params.get("attach_option", "") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") wait_before_save_secs = int(params.get("wait_before_save_secs", 0)) test_managedsave = "yes" == params.get("test_managedsave", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_mem_binding = "yes" == params.get("test_mem_binding", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") add_mem_device = "yes" == params.get("add_mem_device", "no") test_dom_xml = "yes" == params.get("test_dom_xml", "no") max_mem = params.get("max_mem") max_mem_rt = params.get("max_mem_rt") max_mem_slots = params.get("max_mem_slots", "16") memory_val = params.get('memory_val', '') mem_align = 'yes' == params.get('mem_align', 'no') hot_plug = 'yes' == params.get('hot_plug', 'no') cur_mem = params.get("current_mem") numa_cells = params.get("numa_cells", "").split() set_max_mem = params.get("set_max_mem") align_mem_values = "yes" == params.get("align_mem_values", "no") align_to_value = int(params.get("align_to_value", "65536")) hot_reboot = "yes" == params.get("hot_reboot", "no") rand_reboot = "yes" == params.get("rand_reboot", "no") guest_known_unplug_errors = [] guest_known_unplug_errors.append(params.get("guest_known_unplug_errors")) host_known_unplug_errors = [] host_known_unplug_errors.append(params.get("host_known_unplug_errors")) discard = "yes" == params.get("discard", "no") cold_plug_discard = "yes" == params.get("cold_plug_discard", "no") if cold_plug_discard or discard: mem_discard = 'yes' else: mem_discard = 'no' # params for attached device mem_model = params.get("mem_model", "dimm") tg_size = params.get("tg_size") tg_sizeunit = params.get("tg_sizeunit", 'KiB') tg_node = params.get("tg_node", 0) pg_size = params.get("page_size") pg_unit = params.get("page_unit", "KiB") huge_page_num = int(params.get('huge_page_num', 2000)) node_mask = params.get("node_mask", "0") mem_addr = ast.literal_eval(params.get("memory_addr", "{}")) huge_pages = [ ast.literal_eval(x) for x in params.get("huge_pages", "").split() ] numa_memnode = [ ast.literal_eval(x) for x in params.get("numa_memnode", "").split() ] at_times = int(params.get("attach_times", 1)) online = params.get("mem_online", "no") config = utils_config.LibvirtQemuConfig() setup_hugepages_flag = params.get("setup_hugepages") if (setup_hugepages_flag == "yes"): cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family')\ else cpu_util.get_cpu_arch() if cpu_arch == 'power8': pg_size = '16384' huge_page_num = 200 elif cpu_arch == 'power9': pg_size = '2048' huge_page_num = 2000 [x.update({'size': pg_size}) for x in huge_pages] setup_hugepages(int(pg_size), shp_num=huge_page_num) # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not libvirt_version.version_compare(1, 2, 14): test.cancel("Memory hotplug not supported in current libvirt version.") if 'align_256m' in params.get('name', ''): arch = platform.machine() if arch.lower() != 'ppc64le': test.cancel('This case is for ppc64le only.') if align_mem_values: # Rounding the following values to 'align' max_mem = utils_numeric.align_value(max_mem, align_to_value) max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value) cur_mem = utils_numeric.align_value(cur_mem, align_to_value) tg_size = utils_numeric.align_value(tg_size, align_to_value) try: # Drop caches first for host has enough memory drop_caches() # Destroy domain first if vm.is_alive(): vm.destroy(gracefully=False) modify_domain_xml() numa_info = utils_misc.NumaInfo() logging.debug(numa_info.get_all_node_meminfo()) # Start the domain any way if attach memory device old_mem_total = None if attach_device: vm.start() session = vm.wait_for_login() old_mem_total = vm.get_totalmem_sys(online) logging.debug("Memtotal on guest: %s", old_mem_total) session.close() elif discard: vm.start() session = vm.wait_for_login() check_qemu_cmd(max_mem_rt, tg_size) dev_xml = None # To attach the memory device. if (add_mem_device and not hot_plug) or cold_plug_discard: at_times = int(params.get("attach_times", 1)) randvar = 0 if rand_reboot: rand_value = random.randint(15, 25) logging.debug("reboots at %s", rand_value) for x in xrange(at_times): # If any error excepted, command error status should be # checked in the last time device_alias = "ua-" + str(uuid.uuid4()) dev_xml = utils_hotplug.create_mem_xml( tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model, mem_discard, device_alias) randvar = randvar + 1 logging.debug("attaching device count = %s", x) if x == at_times - 1: add_device(dev_xml, attach_device, attach_error) else: add_device(dev_xml, attach_device) if hot_reboot: vm.reboot() vm.wait_for_login() if rand_reboot and randvar == rand_value: vm.reboot() vm.wait_for_login() randvar = 0 rand_value = random.randint(15, 25) logging.debug("reboots at %s", rand_value) # Check domain xml after attach device. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Set domain state if pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): try: vm.start() vm.wait_for_login().close() except virt_vm.VMStartError as detail: if start_error: pass else: except_msg = "memory hotplug isn't supported by this QEMU binary" if except_msg in detail.reason: test.cancel(detail) test.fail(detail) # Set memory operation if set_max_mem: max_mem_option = params.get("max_mem_option", "") ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option) libvirt.check_exit_status(ret, maxmem_error) # Hotplug memory device if add_mem_device and hot_plug: process.run('ps -ef|grep qemu', shell=True, verbose=True) session = vm.wait_for_login() original_mem = vm.get_totalmem_sys() dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model) add_device(dev_xml, True) mem_after = vm.get_totalmem_sys() params['delta'] = mem_after - original_mem # Check domain xml after start the domain. 
if test_dom_xml: check_dom_xml(at_mem=attach_device) if mem_align: dom_mem = check_mem_align() check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem']) if hot_plug and params['delta'] != dom_mem['attached_mem']: test.fail( 'Memory after attach not equal to original mem + attached mem' ) # Check qemu command line if test_qemu_cmd: check_qemu_cmd(max_mem_rt, tg_size) # Check guest meminfo after attachment if (attach_device and not attach_option.count("config") and not any([attach_error, start_error])): check_guest_meminfo(old_mem_total, check_option="attach") # Consuming memory on guest, # to verify memory changes by numastat if test_mem_binding: pid = vm.get_pid() old_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", old_numastat) # Increase the memory consumed to 1500 consume_vm_mem(1500) new_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", new_numastat) # Only check total memory which is the last element if float(new_numastat[-1]) - float(old_numastat[-1]) < 0: test.fail("Numa memory can't be consumed on guest") # Run managedsave command to check domain xml. if test_managedsave: # Wait 10s for vm to be ready before managedsave time.sleep(wait_before_save_secs) ret = virsh.managedsave(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) def _wait_for_vm_start(): try: vm.start() return True except Exception as e: logging.error(e) utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5) vm.wait_for_login().close() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Run save and restore command to check domain xml if test_save_restore: # Wait 10s for vm to be ready before save time.sleep(wait_before_save_secs) check_save_restore() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check domain xml after restarting libvirtd if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Detach the memory device unplug_failed_with_known_error = False if detach_device: dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node, node_mask, mem_model, mem_discard) for x in xrange(at_times): if not detach_alias: ret = virsh.detach_device(vm_name, dev_xml.xml, flagstr=attach_option, debug=True) else: ret = virsh.detach_device_alias(vm_name, device_alias, detach_alias_options, debug=True) if ret.stderr and host_known_unplug_errors: for known_error in host_known_unplug_errors: if (known_error[0] == known_error[-1]) and \ known_error.startswith(("'")): known_error = known_error[1:-1] if known_error in ret.stderr: unplug_failed_with_known_error = True logging.debug( "Known error occurred in Host, while" " hot unplug: %s", known_error) if unplug_failed_with_known_error: break try: libvirt.check_exit_status(ret, detach_error) except Exception as detail: dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=5) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if os.path.exists(dmesg_file): with open(dmesg_file, 'r') as f: flag = re.findall( r'memory memory\d+?: Offline failed', f.read()) if not flag: # The attached memory is used by vm, and it could # not be unplugged.The result is expected os.remove(dmesg_file) test.fail(detail) unplug_failed_with_known_error = True os.remove(dmesg_file) # Check whether a known 
error occurred or not dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=4) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if guest_known_unplug_errors and os.path.exists(dmesg_file): for known_error in guest_known_unplug_errors: if (known_error[0] == known_error[-1]) and \ known_error.startswith(("'")): known_error = known_error[1:-1] with open(dmesg_file, 'r') as f: if known_error in f.read(): unplug_failed_with_known_error = True logging.debug( "Known error occurred, while hot" " unplug: %s", known_error) if test_dom_xml and not unplug_failed_with_known_error: check_dom_xml(dt_mem=detach_device) # Remove dmesg temp file if os.path.exists(dmesg_file): os.remove(dmesg_file) except xcepts.LibvirtXMLError: if define_error: pass finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") if (setup_hugepages_flag == "yes"): restore_hugepages() vmxml_backup.sync()
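# read_from_numastat() is not defined in this excerpt.  The memory-binding
# check above only consumes the last element of the selected row, which
# matches the per-process "numastat -p <pid>" layout where the final column
# is the node total.  A minimal sketch under that assumption (process is
# avocado.utils.process, as used elsewhere in these tests):
from avocado.utils import process


def read_from_numastat(pid, key):
    output = process.run("numastat -p %s" % pid,
                         shell=True, verbose=True).stdout_text
    for line in output.splitlines():
        if line.strip().startswith(key):
            # Drop the row label; keep the per-node values plus the total.
            return line.split()[1:]
    return []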
def run(test, params, env): """ Test virsh snapshot command when disk in all kinds of type. (1). Init the variables from params. (2). Create a image by specifice format. (3). Attach disk to vm. (4). Snapshot create. (5). Snapshot revert. (6). cleanup. """ # Init variables. vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) image_format = params.get("snapshot_image_format", "qcow2") status_error = ("yes" == params.get("status_error", "no")) snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no")) snapshot_current = ("yes" == params.get("snapshot_current", "no")) snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused", "no")) # Some variable for xmlfile of snapshot. snapshot_memory = params.get("snapshot_memory", "internal") snapshot_disk = params.get("snapshot_disk", "internal") # Get a tmp_dir. tmp_dir = data_dir.get_tmp_dir() # Create a image. params['image_name'] = "snapshot_test" params['image_format'] = image_format params['image_size'] = "1M" image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test") img_path, _ = image.create(params) # Do the attach action. result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra="--persistent --subdriver %s" % image_format) if result.exit_status: raise error.TestNAError("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Init snapshot_name snapshot_name = None snapshot_external_disk = [] try: # Create snapshot. if snapshot_from_xml: snapshot_name = "snapshot_test" lines = [ "<domainsnapshot>\n", "<name>%s</name>\n" % snapshot_name, "<description>Snapshot Test</description>\n" ] if snapshot_memory == "external": memory_external = os.path.join(tmp_dir, "snapshot_memory") snapshot_external_disk.append(memory_external) lines.append("<memory snapshot=\'%s\' file='%s'/>\n" % (snapshot_memory, memory_external)) else: lines.append("<memory snapshot='%s'/>\n" % snapshot_memory) # Add all disks into xml file. disks = vm.get_disk_devices().values() lines.append("<disks>\n") for disk in disks: lines.append("<disk name='%s' snapshot='%s'>\n" % (disk['source'], snapshot_disk)) if snapshot_disk == "external": disk_external = os.path.join( tmp_dir, "%s.snap" % os.path.basename(disk['source'])) snapshot_external_disk.append(disk_external) lines.append("<source file='%s'/>\n" % disk_external) lines.append("</disk>\n") lines.append("</disks>\n") lines.append("</domainsnapshot>") snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() snapshot_result = virsh.snapshot_create( vm_name, ("--xmlfile %s" % snapshot_xml_path)) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) else: options = "" snapshot_result = virsh.snapshot_create(vm_name, options) if snapshot_result.exit_status: if status_error: return else: raise error.TestFail( "Failed to create snapshot. Error:%s." 
% snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_current: lines = [ "<domainsnapshot>\n", "<description>Snapshot Test</description>\n", "<state>running</state>\n", "<creationTime>%s</creationTime>" % snapshot_name, "</domainsnapshot>" ] snapshot_xml_path = "%s/snapshot_xml" % tmp_dir snapshot_xml_file = open(snapshot_xml_path, "w") snapshot_xml_file.writelines(lines) snapshot_xml_file.close() options += "--redefine %s --current" % snapshot_xml_path if snapshot_result.exit_status: raise error.TestFail("Failed to create snapshot --current." "Error:%s." % snapshot_result.stderr.strip()) if status_error: raise error.TestFail( "Success to create snapshot in negative case\n" "Detail: %s" % snapshot_result) # Touch a file in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() # Init a unique name for tmp_file. tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") tmp_file_path = tmp_file.name tmp_file.close() status, output = session.cmd_status_output("touch %s" % tmp_file_path) if status: raise error.TestFail("Touch file in vm failed. %s" % output) session.close() # Destroy vm for snapshot revert. virsh.destroy(vm_name) # Revert snapshot. revert_options = "" if snapshot_revert_paused: revert_options += " --paused" revert_result = virsh.snapshot_revert(vm_name, snapshot_name, revert_options) if revert_result.exit_status: raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip()) if vm.is_dead(): raise error.TestFail("Revert snapshot failed.") if snapshot_revert_paused: if vm.is_paused(): vm.resume() else: raise error.TestFail( "Revert command successed, but VM is not " "paused after reverting with --paused option.") # login vm. session = vm.wait_for_login() # Check the result of revert. status, output = session.cmd_status_output("cat %s" % tmp_file_path) if not status: raise error.TestFail("Tmp file exists, revert failed.") # Close the session. session.close() finally: virsh.detach_disk(vm_name, target="vdf", extra="--persistent") image.remove() if snapshot_name: virsh.snapshot_delete(vm_name, snapshot_name, "--metadata") for disk in snapshot_external_disk: if os.path.exists(disk): os.remove(disk)
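# With "--paused", snapshot-revert is expected to leave the domain in the
# paused state, which is why the branch above resumes it and fails the test
# otherwise.  A minimal sketch of the same check through the virsh wrappers
# used in these tests (helper name is illustrative):
from virttest import virsh


def resume_after_paused_revert(vm_name):
    state = virsh.domstate(vm_name).stdout_text.strip()
    if state != "paused":
        return False
    virsh.resume(vm_name)
    return True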
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Global variable to store max/current memory, # it may change after attach/detach new_max_mem = None new_cur_mem = None def get_vm_memtotal(session): """ Get guest total memory """ proc_meminfo = session.cmd_output("cat /proc/meminfo") # verify format and units are expected return int( re.search(r'MemTotal:\s+(\d+)\s+[kK]B', proc_meminfo).group(1)) def consume_vm_mem(size=1000, timeout=360): """ To consume guest memory, default size is 1000M """ session = vm.wait_for_login() # Mount tmpfs on /mnt and write to a file on it, # it is the memory operation sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs " "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M" " count={0}".format(size)) session.cmd(sh_cmd, timeout=timeout) session.close() def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if max_mem_rt: cmd += (" | grep 'slots=%s,maxmem=%sk'" % (max_mem_slots, max_mem_rt)) if tg_size: size = int(tg_size) * 1024 cmd_str = 'memdimm.\|memory-backend-ram,id=ram-node.' cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s" % (cmd_str, size)) if pg_size: cmd += ",host-nodes=%s" % node_mask if numa_memnode: for node in numa_memnode: if ('nodeset' in node and node['nodeset'] in node_mask): cmd += ",policy=%s" % node['mode'] cmd += ".*pc-dimm,node=%s" % tg_node if mem_addr: cmd += (".*slot=%s,addr=%s" % (mem_addr['slot'], int(mem_addr['base'], 16))) cmd += "'" # Run the command process.run(cmd, shell=True) def check_guest_meminfo(old_mem, check_option): """ Check meminfo on guest. """ assert old_mem is not None session = vm.wait_for_login() # Hot-plugged memory should be online by udev rules udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules" udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",' ' ATTR{state}=="offline", ATTR{state}="online"') cmd = ("grep memory %s || echo '%s' >> %s" % (udev_file, udev_rules, udev_file)) session.cmd(cmd) # Wait a while for new memory to be detected. utils_misc.wait_for(lambda: get_vm_memtotal(session) != int(old_mem), 20, first=15.0) new_mem = get_vm_memtotal(session) session.close() logging.debug("Memtotal on guest: %s", new_mem) no_of_times = 1 if at_times: no_of_times = at_times if check_option == "attach": if new_mem != int(old_mem) + (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "attach memory device") if check_option == "detach": if new_mem != int(old_mem) - (int(tg_size) * no_of_times): test.fail("Total memory on guest couldn't changed after " "detach memory device") def check_dom_xml(at_mem=False, dt_mem=False): """ Check domain xml options. 
""" # Global variable to store max/current memory global new_max_mem global new_cur_mem if attach_option.count("config"): dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) else: dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) try: xml_max_mem_rt = int(dom_xml.max_mem_rt) xml_max_mem = int(dom_xml.max_mem) xml_cur_mem = int(dom_xml.current_mem) assert int(max_mem_rt) == xml_max_mem_rt # Check attached/detached memory if at_mem: if at_times: assert int(max_mem) + (int(tg_size) * at_times) == xml_max_mem else: assert int(max_mem) + int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory if at_times: assert int(cur_mem) + (int(tg_size) * at_times) == xml_cur_mem else: assert int(cur_mem) + int(tg_size) == xml_cur_mem new_max_mem = xml_max_mem new_cur_mem = xml_cur_mem mem_dev = dom_xml.get_devices("memory") memory_devices = 1 if at_times: memory_devices = at_times if len(mem_dev) != memory_devices: test.fail("Found wrong number of memory device") assert int(tg_size) == int(mem_dev[0].target.size) assert int(tg_node) == int(mem_dev[0].target.node) elif dt_mem: if at_times: assert int(new_max_mem) - (int(tg_size) * at_times) == xml_max_mem assert int(new_cur_mem) - (int(tg_size) * at_times) == xml_cur_mem else: assert int(new_max_mem) - int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory assert int(new_cur_mem) - int(tg_size) == xml_cur_mem except AssertionError: utils_misc.log_last_traceback() test.fail("Found unmatched memory setting from domain xml") def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def create_mem_xml(): """ Create memory device xml. """ mem_xml = memory.Memory() mem_model = params.get("mem_model", "dimm") mem_xml.mem_model = mem_model if tg_size: tg_xml = memory.Memory.Target() tg_xml.size = int(tg_size) tg_xml.size_unit = tg_sizeunit # There is support for non-numa node if numa_cells: tg_xml.node = int(tg_node) mem_xml.target = tg_xml if pg_size: src_xml = memory.Memory.Source() src_xml.pagesize = int(pg_size) src_xml.pagesize_unit = pg_unit src_xml.nodemask = node_mask mem_xml.source = src_xml if mem_addr: mem_xml.address = mem_xml.new_mem_address(**{"attrs": mem_addr}) logging.debug("Memory device xml: %s", mem_xml) return mem_xml.copy() def add_device(dev_xml, at_error=False): """ Add memory device by attachment or modify domain xml. """ if attach_device: ret = virsh.attach_device(vm_name, dev_xml.xml, flagstr=attach_option) libvirt.check_exit_status(ret, at_error) else: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if numa_cells: del vmxml.max_mem del vmxml.current_mem vmxml.add_device(dev_xml) vmxml.sync() def modify_domain_xml(): """ Modify domain xml and define it. 
""" vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) mem_unit = params.get("mem_unit", "KiB") vcpu = params.get("vcpu", "4") if max_mem_rt: vmxml.max_mem_rt = int(max_mem_rt) vmxml.max_mem_rt_slots = max_mem_slots vmxml.max_mem_rt_unit = mem_unit if vcpu: vmxml.vcpu = int(vcpu) vcpu_placement = params.get("vcpu_placement", "static") vmxml.placement = vcpu_placement if numa_memnode: vmxml.numa_memory = {} vmxml.numa_memnode = numa_memnode else: try: del vmxml.numa_memory del vmxml.numa_memnode except Exception: # Not exists pass if numa_cells: cells = [ast.literal_eval(x) for x in numa_cells] # Rounding the numa memory values if align_mem_values: for cell in range(cells.__len__()): memory_value = str( utils_numeric.align_value(cells[cell]["memory"], align_to_value)) cells[cell]["memory"] = memory_value cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" cpu_mode = params.get("cpu_mode") model_fallback = params.get("model_fallback") if cpu_mode: cpu_xml.mode = cpu_mode if model_fallback: cpu_xml.fallback = model_fallback cpu_xml.numa_cell = cells vmxml.cpu = cpu_xml # Delete memory and currentMemory tag, # libvirt will fill it automatically del vmxml.max_mem del vmxml.current_mem # hugepages setting if huge_pages: membacking = vm_xml.VMMemBackingXML() hugepages = vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(huge_pages)): pagexml = hugepages.PageXML() pagexml.update(huge_pages[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml: %s", vmxml) vmxml.sync() pre_vm_state = params.get("pre_vm_state", "running") attach_device = "yes" == params.get("attach_device", "no") detach_device = "yes" == params.get("detach_device", "no") attach_error = "yes" == params.get("attach_error", "no") start_error = "yes" == params.get("start_error", "no") detach_error = "yes" == params.get("detach_error", "no") maxmem_error = "yes" == params.get("maxmem_error", "no") attach_option = params.get("attach_option", "") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_managedsave = "yes" == params.get("test_managedsave", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_mem_binding = "yes" == params.get("test_mem_binding", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") add_mem_device = "yes" == params.get("add_mem_device", "no") test_dom_xml = "yes" == params.get("test_dom_xml", "no") max_mem = params.get("max_mem") max_mem_rt = params.get("max_mem_rt") max_mem_slots = params.get("max_mem_slots", "16") cur_mem = params.get("current_mem") numa_cells = params.get("numa_cells", "").split() set_max_mem = params.get("set_max_mem") align_mem_values = "yes" == params.get("align_mem_values", "no") align_to_value = int(params.get("align_to_value", "65536")) known_unplug_errors = [] known_unplug_errors.append(params.get("known_unplug_errors")) # params for attached device tg_size = params.get("tg_size") tg_sizeunit = params.get("tg_sizeunit", 'KiB') tg_node = params.get("tg_node", 0) pg_size = params.get("page_size") pg_unit = params.get("page_unit", "KiB") node_mask = params.get("node_mask", "0") mem_addr = ast.literal_eval(params.get("memory_addr", "{}")) huge_pages = [ ast.literal_eval(x) for x in params.get("huge_pages", "").split() ] numa_memnode = [ ast.literal_eval(x) for x in params.get("numa_memnode", "").split() ] at_times = int(params.get("attach_times", 1)) # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not libvirt_version.version_compare(1, 2, 14): test.cancel("Memory hotplug not supported in current libvirt version.") if align_mem_values: # Rounding the following values to 'align' max_mem = utils_numeric.align_value(max_mem, align_to_value) max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value) cur_mem = utils_numeric.align_value(cur_mem, align_to_value) tg_size = utils_numeric.align_value(tg_size, align_to_value) try: # Drop caches first for host has enough memory drop_caches() # Destroy domain first if vm.is_alive(): vm.destroy(gracefully=False) modify_domain_xml() # Start the domain any way if attach memory device old_mem_total = None if attach_device: vm.start() session = vm.wait_for_login() old_mem_total = get_vm_memtotal(session) logging.debug("Memtotal on guest: %s", old_mem_total) session.close() dev_xml = None # To attach the memory device. if add_mem_device: at_times = int(params.get("attach_times", 1)) dev_xml = create_mem_xml() for x in xrange(at_times): # If any error excepted, command error status should be # checked in the last time if x == at_times - 1: add_device(dev_xml, attach_error) else: add_device(dev_xml) # Check domain xml after attach device. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Set domain state if pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Cann't create the domain") elif vm.is_dead(): try: vm.start() vm.wait_for_login().close() except virt_vm.VMStartError as detail: if start_error: pass else: test.fail(detail) # Set memory operation if set_max_mem: max_mem_option = params.get("max_mem_option", "") ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option) libvirt.check_exit_status(ret, maxmem_error) # Check domain xml after start the domain. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check guest meminfo after attachment if (attach_device and not attach_option.count("config") and not any([attach_error, start_error])): check_guest_meminfo(old_mem_total, check_option="attach") # Consuming memory on guest, # to verify memory changes by numastat if test_mem_binding: pid = vm.get_pid() old_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", old_numastat) consume_vm_mem() new_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", new_numastat) # Only check total memory which is the last element if float(new_numastat[-1]) - float(old_numastat[-1]) < 0: test.fail("Numa memory can't be consumed on guest") # Run managedsave command to check domain xml. 
if test_managedsave: ret = virsh.managedsave(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) vm.start() vm.wait_for_login().close() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Run save and restore command to check domain xml if test_save_restore: check_save_restore() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check domain xml after restarting libvirtd if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Detach the memory device unplug_failed_with_known_error = False if detach_device: if not dev_xml: dev_xml = create_mem_xml() for x in xrange(at_times): ret = virsh.detach_device(vm_name, dev_xml.xml, flagstr=attach_option) try: libvirt.check_exit_status(ret, detach_error) except Exception as detail: dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=5) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if os.path.exists(dmesg_file): with open(dmesg_file, 'r') as f: flag = re.findall( r'memory memory\d+?: Offline failed', f.read()) if not flag: # The attached memory is used by vm, and it could not be unplugged # The result is expected os.remove(dmesg_file) test.fail(detail) unplug_failed_with_known_error = True os.remove(dmesg_file) # Check whether a known error occured or not dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir()) try: session = vm.wait_for_login() utils_misc.verify_dmesg(dmesg_log_file=dmesg_file, ignore_result=True, session=session, level_check=4) except Exception: session.close() test.fail("After memory unplug Unable to connect to VM" " or unable to collect dmesg") session.close() if known_unplug_errors and os.path.exists(dmesg_file): for known_error in known_unplug_errors: if (known_error[0] == known_error[-1]) and \ known_error.startswith(("'")): known_error = known_error[1:-1] with open(dmesg_file, 'r') as f: if known_error in f.read(): unplug_failed_with_known_error = True logging.debug( "Known error occured, while hot unplug" ": %s", known_error) if test_dom_xml and not unplug_failed_with_known_error: check_dom_xml(dt_mem=detach_device) # Remove dmesg temp file if os.path.exists(dmesg_file): os.remove(dmesg_file) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync()
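The detach path above tolerates a failed hot-unplug only when the guest dmesg explains it, either through the kernel's "Offline failed" message or through a caller-supplied known error string. A small stdlib-only sketch of that classification, assuming the dmesg output has already been captured to a local file (the path in the example is a placeholder), could look like this.

import re

def unplug_failed_is_expected(dmesg_path, known_errors=()):
    """Return True if the captured dmesg explains the failed hot-unplug."""
    with open(dmesg_path) as f:
        text = f.read()
    # The guest kernel logs e.g. "memory memory42: Offline failed" when a
    # memory block is still in use and cannot be offlined.
    if re.search(r"memory memory\d+: Offline failed", text):
        return True
    # Fall back to any caller-provided known error strings.
    return any(err and err in text for err in known_errors)

# Example:
# unplug_failed_is_expected("/tmp/dmesg.log", ["unable to remove memory"])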
os.remove(disks_xml[i].xml) libvirt.check_exit_status(ret) # Check disks in VM after hotunplug. if check_patitions_hotunplug: if not check_vm_partitions(devices, device_targets, False): raise error.TestFail("Device is still visible in VM after hot-unplug") finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snapshot in snapshot_lists: virsh.snapshot_delete(vm_name, snapshot, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") virsh.undefine(vm_name) virsh.define(vm_xml_file) os.remove(vm_xml_file) # Restore qemu_config file. qemu_config.restore() utils_libvirtd.libvirtd_restart() for img in disks_img: os.remove(img["source"])
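Both finally blocks above follow the same pattern: drop the snapshot metadata before restoring the original domain definition. A minimal sketch of that step with the virsh CLI (the domain name is a placeholder):

import subprocess

def drop_snapshot_metadata(domain):
    """Delete every snapshot of `domain`, keeping any disk files on disk."""
    out = subprocess.run(["virsh", "snapshot-list", domain, "--name"],
                         capture_output=True, text=True).stdout
    for name in filter(None, (line.strip() for line in out.splitlines())):
        # --metadata removes only libvirt's record of the snapshot, which is
        # what the cleanup above wants before undefining/redefining the domain.
        subprocess.run(["virsh", "snapshot-delete", domain, name, "--metadata"],
                       check=False)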
def run(test, params, env): """ Test nbd disk option. 1.Prepare backend storage 2.Use nbd to export the backend storage with or without TLS 3.Prepare a disk xml indicating to the backend storage 4.Start VM with disk hotplug/coldplug 5.Start snapshot or save/restore operations on ndb disk 6.Check some behaviours on VM 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def check_disk_save_restore(save_file): """ Check domain save and restore operation. :param save_file: the path to saved file """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Check domain snapshot operations. """ # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) device_target = 'vda' snapshot_name1_option = "--diskspec %s,file=%s,snapshot=external --disk-only --atomic" % ( device_target, snapshot_name1_file) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name1, snapshot_name1_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name1 not in snap_lists: test.fail("Snapshot %s doesn't exist" % snapshot_name1) # Check file can be created after snapshot def _check_file_create(filename): """ Check whether file with specified filename exists or not. :param filename: finename """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) cmd = ("echo" " teststring > /tmp/{0}".format(filename)) status, output = session.cmd_status_output(cmd) if status != 0: test.fail("Failed to touch one file on VM internal") except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise finally: if session: session.close() _check_file_create("disk.txt") # Create memory snapshot. snapshot_name2_mem_option = "--memspec file=%s,snapshot=external" % ( snapshot_name2_mem_file) snapshot_name2_disk_option = "--diskspec %s,file=%s,snapshot=external --atomic" % ( device_target, snapshot_name2_disk_file) snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option, snapshot_name2_disk_option) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name2, snapshot_name2_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name2 not in snap_lists: test.fail("Snapshot: %s doesn't exist" % snapshot_name2) _check_file_create("mem.txt") def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. 
""" try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdb") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") image_path = params.get("emulated_image") # Get config parameters status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") tls_enabled = "yes" == params.get("enable_tls", "no") enable_private_key_encryption = "yes" == params.get( "enable_private_key_encryption", "no") private_key_encrypt_passphrase = params.get("private_key_password") domain_operation = params.get("domain_operation") secret_uuid = None # Get snapshot attributes. snapshot_name1 = params.get("snapshot_name1") snapshot_name1_file = params.get("snapshot_name1_file") snapshot_name2 = params.get("snapshot_name2") snapshot_name2_mem_file = params.get("snapshot_name2_mem_file") snapshot_name2_disk_file = params.get("snapshot_name2_disk_file") # Initialize one NbdExport object nbd = None # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") export_name = params.get("export_name", None) deleteExisted = "yes" == params.get("deleteExisted", "yes") tls_bit = "no" if tls_enabled: tls_bit = "yes" # Create secret if enable_private_key_encryption: # this feature is enabled after libvirt 6.6.0 if not libvirt_version.version_compare(6, 6, 0): test.cancel( "current libvirt version doesn't support client private key encryption" ) utils_secret.clean_up_secrets() private_key_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", private_key_sec_uuid) private_key_sec_passwd = params.get("private_key_password", "redhat") ret = virsh.secret_set_value(private_key_sec_uuid, private_key_sec_passwd, encode=True, use_file=True, debug=True) libvirt.check_exit_status(ret) secret_uuid = private_key_sec_uuid # Initialize special test environment config for snapshot operations. if domain_operation == "snap_shot": first_disk = vm.get_first_disk_devices() image_path = first_disk['source'] device_target = 'vda' # Remove previous xml disks = vmxml.get_devices(device_type="disk") for disk_ in disks: if disk_.target['dev'] == device_target: vmxml.del_device(disk_) break # Create NbdExport object nbd = NbdExport( image_path, image_format=device_format, port=nbd_server_port, export_name=export_name, tls=tls_enabled, deleteExisted=deleteExisted, private_key_encrypt_passphrase=private_key_encrypt_passphrase, secret_uuid=secret_uuid) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd", "tls": "%s" % tls_bit} if export_name: source_attrs_dict.update({"name": "%s" % export_name}) disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update( {"hosts": [{ "name": nbd_server_host, "port": nbd_server_port }]}) # Add disk xml. disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": 'raw'} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) # Hotplug disk. 
if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) # Check save and restore operation and its result if domain_operation == 'save_restore': save_file = "/tmp/%s.save" % vm_name check_disk_save_restore(save_file) # Check attached nbd disk if check_partitions and not status_error: logging.debug("wait seconds for starting in checking vm part") time.sleep(2) if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") # Check snapshot operation and its result if domain_operation == 'snap_shot': check_snapshot() # Unplug disk. if hotplug_disk: result = virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(result, status_error) finally: if enable_private_key_encryption: utils_secret.clean_up_secrets() # Clean up backend storage and TLS try: if nbd: nbd.cleanup() # Clean up snapshots if exist if domain_operation == 'snap_shot': snap_lists = virsh.snapshot_list(vm_name, debug=True) for snap_name in snap_lists: virsh.snapshot_delete(vm_name, snap_name, "--metadata", debug=True, ignore_status=True) # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata")
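The NBD test above hides the export setup inside the NbdExport helper. As a rough, hedged sketch of what that amounts to, the fragment below exports a raw image with qemu-nbd and builds the matching <disk type='network'> element; the image path, port and export name are placeholders, and the TLS/secret handling exercised above is left out.

import subprocess
import xml.etree.ElementTree as ET

image, port, export = "/var/lib/libvirt/images/nbdtest.img", "10809", "nbdexp"
# Export the image over NBD in the background.
subprocess.run(["qemu-nbd", "-f", "raw", "-p", port, "-x", export,
                "--fork", image], check=True)

# Describe the export as a network disk for the guest.
disk = ET.Element("disk", type="network", device="disk")
ET.SubElement(disk, "driver", name="qemu", type="raw")
source = ET.SubElement(disk, "source", protocol="nbd", name=export, tls="no")
ET.SubElement(source, "host", name="localhost", port=port)
ET.SubElement(disk, "target", dev="vdb", bus="virtio")
print(ET.tostring(disk, encoding="unicode"))
# Writing this XML to a file and running `virsh attach-device <domain> <file>`
# corresponds to the hotplug path exercised above.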
def run(test, params, env): """ Test for virt-xml-validate """ # Get the full path of virt-xml-validate command. try: VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate") except ValueError: raise error.TestNAError("Not find virt-xml-validate command on host.") vm_name = params.get("main_vm", "virt-tests-vm1") net_name = params.get("net_dumpxml_name", "default") pool_name = params.get("pool_dumpxml_name", "default") schema = params.get("schema", "domain") output = params.get("output_file", "output") output_path = os.path.join(data_dir.get_tmp_dir(), output) valid_schemas = [ "domain", "domainsnapshot", "network", "storagepool", "storagevol", "nodedev", "capability", "nwfilter", "secret", "interface", ] if schema not in valid_schemas: raise error.TestFail("invalid %s specified" % schema) virsh_dargs = {"ignore_status": True, "debug": True} if schema == "domainsnapshot": domainsnapshot_validate(vm_name, file=output_path, **virsh_dargs) elif schema == "network": network_validate(net_name, file=output_path, **virsh_dargs) elif schema == "storagepool": storagepool_validate(pool_name, file=output_path, **virsh_dargs) elif schema == "storagevol": storagevol_validate(pool_name, file=output_path, **virsh_dargs) elif schema == "nodedev": nodedev_validate(file=output_path, **virsh_dargs) elif schema == "capability": capability_validate(file=output_path, **virsh_dargs) elif schema == "nwfilter": nwfilter_validate(file=output_path, **virsh_dargs) elif schema == "secret": secret_validate(file=output_path, **virsh_dargs) elif schema == "interface": interface_validate(file=output_path, **virsh_dargs) else: # domain virsh.dumpxml(vm_name, to_file=output_path) cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema) cmd_result = utils.run(cmd, ignore_status=True) # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snapshot in snapshot_lists: virsh.snapshot_delete(vm_name, snapshot, "--metadata") if cmd_result.exit_status: raise error.TestFail("virt-xml-validate command failed.\n" "Detail: %s." % cmd_result) if cmd_result.stdout.count("fail"): raise error.TestFail("xml fails to validate\n" "Detail: %s." % cmd_result)
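For the default "domain" schema branch above, the whole check reduces to dumping the XML and handing it to virt-xml-validate. A minimal sketch, assuming virt-xml-validate is installed and a domain named "demo" exists:

import subprocess

xml_path = "/tmp/demo.xml"
with open(xml_path, "w") as f:
    subprocess.run(["virsh", "dumpxml", "demo"], stdout=f, check=True)

result = subprocess.run(["virt-xml-validate", xml_path, "domain"],
                        capture_output=True, text=True)
# virt-xml-validate exits non-zero and/or reports a validation failure on
# stdout when the XML does not match the requested schema.
if result.returncode or "fail" in result.stdout:
    raise RuntimeError("validation failed: %s%s" % (result.stdout, result.stderr))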
def run(test, params, env): """ 1. prepare a fc lun with one of following methods - create a scsi pool&vol - create a vhba 2. prepare the virtual disk xml, as one of following - source = /dev/disk/by-path - source = /dev/mapper/mpathX - source = pool&vol format 3. start a vm with above disk as vdb 4. create disk-only snapshot of vdb 5. check the snapshot-list and snapshot file's existence 6. mount vdb and touch file to it 7. revert the snapshot and check file's existence 8. delete snapshot 9. cleanup env. """ vm_name = params.get("main_vm", "avocado-vt-vm1") wwpn = params.get("wwpn", "WWPN_EXAMPLE") wwnn = params.get("wwnn", "WWNN_EXAMPLE") disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "file") disk_size = params.get("disk_size", "100M") device_target = params.get("device_target", "vdb") driver_name = params.get("driver_name", "qemu") driver_type = params.get("driver_type", "raw") target_bus = params.get("target_bus", "virtio") vd_format = params.get("vd_format", "") snapshot_dir = params.get("snapshot_dir", "/tmp") snapshot_name = params.get("snapshot_name", "s1") pool_name = params.get("pool_name", "") pool_target = params.get("pool_target", "/dev") snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no") new_vhbas = [] current_vhbas = [] new_vhba = [] path_to_blk = "" lun_sl = [] new_disk = "" pool_ins = None old_mpath_conf = "" mpath_conf_path = "/etc/multipath.conf" original_mpath_conf_exist = os.path.exists(mpath_conf_path) vm = env.get_vm(vm_name) online_hbas = utils_npiv.find_hbas("hba") if not online_hbas: raise exceptions.TestSkipError("There is no online hba cards.") old_mpath_conf = utils_npiv.prepare_multipath_conf( conf_path=mpath_conf_path, replace_existing=True) first_online_hba = online_hbas[0] old_vhbas = utils_npiv.find_hbas("vhba") if vm.is_dead(): vm.start() session = vm.wait_for_login() virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) old_disks = virt_vm.get_disks() if vm.is_alive(): vm.destroy(gracefully=False) if pool_name: pool_ins = libvirt_storage.StoragePool() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() try: # prepare a fc lun if vd_format in ['scsi_vol']: if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba, pool_target) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT) if not utils_npiv.is_vhbas_added(old_vhbas): raise exceptions.TestFail("vHBA not successfully created") current_vhbas = utils_npiv.find_hbas("vhba") new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0] new_vhbas.append(new_vhba) new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail( "block device not found with scsi_%s", new_vhba_scsibus) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_TIMEOUT * 3) path_to_blk = list(vol_list.values())[0] elif vd_format in ['mpath', 'by_path']: old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": first_online_hba, "scsi_wwnn": wwnn, "scsi_wwpn": wwpn }) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT * 2) if not new_vhba: raise exceptions.TestFail("vHBA not successfully generated.") new_vhbas.append(new_vhba) if vd_format == 
"mpath": utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_TIMEOUT * 5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list( set(cur_mpath_devs).difference(set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) path_to_blk = "/dev/mapper/" + new_mpath_devs[0] elif vd_format == "by_path": new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("blk dev not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail( "lun symbolic links not found in " "/dev/disk/by-path/ for %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) else: pass else: raise exceptions.TestSkipError("Not provided how to pass" "virtual disk to VM.") # create qcow2 file on the block device with specified size if path_to_blk: cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size) try: process.run(cmd, shell=True) except process.cmdError as detail: raise exceptions.TestFail( "Fail to create qcow2 on blk dev: %s", detail) else: raise exceptions.TestFail("Don't have a valid path to blk dev.") # prepare disk xml if "vol" in vd_format: vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_TIMEOUT * 3) test_vol = list(vol_list.keys())[0] disk_params = { 'type_name': disk_type, 'target_dev': device_target, 'target_bus': target_bus, 'source_pool': pool_name, 'source_volume': test_vol, 'driver_type': driver_type } else: disk_params = { 'type_name': disk_type, 'device': disk_device, 'driver_name': driver_name, 'driver_type': driver_type, 'source_file': path_to_blk, 'target_dev': device_target, 'target_bus': target_bus } if vm.is_alive(): vm.destroy(gracefully=False) new_disk = disk.Disk() new_disk.xml = open(utlv.create_disk_xml(disk_params)).read() # start vm with the virtual disk vmxml.devices = vmxml.devices.append(new_disk) vmxml.sync() vm.start() session = vm.wait_for_login() cur_disks = virt_vm.get_disks() mount_disk = "".join(list(set(old_disks) ^ set(cur_disks))) # mkfs and mount disk in vm, create a file on that disk. if not mount_disk: logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks) raise exceptions.TestFail("No new disk found in vm.") mkfs_and_mount(session, mount_disk) create_file_in_vm(session, "/mnt/before_snapshot.txt", "before") # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path if snapshot_disk_only: vm_blks = list(vm.get_disk_devices().keys()) options = "%s --disk-only" % snapshot_name for vm_blk in vm_blks: snapshot_file = snapshot_dir + "/" + vm_blk + "." 
+ snapshot_name if os.path.exists(snapshot_file): os.remove(snapshot_file) options = options + " --diskspec %s,file=%s" % (vm_blk, snapshot_file) else: options = snapshot_name utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options)) # check virsh snapshot-list logging.debug("Running: snapshot-list %s", vm_name) snapshot_list = virsh.snapshot_list(vm_name) logging.debug("snapshot list is: %s", snapshot_list) if not snapshot_list: raise exceptions.TestFail("snapshots not found after creation.") # snapshot-revert doesn't support external snapshot for now. so # only check this with internal snapshot. if not snapshot_disk_only: create_file_in_vm(session, "/mnt/after_snapshot.txt", "after") logging.debug("Running: snapshot-revert %s %s", vm_name, snapshot_name) utlv.check_exit_status( virsh.snapshot_revert(vm_name, snapshot_name)) session = vm.wait_for_login() file_existence, file_content = get_file_in_vm( session, "/mnt/after_snapshot.txt") logging.debug("file exist = %s, file content = %s", file_existence, file_content) if file_existence: raise exceptions.TestFail("The file created " "after snapshot still exists.") file_existence, file_content = get_file_in_vm( session, "/mnt/before_snapshot.txt") logging.debug("file eixst = %s, file content = %s", file_existence, file_content) if ((not file_existence) or (file_content.strip() != "before")): raise exceptions.TestFail("The file created " "before snapshot is lost.") # delete snapshots # if diskonly, delete --metadata and remove files # if not diskonly, delete snapshot if snapshot_disk_only: options = "--metadata" else: options = "" for snap in snapshot_list: logging.debug("deleting snapshot %s with options %s", snap, options) result = virsh.snapshot_delete(vm_name, snap, options) logging.debug("result of snapshot-delete: %s", result.stdout.strip()) if snapshot_disk_only: vm_blks = list(vm.get_disk_devices().keys()) for vm_blk in vm_blks: snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap if os.path.exists(snapshot_file): os.remove(snapshot_file) snapshot_list = virsh.snapshot_list(vm_name) if snapshot_list: raise exceptions.TestFail("Snapshot not deleted: %s", snapshot_list) except Exception as detail: raise exceptions.TestFail("exception happens: %s", detail) finally: logging.debug("Start to clean up env...") vmxml_backup.sync() if pool_ins and pool_ins.pool_exists(pool_name): virsh.pool_destroy(pool_name) for new_vhba in new_vhbas: virsh.nodedev_destroy(new_vhba) utils_npiv.restart_multipathd() if old_mpath_conf: utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, conf_content=old_mpath_conf, replace_existing=True) if not original_mpath_conf_exist and os.path.exists(mpath_conf_path): os.remove(mpath_conf_path)
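The disk-only branch above builds one --diskspec per guest disk and later removes the external overlay files by hand, because snapshot-delete --metadata only drops libvirt's record of the snapshot. A short sketch of that option composition and cleanup, with placeholder disk targets and snapshot directory:

import os

snapshot_name = "s1"
snapshot_dir = "/tmp"
vm_blks = ["vda", "vdb"]

options = "%s --disk-only" % snapshot_name
snap_files = []
for blk in vm_blks:
    snap_file = os.path.join(snapshot_dir, "%s.%s" % (blk, snapshot_name))
    snap_files.append(snap_file)
    options += " --diskspec %s,file=%s" % (blk, snap_file)
# `virsh snapshot-create-as <domain> <options>` then writes one external
# overlay per disk; after `virsh snapshot-delete <domain> <name> --metadata`
# those overlay files still have to be removed explicitly:
for snap_file in snap_files:
    if os.path.exists(snap_file):
        os.remove(snap_file)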
def run(test, params, env): """ Test memory management of nvdimm """ vm_name = params.get('main_vm') nvdimm_file = params.get('nvdimm_file') check = params.get('check', '') status_error = "yes" == params.get('status_error', 'no') error_msg = params.get('error_msg', '') qemu_checks = params.get('qemu_checks', '').split('`') test_str = 'This is a test' def check_boot_config(session): """ Check /boot/config-$KVER file """ check_list = [ 'CONFIG_LIBNVDIMM=m', 'CONFIG_BLK_DEV_PMEM=m', 'CONFIG_ACPI_NFIT=m' ] current_boot = session.cmd('uname -r').strip() content = session.cmd('cat /boot/config-%s' % current_boot).strip() for item in check_list: if item in content: logging.info(item) else: logging.error(item) test.fail('/boot/config content not correct') def check_file_in_vm(session, path, expect=True): """ Check whether the existance of file meets expectation """ exist = session.cmd_status('ls %s' % path) logging.debug(exist) exist = True if exist == 0 else False status = '' if exist else 'NOT' logging.info('File %s does %s exist', path, status) if exist != expect: err_msg = 'Existance doesn\'t meet expectation: %s ' % path if expect: err_msg += 'should exist.' else: err_msg += 'should not exist' test.fail(err_msg) def create_cpuxml(): """ Create cpu xml for test """ cpu_params = { k: v for k, v in params.items() if k.startswith('cpuxml_') } logging.debug(cpu_params) cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" for attr_key in cpu_params: val = cpu_params[attr_key] logging.debug('Set cpu params') setattr(cpu_xml, attr_key.replace('cpuxml_', ''), eval(val) if ':' in val else val) logging.debug(cpu_xml) return cpu_xml.copy() def create_nvdimm_xml(**mem_param): """ Create xml of nvdimm memory device """ mem_xml = utils_hotplug.create_mem_xml( tg_size=mem_param['target_size'], mem_addr={'slot': mem_param['address_slot']}, tg_sizeunit=mem_param['target_size_unit'], tg_node=mem_param['target_node'], mem_discard=mem_param.get('discard'), mem_model="nvdimm", lb_size=mem_param.get('label_size'), lb_sizeunit=mem_param.get('label_size_unit'), mem_access=mem_param['mem_access']) source_xml = memory.Memory.Source() source_xml.path = mem_param['source_path'] mem_xml.source = source_xml logging.debug(mem_xml) return mem_xml.copy() def check_nvdimm_file(file_name): """ check if the file exists in nvdimm memory device :param file_name: the file name in nvdimm device """ vm_session = vm.wait_for_login() if test_str not in vm_session.cmd('cat /mnt/%s ' % file_name): test.fail('"%s" should be in output' % test_str) bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if 'ppc64le' in platform.machine().lower(): if not libvirt_version.version_compare(6, 2, 0): test.cancel('Libvirt version should be > 6.2.0' ' to support nvdimm on pseries') try: vm = env.get_vm(vm_name) # Create nvdimm file on the host process.run('truncate -s 512M %s' % nvdimm_file, verbose=True) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Set cpu according to params cpu_xml = create_cpuxml() vmxml.cpu = cpu_xml # Update other vcpu, memory info according to params update_vm_args = { k: params[k] for k in params if k.startswith('setvm_') } logging.debug(update_vm_args) for key, value in list(update_vm_args.items()): attr = key.replace('setvm_', '') logging.debug('Set %s = %s', attr, value) setattr(vmxml, attr, int(value) if value.isdigit() else value) logging.debug(virsh.dumpxml(vm_name)) # Add an nvdimm mem device to vm xml nvdimm_params = { k.replace('nvdimmxml_', ''): v for k, v in params.items() if 
k.startswith('nvdimmxml_') } nvdimm_xml = create_nvdimm_xml(**nvdimm_params) vmxml.add_device(nvdimm_xml) if check in ['ppc_no_label', 'discard']: result = virsh.define(vmxml.xml, debug=True) libvirt.check_result(result, expected_fails=[error_msg]) return vmxml.sync() logging.debug(virsh.dumpxml(vm_name)) virsh.start(vm_name, debug=True, ignore_status=False) # Check qemu command line one by one map(libvirt.check_qemu_cmd_line, qemu_checks) alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Check if the guest support NVDIMM: # check /boot/config-$KVER file vm_session = vm.wait_for_login() check_boot_config(vm_session) # check /dev/pmem0 existed inside guest check_file_in_vm(vm_session, '/dev/pmem0') if check == 'back_file': # Create a file system on /dev/pmem0 if platform.platform().count('el8'): vm_session.cmd('mkfs.xfs -f /dev/pmem0 -m reflink=0') else: vm_session.cmd('mkfs.xfs -f /dev/pmem0') vm_session.cmd('mount -o dax /dev/pmem0 /mnt') vm_session.cmd('echo \"%s\" >/mnt/foo' % test_str) vm_session.cmd('umount /mnt') vm_session.close() # Shutdown the guest, then start it, remount /dev/pmem0, # check if the test file is still on the file system vm.destroy() vm.start() vm_session = vm.wait_for_login() vm_session.cmd('mount -o dax /dev/pmem0 /mnt') if test_str not in vm_session.cmd('cat /mnt/foo'): test.fail('\"%s\" should be in /mnt/foo' % test_str) # From the host, check the file has changed: host_output = process.run('hexdump -C /tmp/nvdimm', shell=True, verbose=True).stdout_text if test_str not in host_output: test.fail('\"%s\" should be in output' % test_str) # Shutdown the guest, and edit the xml, # include: access='private' vm_session.close() vm.destroy() vm_devices = vmxml.devices nvdimm_device = vm_devices.by_device_tag('memory')[0] nvdimm_index = vm_devices.index(nvdimm_device) vm_devices[nvdimm_index].mem_access = 'private' vmxml.devices = vm_devices vmxml.sync() # Login to the guest, mount the /dev/pmem0 and . # create a file: foo-private vm.start() vm_session = vm.wait_for_login() libvirt.check_qemu_cmd_line('mem-path=/tmp/nvdimm,share=no') private_str = 'This is a test for foo-private' vm_session.cmd('mount -o dax /dev/pmem0 /mnt/') file_private = 'foo-private' vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private)) if private_str not in vm_session.cmd('cat /mnt/%s' % file_private): test.fail('"%s" should be in output' % private_str) # Shutdown the guest, then start it, # check the file: foo-private is no longer existed vm_session.close() vm.destroy() vm.start() vm_session = vm.wait_for_login() vm_session.cmd('mount -o dax /dev/pmem0 /mnt/') if file_private in vm_session.cmd('ls /mnt/'): test.fail('%s should not exist, for it was ' 'created when access=private' % file_private) if check == 'label_back_file': # Create an xfs file system on /dev/pmem0 if platform.platform().count('el8'): vm_session.cmd( 'mkfs.xfs -f -b size=4096 /dev/pmem0 -m reflink=0') else: vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0') # Mount the file system with DAX enabled for page cache bypass output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/') logging.info(output) # Create a file on the nvdimm device. test_str = 'This is a test with label' vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str) if test_str not in vm_session.cmd('cat /mnt/foo-label '): test.fail('"%s" should be in the output of cat cmd' % test_str) # Reboot the guest, and remount the nvdimm device in the guest. 
# Check the file foo-label is exited vm_session.close() virsh.reboot(vm_name, debug=True) vm_session = vm.wait_for_login() vm_session.cmd('mount -o dax /dev/pmem0 /mnt') if test_str not in vm_session.cmd('cat /mnt/foo-label '): test.fail('"%s" should be in output' % test_str) if params.get('check_life_cycle', 'no') == 'yes': virsh.managedsave(vm_name, ignore_status=False, debug=True) vm.start() check_nvdimm_file('foo-label') vm_s1 = vm_name + ".s1" virsh.save(vm_name, vm_s1, ignore_status=False, debug=True) virsh.restore(vm_s1, ignore_status=False, debug=True) check_nvdimm_file('foo-label') virsh.snapshot_create_as(vm_name, vm_s1, ignore_status=False, debug=True) virsh.snapshot_revert(vm_name, vm_s1, ignore_status=False, debug=True) virsh.snapshot_delete(vm_name, vm_s1, ignore_status=False, debug=True) if check == 'hot_plug': # Create file for 2nd nvdimm device nvdimm_file_2 = params.get('nvdimm_file_2') process.run('truncate -s 512M %s' % nvdimm_file_2) # Add 2nd nvdimm device to vm xml nvdimm2_params = { k.replace('nvdimmxml2_', ''): v for k, v in params.items() if k.startswith('nvdimmxml2_') } nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params) ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices( 'memory') logging.debug('Starts with %d memory devices', len(ori_devices)) result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True) libvirt.check_exit_status(result) # After attach, there should be an extra memory device devices_after_attach = vm_xml.VMXML.new_from_dumpxml( vm_name).get_devices('memory') logging.debug('After detach, vm has %d memory devices', len(devices_after_attach)) if len(ori_devices) != len(devices_after_attach) - 1: test.fail( 'Number of memory devices after attach is %d, should be %d' % (len(devices_after_attach), len(ori_devices) + 1)) time.sleep(5) check_file_in_vm(vm_session, '/dev/pmem1') nvdimm_detach = alive_vmxml.get_devices('memory')[-1] logging.debug(nvdimm_detach) # Hot-unplug nvdimm device result = virsh.detach_device(vm_name, nvdimm_detach.xml, debug=True) libvirt.check_exit_status(result) vm_session.close() vm_session = vm.wait_for_login() virsh.dumpxml(vm_name, debug=True) left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices( 'memory') logging.debug(left_devices) if len(left_devices) != len(ori_devices): test.fail( 'Number of memory devices after detach is %d, should be %d' % (len(left_devices), len(ori_devices))) time.sleep(5) check_file_in_vm(vm_session, '/dev/pmem1', expect=False) finally: if vm.is_alive(): vm.destroy(gracefully=False) bkxml.sync() os.remove(nvdimm_file)
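The nvdimm helpers above assemble the host backing file and the <memory model='nvdimm'> device through utils_hotplug and the memory XML classes. The sketch below shows roughly equivalent artifacts built directly with the standard library; the sizes, NUMA node and slot values are illustrative only.

import subprocess
import xml.etree.ElementTree as ET

backing = "/tmp/nvdimm"
# Create a 512M sparse backing file on the host.
subprocess.run(["truncate", "-s", "512M", backing], check=True)

mem = ET.Element("memory", model="nvdimm", access="shared")
ET.SubElement(ET.SubElement(mem, "source"), "path").text = backing
target = ET.SubElement(mem, "target")
ET.SubElement(target, "size", unit="KiB").text = str(512 * 1024)
ET.SubElement(target, "node").text = "0"
label = ET.SubElement(target, "label")
ET.SubElement(label, "size", unit="KiB").text = "128"
ET.SubElement(mem, "address", type="dimm", slot="0")
print(ET.tostring(mem, encoding="unicode"))
# Attaching this with `virsh attach-device` requires maxMemory/slots and a
# NUMA cell in the domain XML, which is what create_cpuxml() and the
# setvm_* parameters arrange above.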
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: test.cancel("%s value '%s' is not a number." % (i, params.get(i))) # False positive - img_val was filled in the for loop above. # pylint: disable=E0632 img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vm_os_xml = vmxml.os # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in list(disks.values()): disk_path = disk['source'] label = check_ownership(disk_path) if label: backup_labels_of_disks[disk_path] = label try: if vm_os_xml.nvram: nvram_path = vm_os_xml.nvram if not os.path.exists(nvram_path): # Need libvirt automatically generate the path vm.start() vm.destroy(gracefully=False) label = check_ownership(nvram_path) if label: backup_labels_of_disks[nvram_path] = label except xcepts.LibvirtXMLNotFoundError: logging.debug("vm xml don't have nvram element") # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for file_path in list(backup_labels_of_disks.keys()): if qemu_user == "root": os.chown(file_path, 0, 0) elif qemu_user == "qemu": os.chown(file_path, 107, 107) else: process.run('chown %s %s' % (qemu_user, file_path), shell=True) # Set selinux of host. if backup_sestatus == "disabled": test.cancel("SELinux is in Disabled mode." 
"It must be Enabled to" "run this test") utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s", qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s", export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.cancel("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.fail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug( "attached image ownership on nfs server before " "start: %s", img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug( "attached image ownership on nfs server after" " start: %s", img_label_after) if status_error: test.fail('Test succeeded in negative case.') except virt_vm.VMStartError as e: # Starting VM failed. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: test.fail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in list(disks_snap.values()): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except process.CmdError: test.fail("Detach disk 'vdf' from VM %s failed." 
% vm.name) finally: # clean up vm.destroy() qemu_conf.restore() for path, label in list(backup_labels_of_disks.items()): label_list = label.split(":") os.chown(path, int(label_list[0]), int(label_list[1])) if snapshot_name: backup_xml.sync("--snapshots-metadata") else: backup_xml.sync() for i in disk_snap_path: if i and os.path.exists(i): os.unlink(i) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except exceptions.TestFail as detail: logging.error(str(detail)) utils_selinux.set_status(backup_sestatus) libvirtd.restart()
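The DAC test above relies on a check_ownership() helper defined elsewhere in the suite; a minimal sketch of what such a check plausibly does (stat the file and report its uid:gid) is given below, along with a note on how the before/after comparison is used.

import os

def check_ownership_sketch(path):
    """Return the "uid:gid" string of `path`, or None if it does not exist."""
    if not os.path.exists(path):
        return None
    st = os.stat(path)
    return "%s:%s" % (st.st_uid, st.st_gid)

# The test records this label for the image on the NFS export before and
# after vm.start(); with dynamic_ownership enabled and the qemu user in
# qemu.conf, the comparison shows whether libvirt relabelled the file
# (107:107 is the uid:gid the test itself uses for the qemu user) and whether
# the guest could still open it under the configured DAC settings.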