def clean_up_lvm(device_source, vg_name, lv_name):
    """
    Clean up LVM.

    :param device_source: source file
    :param vg_name: volume group name
    :param lv_name: logical volume name
    """
    libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name)
    lv_utils.vg_remove(vg_name)
    process.system("pvremove %s" % device_source,
                   ignore_status=True, shell=True)
    process.system("rm -rf /dev/%s" % vg_name,
                   ignore_status=True, shell=True)
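# Illustrative only: a minimal sketch (not part of the original tests) showing
# how clean_up_lvm() is meant to pair with the iSCSI/LVM setup performed in
# run() below. The vg/lv names mirror the defaults used elsewhere in this
# module ("vg_test_0"/"lv_test_0"); adjust them to the actual test parameters.
def _example_lvm_disk_lifecycle(vg_name="vg_test_0", lv_name="lv_test_0"):
    device_source = libvirt.setup_or_cleanup_iscsi(True)
    try:
        lv_utils.vg_create(vg_name, device_source)
        libvirt.create_local_disk("lvm", size="10M",
                                  vgname=vg_name, lvname=lv_name)
        # ... attach/detach the resulting /dev/<vg_name>/<lv_name> disk here ...
    finally:
        # Tear down in reverse order: LV/VG/PV first, then the iSCSI backend.
        clean_up_lvm(device_source, vg_name, lv_name)
        libvirt.setup_or_cleanup_iscsi(False)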
def run(test, params, env): """ Test virsh {at|de}tach-disk command. The command can attach new disk/detach disk. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh attach/detach-disk operation. 3.Recover test environment. 4.Confirm the test result. """ def check_vm_partition(vm, device, os_type, target_name, old_parts): """ Check VM disk's partition. :param vm. VM guest. :param os_type. VM's operation system type. :param target_name. Device target type. :return: True if check successfully. """ logging.info("Checking VM partittion...") if vm.is_dead(): vm.start() try: attached = False if os_type == "linux": session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.debug("Added parts: %s" % added_parts) for i in range(len(added_parts)): if device == "disk": if target_name.startswith("vd"): if added_parts[i].startswith("vd"): attached = True elif target_name.startswith( "hd") or target_name.startswith("sd"): if added_parts[i].startswith("sd"): attached = True elif device == "cdrom": if added_parts[i].startswith("sr"): attached = True session.close() return attached except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def acpiphp_module_modprobe(vm, os_type): """ Add acpiphp module if VM's os type is rhle5.* :param vm. VM guest. :param os_type. VM's operation system type. :return: True if operate successfully. """ if vm.is_dead(): vm.start() try: if os_type == "linux": session = vm.wait_for_login() s_rpm, _ = session.cmd_status_output("rpm --version") # If status is different from 0, this # guest OS doesn't support the rpm package # manager if s_rpm: session.close() return True _, o_vd = session.cmd_status_output( "rpm -qa | grep redhat-release") if o_vd.find("5Server") != -1: s_mod, o_mod = session.cmd_status_output( "modprobe acpiphp") del o_mod if s_mod != 0: session.close() return False session.close() return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_shareable(at_with_shareable, test_twice): """ check if current libvirt version support shareable option at_with_shareable: True or False. Whether attach disk with shareable option test_twice: True or False. Whether perform operations twice return: True or cancel the test """ if at_with_shareable or test_twice: if libvirt_version.version_compare(3, 9, 0): return True else: test.cancel( "Current libvirt version doesn't support shareable feature" ) # Get test command. 
test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk") vm_ref = params.get("at_dt_disk_vm_ref", "name") at_options = params.get("at_dt_disk_at_options", "") dt_options = params.get("at_dt_disk_dt_options", "") at_with_shareable = "yes" == params.get("at_with_shareable", 'no') pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running") status_error = "yes" == params.get("status_error", 'no') no_attach = params.get("at_dt_disk_no_attach", 'no') os_type = params.get("os_type", "linux") qemu_file_lock = params.get("qemu_file_lock", "") if qemu_file_lock: if utils_misc.compare_qemu_version(2, 9, 0): logging.info('From qemu-kvm-rhev 2.9.0:' 'QEMU image locking, which should prevent multiple ' 'runs of QEMU or qemu-img when a VM is running.') if test_cmd == "detach-disk" or pre_vm_state == "shut off": test.cancel('This case is not supported.') else: logging.info( 'The expect result is failure as opposed with succeed') status_error = True # Disk specific attributes. device = params.get("at_dt_disk_device", "disk") device_source_name = params.get("at_dt_disk_device_source", "attach.img") device_source_format = params.get("at_dt_disk_device_source_format", "raw") device_target = params.get("at_dt_disk_device_target", "vdd") device_disk_bus = params.get("at_dt_disk_bus_type", "virtio") source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes") create_img = "yes" == params.get("at_dt_disk_create_image", "yes") test_twice = "yes" == params.get("at_dt_disk_test_twice", "no") test_type = "yes" == params.get("at_dt_disk_check_type", "no") test_audit = "yes" == params.get("at_dt_disk_check_audit", "no") test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no") test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no") restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no") detach_disk_with_print_xml = "yes" == params.get( "detach_disk_with_print_xml", "no") vg_name = params.get("at_dt_disk_vg", "vg_test_0") lv_name = params.get("at_dt_disk_lv", "lv_test_0") serial = params.get("at_dt_disk_serial", "") address = params.get("at_dt_disk_address", "") address2 = params.get("at_dt_disk_address2", "") cache_options = params.get("cache_options", "") time_sleep = params.get("time_sleep", 3) if check_shareable(at_with_shareable, test_twice): at_options += " --mode shareable" if serial: at_options += (" --serial %s" % serial) if address2: at_options_twice = at_options + (" --address %s" % address2) if address: at_options += (" --address %s" % address) if cache_options: if cache_options.count("directsync"): if not libvirt_version.version_compare(1, 0, 0): test.cancel("'directsync' cache option doesn't " "support in current libvirt version.") at_options += (" --cache %s" % cache_options) vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Create virtual device file. 
device_source_path = os.path.join(data_dir.get_tmp_dir(), device_source_name) if test_block_dev: device_source = libvirt.setup_or_cleanup_iscsi(True) if not device_source: # We should skip this case test.cancel("Can not get iscsi device name in host") if test_logcial_dev: lv_utils.vg_create(vg_name, device_source) device_source = libvirt.create_local_disk("lvm", size="10M", vgname=vg_name, lvname=lv_name) logging.debug("New created volume: %s", lv_name) else: if source_path and create_img: device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1G", disk_format=device_source_format) else: device_source = device_source_name # if we are testing audit, we need to start audit servcie first. if test_audit: auditd_service = Factory.create_service("auditd") if not auditd_service.status(): auditd_service.start() logging.info("Auditd service status: %s" % auditd_service.status()) # If we are testing cdrom device, we need to detach hdc in VM first. if device == "cdrom": if vm.is_alive(): vm.destroy(gracefully=False) s_detach = virsh.detach_disk(vm_name, device_target, "--config") if not s_detach: logging.error("Detach hdc failed before test.") # If we are testing detach-disk, we need to attach certain device first. if test_cmd == "detach-disk" and no_attach != "yes": s_at_options = "--driver qemu --config" #Since lock feature is introduced in libvirt 3.9.0 afterwards, disk shareable options #need be set if disk needs be attached multitimes if check_shareable(at_with_shareable, test_twice): s_at_options += " --mode shareable" s_attach = virsh.attach_disk(vm_name, device_source, device_target, s_at_options, debug=True).exit_status if s_attach != 0: logging.error("Attaching device failed before testing detach-disk") else: logging.debug( "Attaching device succeeded before testing detach-disk") if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1", disk_format=device_source_format) s_attach = virsh.attach_disk(vm_name, device_source, device_target2, s_at_options).exit_status if s_attach != 0: logging.error("Attaching device failed before testing " "detach-disk test_twice") vm.start() vm.wait_for_login() # Add acpiphp module before testing if VM's os type is rhle5.* if not acpiphp_module_modprobe(vm, os_type): test.error("Add acpiphp module failed before test.") # Turn VM into certain state. if pre_vm_state == "paused": logging.info("Suspending %s..." % vm_name) if vm.is_alive(): vm.pause() elif pre_vm_state == "shut off": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False) # Get disk count before test. disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name) # Test. domid = vm.get_id() domuuid = vm.get_uuid() # Confirm how to reference a VM. if vm_ref == "name": vm_ref = vm_name elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref == "uuid": vm_ref = domuuid else: vm_ref = "" if test_cmd == "attach-disk": status = virsh.attach_disk(vm_ref, device_source, device_target, at_options, debug=True).exit_status elif test_cmd == "detach-disk": # For detach disk with print-xml option, it only print information,and not actual disk detachment. 
if detach_disk_with_print_xml and libvirt_version.version_compare( 4, 5, 0): ret = virsh.detach_disk(vm_ref, device_target, at_options) libvirt.check_exit_status(ret) cmd = ("echo \"%s\" | grep -A 16 %s" % (ret.stdout.strip(), device_source_name)) if process.system(cmd, ignore_status=True, shell=True): test.error("Check disk with source image name failed") status = virsh.detach_disk(vm_ref, device_target, dt_options, debug=True).exit_status if restart_libvirtd: libvirtd_serv = utils_libvirtd.Libvirtd() libvirtd_serv.restart() if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1G", disk_format=device_source_format) if test_cmd == "attach-disk": if address2: at_options = at_options_twice status = virsh.attach_disk(vm_ref, device_source, device_target2, at_options, debug=True).exit_status elif test_cmd == "detach-disk": status = virsh.detach_disk(vm_ref, device_target2, dt_options, debug=True).exit_status # Resume guest after command. On newer libvirt this is fixed as it has # been a bug. The change in xml file is done after the guest is resumed. if pre_vm_state == "paused": vm.resume() time.sleep(5) # Check audit log check_audit_after_cmd = True if test_audit: grep_audit = ('grep "%s" /var/log/audit/audit.log' % test_cmd.split("-")[0]) cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"' % device_source) if process.run(cmd, shell=True).exit_status: logging.error("Audit check failed") check_audit_after_cmd = False # Need wait a while for xml to sync time.sleep(float(time_sleep)) # Check disk count after command. check_count_after_cmd = True disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name) if test_cmd == "attach-disk": if disk_count_after_cmd == disk_count_before_cmd: check_count_after_cmd = False elif test_cmd == "detach-disk": if disk_count_after_cmd < disk_count_before_cmd: check_count_after_cmd = False # Recover VM state. if pre_vm_state == "shut off": vm.start() # Check in VM after command. check_vm_after_cmd = True check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target, old_parts) # Check disk type after attach. check_disk_type = True if test_type: if test_block_dev: check_disk_type = vm_xml.VMXML.check_disk_type( vm_name, device_source, "block") else: check_disk_type = vm_xml.VMXML.check_disk_type( vm_name, device_source, "file") # Check disk serial after attach. check_disk_serial = True if serial: disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target) if serial != disk_serial: check_disk_serial = False # Check disk address after attach. check_disk_address = True if address: disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target) if address != disk_address: check_disk_address = False # Check multifunction address after attach. check_disk_address2 = True if address2: disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2) if address2 != disk_address2: check_disk_address2 = False # Check disk cache option after attach. 
check_cache_after_cmd = True if cache_options: disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target, "driver", "cache") if cache_options == "default": if disk_cache is not None: check_cache_after_cmd = False elif disk_cache != cache_options: check_cache_after_cmd = False # Eject cdrom test eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no") save_vm = "yes" == params.get("at_dt_disk_save_vm", "no") save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") try: if eject_cdrom: eject_params = { 'type_name': "file", 'device_type': "cdrom", 'target_dev': device_target, 'target_bus': device_disk_bus } eject_xml = libvirt.create_disk_xml(eject_params) with open(eject_xml) as eject_file: logging.debug("Eject CDROM by XML: %s", eject_file.read()) # Run command tiwce to make sure cdrom tray open first #BZ892289 # Open tray virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True) # Add time sleep between two attach commands. if time_sleep: time.sleep(float(time_sleep)) # Eject cdrom result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True) if result.exit_status != 0: test.fail("Eject CDROM failed") if vm_xml.VMXML.check_disk_exist(vm_name, device_source): test.fail("Find %s after do eject" % device_source) # Save and restore VM if save_vm: result = virsh.save(vm_name, save_file, debug=True) libvirt.check_exit_status(result) result = virsh.restore(save_file, debug=True) libvirt.check_exit_status(result) if vm_xml.VMXML.check_disk_exist(vm_name, device_source): test.fail("Find %s after do restore" % device_source) # Destroy VM. vm.destroy(gracefully=False) # Check disk count after VM shutdown (with --config). check_count_after_shutdown = True inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_count_after_shutdown = len(inactive_vmxml.get_disk_all()) if test_cmd == "attach-disk": if disk_count_after_shutdown == disk_count_before_cmd: check_count_after_shutdown = False elif test_cmd == "detach-disk": if disk_count_after_shutdown < disk_count_before_cmd: check_count_after_shutdown = False finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.debug("Restore the VM XML") backup_xml.sync() if os.path.exists(save_file): os.remove(save_file) if test_block_dev: if test_logcial_dev: libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % device_source, shell=True, ignore_status=True) libvirt.setup_or_cleanup_iscsi(False) else: libvirt.delete_local_disk("file", device_source) # Check results. if status_error: if not status: test.fail("virsh %s exit with unexpected value." % test_cmd) else: if status: test.fail("virsh %s failed." 
% test_cmd) if test_cmd == "attach-disk": if at_options.count("config"): if not check_count_after_shutdown: test.fail("Cannot see config attached device " "in xml file after VM shutdown.") if not check_disk_serial: test.fail("Serial set failed after attach") if not check_disk_address: test.fail("Address set failed after attach") if not check_disk_address2: test.fail("Address(multifunction) set failed" " after attach") else: if not check_count_after_cmd: test.fail("Cannot see device in xml file" " after attach.") if not check_vm_after_cmd: test.fail("Cannot see device in VM after" " attach.") if not check_disk_type: test.fail("Check disk type failed after" " attach.") if not check_audit_after_cmd: test.fail("Audit hotplug failure after attach") if not check_cache_after_cmd: test.fail("Check cache failure after attach") if at_options.count("persistent"): if not check_count_after_shutdown: test.fail("Cannot see device attached " "with persistent after " "VM shutdown.") else: if check_count_after_shutdown: test.fail("See non-config attached device " "in xml file after VM shutdown.") elif test_cmd == "detach-disk": if dt_options.count("config"): if check_count_after_shutdown: test.fail("See config detached device in " "xml file after VM shutdown.") else: if check_count_after_cmd: test.fail("See device in xml file " "after detach.") if check_vm_after_cmd: test.fail("See device in VM after detach.") if not check_audit_after_cmd: test.fail("Audit hotunplug failure " "after detach") if dt_options.count("persistent"): if check_count_after_shutdown: test.fail("See device deattached " "with persistent after " "VM shutdown.") else: if not check_count_after_shutdown: test.fail("See non-config detached " "device in xml file after " "VM shutdown.") else: test.error("Unknown command %s." % test_cmd)
def run(test, params, env): """ Test command: virsh find-storage-pool-sources 1. Prepare env to provide source storage if use localhost: 1). For 'netfs' source type, setup nfs server 2). For 'iscsi' source type, setup iscsi server 3). For 'logical' type pool, setup iscsi storage to create vg 4). Prepare srcSpec xml file if not given 2. Find the pool sources by running virsh cmd """ source_type = params.get("source_type", "") source_host = params.get("source_host", "127.0.0.1") source_initiator = params.get("source_initiator", "") srcSpec = params.get("source_Spec", "") vg_name = params.get("vg_name", "virttest_vg_0") ro_flag = "yes" == params.get("readonly_mode", "no") status_error = "yes" == params.get("status_error", "no") uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise exceptions.TestSkipError("API acl test not supported in " "current libvirt version.") if not libvirt_version.version_compare(4, 7, 0): if source_type == "iscsi-direct": test.cancel("iscsi-drect pool is not supported in current" "libvirt version") if not source_type: raise exceptions.TestFail("Command requires <type> value") cleanup_nfs = False cleanup_iscsi = False cleanup_logical = False # Prepare source storage if source_host == "127.0.0.1": if source_type == "netfs": # Set up nfs res = utils_test.libvirt.setup_or_cleanup_nfs(True) selinux_bak = res["selinux_status_bak"] cleanup_nfs = True if source_type in ["iscsi", "logical", "iscsi-direct"]: # Set up iscsi iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True) # If we got nothing, force failure if not iscsi_device: raise exceptions.TestFail("Did not setup an iscsi device") cleanup_iscsi = True if source_type == "logical": # Create vg by using iscsi device try: lv_utils.vg_create(vg_name, iscsi_device) except Exception as detail: utils_test.libvirt.setup_or_cleanup_iscsi(False) raise exceptions.TestFail("vg_create failed: %s" % detail) cleanup_logical = True # Prepare srcSpec xml if srcSpec: if srcSpec == "INVALID.XML": src_xml = "<invalid><host name='#@!'/><?source>" elif srcSpec == "VALID.XML": if source_type == "iscsi-direct": src_xml = "<source><host name='%s'/><initiator><iqn name='%s'/></initiator></source>" % ( source_host, source_initiator) else: src_xml = "<source><host name='%s'/></source>" % source_host srcSpec = xml_utils.TempXMLFile().name with open(srcSpec, "w+") as srcSpec_file: srcSpec_file.write(src_xml) logging.debug("srcSpec file content:\n%s", srcSpec_file.read()) if params.get('setup_libvirt_polkit') == 'yes' and srcSpec: cmd = "chmod 666 %s" % srcSpec process.run(cmd) if ro_flag: logging.debug("Readonly mode test") # Run virsh cmd try: cmd_result = virsh.find_storage_pool_sources( source_type, srcSpec, ignore_status=True, debug=True, unprivileged_user=unprivileged_user, uri=uri, readonly=ro_flag) utils_test.libvirt.check_exit_status(cmd_result, status_error) finally: # Clean up if cleanup_logical: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.run(cmd, shell=True).stdout_text lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name) if cleanup_iscsi: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_nfs: utils_test.libvirt.setup_or_cleanup_nfs( False, restore_selinux=selinux_bak)
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1) For 'netfs' source type, setup nfs server
       2) For 'iscsi' source type, setup iscsi server
       3) For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """
    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "127.0.0.1")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    if not source_type:
        raise exceptions.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    if source_host == "127.0.0.1":
        if source_type == "netfs":
            # Set up nfs
            res = utils_test.libvirt.setup_or_cleanup_nfs(True)
            selinux_bak = res["selinux_status_bak"]
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            try:
                iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
                # If we got nothing, force failure
                if not iscsi_device:
                    raise exceptions.TestFail("Did not setup an iscsi device")
                cleanup_iscsi = True
                if source_type == "logical":
                    # Create VG by using iscsi device
                    lv_utils.vg_create(vg_name, iscsi_device)
                    cleanup_logical = True
            except Exception as detail:
                if cleanup_iscsi:
                    utils_test.libvirt.setup_or_cleanup_iscsi(False)
                raise exceptions.TestFail("iscsi setup failed:\n%s" % detail)

    # Run virsh cmd
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(source_type,
                                                        options,
                                                        ignore_status=True,
                                                        debug=True,
                                                        readonly=ro_flag)
        utils_test.libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = to_text(process.system_output(cmd, shell=True))
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(
                False, restore_selinux=selinux_bak)
def run(test, params, env): """ Test pool command:virsh pool_autostart 1) Define a given type pool 2) Mark pool as autostart 3) Restart libvirtd and check pool 4) Destroy the pool 5) Unmark pool as autostart 6) Repeate step(3) """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") ip_protocal = params.get("ip_protocal", "ipv4") pool_ref = params.get("pool_ref", "name") pool_uuid = params.get("pool_uuid", "") invalid_source_path = params.get("invalid_source_path", "") status_error = "yes" == params.get("status_error", "no") readonly_mode = "yes" == params.get("readonly_mode", "no") pre_def_pool = "yes" == params.get("pre_def_pool", "yes") disk_type = params.get("disk_type", "") vg_name = params.get("vg_name", "") lv_name = params.get("lv_name", "") update_policy = params.get("update_policy") # Readonly mode ro_flag = False if readonly_mode: logging.debug("Readonly mode test") ro_flag = True if pool_target is "": pool_target = os.path.join(test.tmpdir, pool_target) # The file for dumped pool xml p_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): test.fail("Pool %s already exist" % pool_name) def check_pool(pool_name, pool_type, checkpoint, expect_value="", expect_error=False): """ Check the pool after autostart it :param pool_name: Name of the pool. :param pool_type: Type of the pool. :param checkpoint: Which part for checking. :param expect_value: Expected value. :param expect_error: Boolen value, expect command success or fail """ libvirt_pool = libvirt_storage.StoragePool() virsh.pool_list(option="--all", debug=True) if checkpoint == 'State': actual_value = libvirt_pool.pool_state(pool_name) if checkpoint == 'Autostart': actual_value = libvirt_pool.pool_autostart(pool_name) if actual_value != expect_value: if not expect_error: if checkpoint == 'State' and pool_type in ("dir", "scsi"): error_msg = "Dir pool should be always active when libvirtd restart. 
" error_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610" logging.error(error_msg) test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value)) else: logging.debug("Pool %s is %s as expected", checkpoint, actual_value) def change_source_path(new_path, update_policy="set"): n_poolxml = pool_xml.PoolXML() n_poolxml = n_poolxml.new_from_dumpxml(pool_name) s_xml = n_poolxml.get_source() s_xml.device_path = new_path if update_policy == "set": n_poolxml.set_source(s_xml) elif update_policy == "add": n_poolxml.add_source("device", {"path": new_path}) else: test.error("Unsupported policy type") logging.debug("After change_source_path:\n%s" % open(n_poolxml.xml).read()) return n_poolxml # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal} pool = pool_name clean_mount = False new_device = None try: if pre_def_pool: # Step(1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Remove the partition for disk pool # For sometimes the partition will cause pool start failed if pool_type == "disk": virsh.pool_build(pool_name, "--overwrite", debug=True) # Get pool uuid: if pool_ref == "uuid" and not pool_uuid: pool = pool_ins.get_pool_uuid(pool_name) # Setup logical block device # Change pool source path # Undefine pool # Define pool with new xml # Start pool if update_policy: new_device = utlv.setup_or_cleanup_iscsi(True) lv_utils.vg_create(vg_name, new_device) new_device = utlv.create_local_disk(disk_type, size="0.5", vgname=vg_name, lvname=lv_name) new_path = new_device if invalid_source_path: new_path = invalid_source_path if pool_type == "fs": utlv.mkfs(new_device, source_format) n_poolxml = change_source_path(new_path, update_policy) p_xml = n_poolxml.xml if not virsh.pool_undefine(pool_name): test.fail("Undefine pool %s failed" % pool_name) if not virsh.pool_define(p_xml): test.fail("Define pool %s from %s failed" % (pool_name, p_xml)) logging.debug("Start pool %s" % pool_name) result = virsh.pool_start(pool_name, ignore_status=True, debug=True) utlv.check_exit_status(result, status_error) # Mount a valid fs to pool target if pool_type == "fs": source_list = [] mnt_cmd = "" pool_target = n_poolxml.target_path if invalid_source_path: source_list.append(new_device) else: s_devices = n_poolxml.xmltreefile.findall("//source/device") for dev in s_devices: source_list.append(dev.get('path')) try: for src in source_list: mnt_cmd = "mount %s %s" % (src, pool_target) if not process.system(mnt_cmd, shell=True): clean_mount = True except process.CmdError: test.error("Failed to run %s" % mnt_cmd) # Step(2) # Pool autostart logging.debug("Try to mark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, readonly=ro_flag, ignore_status=True, debug=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="yes", expect_error=status_error) # Step(3) # Restart libvirtd and check pool status logging.info("Try to restart libvirtd") libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint="State", expect_value="active", expect_error=status_error) # Step(4) # Pool destroy if pool_ins.is_pool_active(pool_name): virsh.pool_destroy(pool_name) logging.debug("Pool %s 
destroyed" % pool_name) # Step(5) # Pool autostart disable logging.debug("Try to unmark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, extra="--disable", debug=True, ignore_status=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="no", expect_error=status_error) # Repeat step (3) logging.debug("Try to restart libvirtd") libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint='State', expect_value="inactive", expect_error=status_error) finally: # Clean up logging.debug("Try to clean up env") try: if clean_mount is True: for src in source_list: process.system("umount %s" % pool_target) if pre_def_pool: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) if new_device: utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name) lv_utils.vg_remove(vg_name) utlv.setup_or_cleanup_iscsi(False) if os.path.exists(p_xml): os.remove(p_xml) except test.fail as details: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() logging.error(str(details))
def run(test, params, env): """ Test pool command:virsh pool_autostart 1) Define a given type pool 2) Mark pool as autostart 3) Restart libvirtd and check pool 4) Destroy the pool 5) Unmark pool as autostart 6) Repeate step(3) """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") ip_protocal = params.get("ip_protocal", "ipv4") pool_ref = params.get("pool_ref", "name") pool_uuid = params.get("pool_uuid", "") invalid_source_path = params.get("invalid_source_path", "") status_error = "yes" == params.get("status_error", "no") readonly_mode = "yes" == params.get("readonly_mode", "no") pre_def_pool = "yes" == params.get("pre_def_pool", "yes") disk_type = params.get("disk_type", "") vg_name = params.get("vg_name", "") lv_name = params.get("lv_name", "") update_policy = params.get("update_policy") # Readonly mode ro_flag = False if readonly_mode: logging.debug("Readonly mode test") ro_flag = True if pool_target is "": pool_target = os.path.join(test.tmpdir, pool_target) # The file for dumped pool xml p_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): test.fail("Pool %s already exist" % pool_name) def check_pool(pool_name, pool_type, checkpoint, expect_value="", expect_error=False): """ Check the pool after autostart it :param pool_name: Name of the pool. :param pool_type: Type of the pool. :param checkpoint: Which part for checking. :param expect_value: Expected value. :param expect_error: Boolen value, expect command success or fail """ libvirt_pool = libvirt_storage.StoragePool() virsh.pool_list(option="--all", debug=True) if checkpoint == 'State': actual_value = libvirt_pool.pool_state(pool_name) if checkpoint == 'Autostart': actual_value = libvirt_pool.pool_autostart(pool_name) if actual_value != expect_value: if not expect_error: if checkpoint == 'State' and pool_type in ("dir", "scsi"): debug_msg = "Dir pool should be always active when libvirtd restart. 
" debug_msg += "See https://bugzilla.redhat.com/show_bug.cgi?id=1238610" logging.debug(debug_msg) else: test.fail("Pool %s isn't %s as expected" % (checkpoint, expect_value)) else: logging.debug("Pool %s is %s as expected", checkpoint, actual_value) def change_source_path(new_path, update_policy="set"): n_poolxml = pool_xml.PoolXML() n_poolxml = n_poolxml.new_from_dumpxml(pool_name) s_xml = n_poolxml.get_source() s_xml.device_path = new_path if update_policy == "set": n_poolxml.set_source(s_xml) elif update_policy == "add": n_poolxml.add_source("device", {"path": new_path}) else: test.error("Unsupported policy type") logging.debug("After change_source_path:\n%s" % open(n_poolxml.xml).read()) return n_poolxml # Run Testcase pvt = utlv.PoolVolumeTest(test, params) kwargs = { 'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal, 'emulated_image': "emulated-image", 'pool_target': pool_target } params.update(kwargs) pool = pool_name clean_mount = False new_device = None try: if pre_def_pool: # Step(1) # Pool define pvt.pre_pool(**params) # Remove the partition for disk pool # For sometimes the partition will cause pool start failed if pool_type == "disk": virsh.pool_build(pool_name, "--overwrite", debug=True) # Get pool uuid: if pool_ref == "uuid" and not pool_uuid: pool = pool_ins.get_pool_uuid(pool_name) # Setup logical block device # Change pool source path # Undefine pool # Define pool with new xml # Start pool if update_policy: new_device = utlv.setup_or_cleanup_iscsi(True) lv_utils.vg_create(vg_name, new_device) new_device = utlv.create_local_disk(disk_type, size="0.5", vgname=vg_name, lvname=lv_name) new_path = new_device if invalid_source_path: new_path = invalid_source_path if pool_type == "fs": utlv.mkfs(new_device, source_format) n_poolxml = change_source_path(new_path, update_policy) p_xml = n_poolxml.xml if not virsh.pool_undefine(pool_name): test.fail("Undefine pool %s failed" % pool_name) if not virsh.pool_define(p_xml): test.fail("Define pool %s from %s failed" % (pool_name, p_xml)) logging.debug("Start pool %s" % pool_name) result = virsh.pool_start(pool_name, ignore_status=True, debug=True) utlv.check_exit_status(result, status_error) # Mount a valid fs to pool target if pool_type == "fs": source_list = [] mnt_cmd = "" pool_target = n_poolxml.target_path if invalid_source_path: source_list.append(new_device) else: s_devices = n_poolxml.xmltreefile.findall( "//source/device") for dev in s_devices: source_list.append(dev.get('path')) try: for src in source_list: mnt_cmd = "mount %s %s" % (src, pool_target) if not process.system(mnt_cmd, shell=True): clean_mount = True except process.CmdError: test.error("Failed to run %s" % mnt_cmd) # Step(2) # Pool autostart logging.debug("Try to mark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, readonly=ro_flag, ignore_status=True, debug=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="yes", expect_error=status_error) # Step(3) # Restart libvirtd and check pool status logging.info("Try to restart libvirtd") libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint="State", expect_value="active", expect_error=status_error) # Step(4) # Pool destroy if pool_ins.is_pool_active(pool_name): virsh.pool_destroy(pool_name) 
logging.debug("Pool %s destroyed" % pool_name) # Step(5) # Pool autostart disable logging.debug("Try to unmark pool %s as autostart" % pool_name) result = virsh.pool_autostart(pool, extra="--disable", debug=True, ignore_status=True) if not pre_def_pool: utlv.check_exit_status(result, status_error) if not result.exit_status: check_pool(pool_name, pool_type, checkpoint='Autostart', expect_value="no", expect_error=status_error) # Repeat step (3) logging.debug("Try to restart libvirtd") libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() check_pool(pool_name, pool_type, checkpoint='State', expect_value="inactive", expect_error=status_error) finally: # Clean up logging.debug("Try to clean up env") try: if clean_mount is True: for src in source_list: process.system("umount %s" % pool_target) if pre_def_pool: pvt.cleanup_pool(**params) if new_device: utlv.delete_local_disk(disk_type, vgname=vg_name, lvname=lv_name) lv_utils.vg_remove(vg_name) utlv.setup_or_cleanup_iscsi(False) if os.path.exists(p_xml): os.remove(p_xml) except exceptions.TestFail as details: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() logging.error(str(details))
def run(test, params, env): """ Test vm backingchain, blockcopy """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = params.get('error_msg', '') case = params.get('case', '') blockcommand = params.get('blockcommand', '') blk_top = int(params.get('top', 0)) blk_base = int(params.get('base', 0)) opts = params.get('opts', '--verbose --wait') check_func = params.get('check_func', '') disk_type = params.get('disk_type', '') disk_src = params.get('disk_src', '') driver_type = params.get('driver_type', 'qcow2') vol_name = params.get('vol_name', 'vol_blockpull') pool_name = params.get('pool_name', '') brick_path = os.path.join(data_dir.get_tmp_dir(), pool_name) vg_name = params.get('vg_name', 'HostVG') vol_size = params.get('vol_size', '10M') vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) bkxml = vmxml.copy() # List to collect paths to delete after test file_to_del = [] virsh_dargs = {'debug': True, 'ignore_status': False} try: all_disks = vmxml.get_disk_source(vm_name) if not all_disks: test.error('Not found any disk file in vm.') image_file = all_disks[0].find('source').get('file') logging.debug('Image file of vm: %s', image_file) # Get all dev of virtio disks to calculate the dev of new disk all_vdisks = [disk for disk in all_disks if disk.find('target').get('dev').startswith('vd')] disk_dev = all_vdisks[-1].find('target').get('dev') new_dev = disk_dev[:-1] + chr(ord(disk_dev[-1]) + 1) # Setup iscsi target if disk_src == 'iscsi': disk_target = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target: %s', disk_target) # Setup lvm elif disk_src == 'lvm': # Stop multipathd to avoid vgcreate fail multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Setup iscsi target device_name = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size='1G') logging.debug('ISCSI target for lvm: %s', device_name) # Create logical device logical_device = device_name lv_utils.vg_create(vg_name, logical_device) vg_created = True # Create logical volume as backing store vol_bk, vol_disk = 'vol1', 'vol2' lv_utils.lv_create(vg_name, vol_bk, vol_size) disk_target = '/dev/%s/%s' % (vg_name, vol_bk) src_vol = '/dev/%s/%s' % (vg_name, vol_disk) # Setup gluster elif disk_src == 'gluster': host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, brick_path=brick_path, **params) logging.debug(host_ip) gluster_img = 'test.img' img_create_cmd = "qemu-img create -f raw /mnt/%s 10M" % gluster_img process.run("mount -t glusterfs %s:%s /mnt; %s; umount /mnt" % (host_ip, vol_name, img_create_cmd), shell=True) disk_target = 'gluster://%s/%s/%s' % (host_ip, vol_name, gluster_img) else: test.error('Wrong disk source, unsupported by this test.') new_image = os.path.join(os.path.split(image_file)[0], 'test.img') params['snapshot_list'] = ['s%d' % i for i in range(1, 5)] if disk_src == 'lvm': new_image = src_vol if disk_type == 'block': new_image = disk_target for i in range(2, 6): lv_utils.lv_create(vg_name, 'vol%s' % i, vol_size) snapshot_image_list = ['/dev/%s/vol%s' % (vg_name, i) for i in range(2, 6)] else: file_to_del.append(new_image) snapshot_image_list = [new_image.replace('img', i) for i in params['snapshot_list']] cmd_create_img = 'qemu-img create -f %s -b %s %s -F raw' % (driver_type, disk_target, new_image) if disk_type == 'block' and driver_type == 'raw': pass else: 
process.run(cmd_create_img, verbose=True, shell=True) info_new = utils_misc.get_image_info(new_image) logging.debug(info_new) # Create xml of new disk and add it to vmxml if disk_type: new_disk = Disk() new_disk.xml = libvirt.create_disk_xml({ 'type_name': disk_type, 'driver_type': driver_type, 'target_dev': new_dev, 'source_file': new_image }) logging.debug(new_disk.xml) vmxml.devices = vmxml.devices.append(new_disk) vmxml.xmltreefile.write() logging.debug(vmxml) vmxml.sync() vm.start() logging.debug(virsh.dumpxml(vm_name)) # Create backing chain for i in range(len(params['snapshot_list'])): virsh.snapshot_create_as( vm_name, '%s --disk-only --diskspec %s,file=%s,stype=%s' % (params['snapshot_list'][i], new_dev, snapshot_image_list[i], disk_type), **virsh_dargs ) # Get path of each snapshot file snaps = virsh.domblklist(vm_name, debug=True).stdout.splitlines() for line in snaps: if line.lstrip().startswith(('hd', 'sd', 'vd')): file_to_del.append(line.split()[-1]) qemu_img_cmd = 'qemu-img info --backing-chain %s' % snapshot_image_list[-1] if libvirt_storage.check_qemu_image_lock_support(): qemu_img_cmd += " -U" bc_info = process.run(qemu_img_cmd, verbose=True, shell=True).stdout_text if not disk_type == 'block': bc_chain = snapshot_image_list[::-1] + [new_image, disk_target] else: bc_chain = snapshot_image_list[::-1] + [new_image] bc_result = check_backingchain(bc_chain, bc_info) if not bc_result: test.fail('qemu-img info output of backing chain is not correct: %s' % bc_info) # Generate blockpull/blockcommit options virsh_blk_cmd = eval('virsh.%s' % blockcommand) if blockcommand == 'blockpull' and blk_base != 0: opts += '--base {dev}[{}]'.format(blk_base, dev=new_dev) elif blockcommand == 'blockcommit': opt_top = ' --top {dev}[{}]'.format(blk_top, dev=new_dev) if blk_top != 0 else '' opt_base = ' --base {dev}[{}]'.format(blk_base, dev=new_dev) if blk_base != 0 else '' opts += opt_top + opt_base + ' --active' if blk_top == 0 else '' # Do blockpull/blockcommit virsh_blk_cmd(vm_name, new_dev, opts, **virsh_dargs) if blockcommand == 'blockcommit': virsh.blockjob(vm_name, new_dev, '--pivot', **virsh_dargs) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("XML after %s: %s" % (blockcommand, vmxml)) # Check backing chain after blockpull/blockcommit check_bc_func_name = 'check_bc_%s' % check_func if check_bc_func_name in globals(): check_bc = eval(check_bc_func_name) if not callable(check_bc): logging.warning('Function "%s" is not callable.', check_bc_func_name) if not check_bc(blockcommand, vmxml, new_dev, bc_chain): test.fail('Backing chain check after %s failed' % blockcommand) else: logging.warning('Function "%s" is not implemented.', check_bc_func_name) virsh.dumpxml(vm_name, debug=True) # Check whether login is successful try: vm.wait_for_login().close() except Exception as e: test.fail('Vm login failed') finally: logging.info('Start cleaning up.') for ss in params.get('snapshot_list', []): virsh.snapshot_delete(vm_name, '%s --metadata' % ss, debug=True) bkxml.sync() for path in file_to_del: logging.debug('Remove %s', path) if os.path.exists(path): os.remove(path) if disk_src == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src == 'lvm': process.run('rm -rf /dev/%s/%s' % (vg_name, vol_disk), ignore_status=True) if 'vol_bk' in locals(): lv_utils.lv_remove(vg_name, vol_bk) if 'vg_created' in locals() and vg_created: lv_utils.vg_remove(vg_name) cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True, verbose=True).strip() 
if pv_name: process.run("pvremove %s" % pv_name, verbose=True, ignore_status=True) libvirt.setup_or_cleanup_iscsi(is_setup=False) elif disk_src == 'gluster': gluster.setup_or_cleanup_gluster( is_setup=False, brick_path=brick_path, **params) if 'multipathd_status' in locals() and multipathd_status: multipathd.start()
def run(test, params, env): """ Test migration with option --copy-storage-all or --copy-storage-inc. """ vm = env.get_vm(params.get("migrate_main_vm")) disk_type = params.get("copy_storage_type", "file") if disk_type == "file": params['added_disk_type'] = "file" else: params['added_disk_type'] = "lvm" cp_mig = None primary_target = vm.get_first_disk_devices()["target"] file_path, file_size = vm.get_device_size(primary_target) # Convert to Gib file_size = int(file_size) // 1073741824 # Set the pool target using the source of the first disk params["precreation_pool_target"] = os.path.dirname(file_path) remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE") local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE") remote_user = params.get("migrate_dest_user", "root") remote_passwd = params.get("migrate_dest_pwd") if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"): test.cancel("Config remote or local host first.") # Config ssh autologin for it ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22) # Attach additional disks to vm if disk count big than 1 disks_count = int(params.get("added_disks_count", 1)) - 1 if disks_count: new_vm_name = "%s_smtest" % vm.name if vm.is_alive(): vm.destroy() utlv.define_new_vm(vm.name, new_vm_name) vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir, vm.address_cache) vms = [vm] vms_ip = {} for vm in vms: if vm.is_dead(): vm.start() vm.wait_for_login().close() vms_ip[vm.name] = vm.get_address() # Check if image pre-creation is supported. support_precreation = False try: if qemu_test("drive-mirror") and qemu_test("nbd-server"): support_precreation = True except exceptions.TestError as e: logging.debug(e) params["support_precreation"] = support_precreation # Abnormal parameters migrate_again = "yes" == params.get("migrate_again", "no") abnormal_type = params.get("abnormal_type") added_disks_list = [] rdm = None src_libvirt_file = None try: rdm = utils_test.RemoteDiskManager(params) vgname = params.get("sm_vg_name", "SMTEST") pool_created = False if disk_type == "lvm": target1 = target2 = "" # For cleanup # Create volume group with iscsi # For local, target is a device name target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True, emulated_image="emulated-iscsi1") lv_utils.vg_create(vgname, target1) logging.debug("Created VG %s", vgname) # For remote, target is real target name target2, _ = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False, emulated_image="emulated-iscsi2") logging.debug("Created target: %s", target2) # Login on remote host remote_device = rdm.iscsi_login_setup(local_host, target2) if not rdm.create_vg(vgname, remote_device): test.error("Create VG %s on %s failed." % (vgname, remote_host)) all_disks = utlv.attach_disks(vm, file_path, vgname, params) # Reserve for cleanup added_disks_list = list(all_disks.keys()) all_disks[file_path] = file_size logging.debug("All disks need to be migrated:%s", all_disks) if abnormal_type == "occupied_disk": occupied_path = rdm.occupy_space(disk_type, file_size, file_path, vgname, timeout=600) if abnormal_type != "not_exist_file": for disk, size in list(all_disks.items()): if disk == file_path: if support_precreation: pool_created = create_destroy_pool_on_remote(test, "create", params) if not pool_created: test.error("Create pool on remote " + "host '%s' failed." 
% remote_host) else: rdm.create_image("file", disk, size, None, None, img_frmt='qcow2') else: sparse = False if disk_type == 'lvm' else True rdm.create_image(disk_type, disk, size, vgname, os.path.basename(disk), sparse=sparse, timeout=120) fail_flag = False remove_dict = { "do_search": '{"%s": "ssh:/"}' % params.get("migrate_dest_uri")} src_libvirt_file = libvirt_config.remove_key_for_modular_daemon( remove_dict) try: logging.debug("Start migration...") cp_mig = copied_migration(test, vms, vms_ip, params) # Check the new disk can be working well with I/O after migration utils_disk.check_remote_vm_disks({'server_ip': remote_host, 'server_user': remote_user, 'server_pwd': remote_passwd, 'vm_ip': vms_ip[vm.name], 'vm_pwd': params.get('password')}) if migrate_again: fail_flag = True test.fail("Migration succeed, but not expected!") else: return except exceptions.TestFail: if not migrate_again: raise if abnormal_type == "occupied_disk": rdm.remove_path(disk_type, occupied_path) elif abnormal_type == "not_exist_file": for disk, size in list(all_disks.items()): if disk == file_path: rdm.create_image("file", disk, size, None, None, img_frmt='qcow2') else: rdm.create_image(disk_type, disk, size, vgname, os.path.basename(disk)) elif abnormal_type == "migration_interupted": params["thread_timeout"] = 120 # Raise after cleanup if fail_flag: raise # Migrate it again to confirm failed reason params["status_error"] = "no" cp_mig = copied_migration(test, vms, vms_ip, params) finally: # Recover created vm if cp_mig: cp_mig.cleanup_dest_vm(vm, None, params.get("migrate_dest_uri")) if vm.is_alive(): vm.destroy() if src_libvirt_file: src_libvirt_file.restore() if disks_count and vm.name == new_vm_name: vm.undefine() for disk in added_disks_list: if disk_type == 'file': utlv.delete_local_disk(disk_type, disk) else: lvname = os.path.basename(disk) utlv.delete_local_disk(disk_type, disk, vgname, lvname) rdm.remove_path(disk_type, disk) rdm.remove_path("file", file_path) if pool_created: pool_destroyed = create_destroy_pool_on_remote(test, "destroy", params) if not pool_destroyed: test.error("Destroy pool on remote host '%s' failed." % remote_host) if disk_type == "lvm": rdm.remove_vg(vgname) rdm.iscsi_login_setup(local_host, target2, is_login=False) try: lv_utils.vg_remove(vgname) except Exception: pass # let it go to confirm cleanup iscsi device utlv.setup_or_cleanup_iscsi(is_setup=False, emulated_image="emulated-iscsi1") utlv.setup_or_cleanup_iscsi(is_setup=False, emulated_image="emulated-iscsi2")
def run(test, params, env): """ Test migration with option --copy-storage-all or --copy-storage-inc. """ vm = env.get_vm(params.get("migrate_main_vm")) disk_type = params.get("copy_storage_type", "file") if disk_type == "file": params['added_disk_type'] = "file" else: params['added_disk_type'] = "block" primary_target = vm.get_first_disk_devices()["target"] file_path, file_size = vm.get_device_size(primary_target) # Convert to Gib file_size = int(file_size) // 1073741824 # Set the pool target using the source of the first disk params["precreation_pool_target"] = os.path.dirname(file_path) remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE") local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE") remote_user = params.get("migrate_dest_user", "root") remote_passwd = params.get("migrate_dest_pwd") if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"): test.cancel("Config remote or local host first.") # Config ssh autologin for it ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22) # Attach additional disks to vm if disk count big than 1 disks_count = int(params.get("added_disks_count", 1)) - 1 if disks_count: new_vm_name = "%s_smtest" % vm.name if vm.is_alive(): vm.destroy() utlv.define_new_vm(vm.name, new_vm_name) vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir, vm.address_cache) vms = [vm] if vm.is_dead(): vm.start() # Check if image pre-creation is supported. support_precreation = False try: if qemu_test("drive-mirror") and qemu_test("nbd-server"): support_precreation = True except exceptions.TestError as e: logging.debug(e) params["support_precreation"] = support_precreation # Abnormal parameters migrate_again = "yes" == params.get("migrate_again", "no") abnormal_type = params.get("abnormal_type") added_disks_list = [] rdm = None try: rdm = utils_test.RemoteDiskManager(params) vgname = params.get("sm_vg_name", "SMTEST") pool_created = False if disk_type == "lvm": target1 = target2 = "" # For cleanup # Create volume group with iscsi # For local, target is a device name target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True, emulated_image="emulated-iscsi1") lv_utils.vg_create(vgname, target1) logging.debug("Created VG %s", vgname) # For remote, target is real target name target2, _ = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False, emulated_image="emulated-iscsi2") logging.debug("Created target: %s", target2) # Login on remote host remote_device = rdm.iscsi_login_setup(local_host, target2) if not rdm.create_vg(vgname, remote_device): test.error("Create VG %s on %s failed." % (vgname, remote_host)) all_disks = utlv.attach_disks(vm, file_path, vgname, params) # Reserve for cleanup added_disks_list = list(all_disks.keys()) all_disks[file_path] = file_size logging.debug("All disks need to be migrated:%s", all_disks) if abnormal_type == "occupied_disk": occupied_path = rdm.occupy_space(disk_type, file_size, file_path, vgname, timeout=600) if abnormal_type != "not_exist_file": for disk, size in list(all_disks.items()): if disk == file_path: if support_precreation: pool_created = create_destroy_pool_on_remote(test, "create", params) if not pool_created: test.error("Create pool on remote " + "host '%s' failed." 
% remote_host) else: rdm.create_image("file", disk, size, None, None, img_frmt='qcow2') else: rdm.create_image(disk_type, disk, size, vgname, os.path.basename(disk)) fail_flag = False try: logging.debug("Start migration...") copied_migration(test, vms, params) if migrate_again: fail_flag = True test.fail("Migration succeed, but not expected!") else: return except exceptions.TestFail: if not migrate_again: raise if abnormal_type == "occupied_disk": rdm.remove_path(disk_type, occupied_path) elif abnormal_type == "not_exist_file": for disk, size in list(all_disks.items()): if disk == file_path: rdm.create_image("file", disk, size, None, None) else: rdm.create_image(disk_type, disk, size, vgname, os.path.basename(disk)) elif abnormal_type == "migration_interupted": params["thread_timeout"] = 120 # Raise after cleanup if fail_flag: raise # Migrate it again to confirm failed reason copied_migration(test, vms, params) finally: # Recover created vm if vm.is_alive(): vm.destroy() if disks_count and vm.name == new_vm_name: vm.undefine() for disk in added_disks_list: utlv.delete_local_disk(disk_type, disk) rdm.remove_path(disk_type, disk) rdm.remove_path("file", file_path) if pool_created: pool_destroyed = create_destroy_pool_on_remote(test, "destroy", params) if not pool_destroyed: test.error("Destroy pool on remote host '%s' failed." % remote_host) if disk_type == "lvm": rdm.remove_vg(vgname) rdm.iscsi_login_setup(local_host, target2, is_login=False) try: lv_utils.vg_remove(vgname) except Exception: pass # let it go to confirm cleanup iscsi device utlv.setup_or_cleanup_iscsi(is_setup=False, emulated_image="emulated-iscsi1") utlv.setup_or_cleanup_iscsi(is_setup=False, emulated_image="emulated-iscsi2")
def run(test, params, env): """ Test command: virsh find-storage-pool-sources-as 1. Prepare env to provide source storage: 1). For 'netfs' source type, setup nfs server 2). For 'iscsi' source type, setup iscsi server 3). For 'logcial' type pool, setup iscsi storage to create vg 2. Find the pool source by running virsh cmd """ source_type = params.get("source_type", "") source_host = params.get("source_host", "127.0.0.1") source_port = params.get("source_port", "") options = params.get("extra_options", "") vg_name = params.get("vg_name", "virttest_vg_0") ro_flag = "yes" == params.get("readonly_mode", "no") status_error = "yes" == params.get("status_error", "no") if not source_type: raise exceptions.TestFail("Command requires <type> value") cleanup_nfs = False cleanup_iscsi = False cleanup_logical = False if source_host == "127.0.0.1": if source_type == "netfs": # Set up nfs res = utils_test.libvirt.setup_or_cleanup_nfs(True) selinux_bak = res["selinux_status_bak"] cleanup_nfs = True if source_type in ["iscsi", "logical"]: # Set up iscsi try: iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True) # If we got nothing, force failure if not iscsi_device: raise exceptions.TestFail("Did not setup an iscsi device") cleanup_iscsi = True if source_type == "logical": # Create VG by using iscsi device lv_utils.vg_create(vg_name, iscsi_device) cleanup_logical = True except Exception as detail: if cleanup_iscsi: utils_test.libvirt.setup_or_cleanup_iscsi(False) raise exceptions.TestFail("iscsi setup failed:\n%s" % detail) # Run virsh cmd options = "%s %s " % (source_host, source_port) + options if ro_flag: logging.debug("Readonly mode test") try: cmd_result = virsh.find_storage_pool_sources_as( source_type, options, ignore_status=True, debug=True, readonly=ro_flag) utils_test.libvirt.check_exit_status(cmd_result, status_error) finally: # Clean up if cleanup_logical: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = to_text(process.system_output(cmd, shell=True)) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name) if cleanup_iscsi: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_nfs: utils_test.libvirt.setup_or_cleanup_nfs( False, restore_selinux=selinux_bak)
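# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): outside the test harness
# the same query can be issued directly with the virsh CLI, asking an NFS or
# iSCSI server which pool sources it exposes.  The host value is a
# placeholder; on success virsh prints a <sources> XML document.
import subprocess


def find_pool_sources_as(source_type, host, port=""):
    """Wrap 'virsh find-storage-pool-sources-as <type> <host> [<port>]'."""
    cmd = ["virsh", "find-storage-pool-sources-as", source_type, host]
    if port:
        cmd.append(port)
    return subprocess.run(cmd, capture_output=True, text=True, check=False)


# Example (placeholder host):
# result = find_pool_sources_as("netfs", "nfs.example.com")
# print(result.stdout)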
def run(test, params, env): """ Test command: virsh find-storage-pool-sources 1. Prepare env to provide source storage if use localhost: 1). For 'netfs' source type, setup nfs server 2). For 'iscsi' source type, setup iscsi server 3). For 'logical' type pool, setup iscsi storage to create vg 4). Prepare srcSpec xml file if not given 2. Find the pool sources by running virsh cmd """ source_type = params.get("source_type", "") source_host = params.get("source_host", "127.0.0.1") srcSpec = params.get("source_Spec", "") vg_name = params.get("vg_name", "virttest_vg_0") ro_flag = "yes" == params.get("readonly_mode", "no") status_error = "yes" == params.get("status_error", "no") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise exceptions.TestSkipError("API acl test not supported in " "current libvirt version.") if not source_type: raise exceptions.TestFail("Command requires <type> value") cleanup_nfs = False cleanup_iscsi = False cleanup_logical = False # Prepare source storage if source_host == "127.0.0.1": if source_type == "netfs": # Set up nfs res = utils_test.libvirt.setup_or_cleanup_nfs(True) selinux_bak = res["selinux_status_bak"] cleanup_nfs = True if source_type in ["iscsi", "logical"]: # Set up iscsi iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True) # If we got nothing, force failure if not iscsi_device: raise exceptions.TestFail("Did not setup an iscsi device") cleanup_iscsi = True if source_type == "logical": # Create vg by using iscsi device try: lv_utils.vg_create(vg_name, iscsi_device) except Exception as detail: utils_test.libvirt.setup_or_cleanup_iscsi(False) raise exceptions.TestFail("vg_create failed: %s" % detail) cleanup_logical = True # Prepare srcSpec xml if srcSpec: if srcSpec == "INVALID.XML": src_xml = "<invalid><host name='#@!'/><?source>" elif srcSpec == "VALID.XML": src_xml = "<source><host name='%s'/></source>" % source_host srcSpec = xml_utils.TempXMLFile().name with open(srcSpec, "w+") as srcSpec_file: srcSpec_file.write(src_xml) logging.debug("srcSpec file content:\n%s", srcSpec_file.read()) if params.get('setup_libvirt_polkit') == 'yes' and srcSpec: cmd = "chmod 666 %s" % srcSpec process.run(cmd) if ro_flag: logging.debug("Readonly mode test") # Run virsh cmd try: cmd_result = virsh.find_storage_pool_sources( source_type, srcSpec, ignore_status=True, debug=True, unprivileged_user=unprivileged_user, uri=uri, readonly=ro_flag) utils_test.libvirt.check_exit_status(cmd_result, status_error) finally: # Clean up if cleanup_logical: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = to_text(process.system_output(cmd, shell=True)) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name) if cleanup_iscsi: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_nfs: utils_test.libvirt.setup_or_cleanup_nfs( False, restore_selinux=selinux_bak)
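# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): find-storage-pool-sources
# takes a srcSpec XML file instead of positional host/port arguments.  This
# builds the same minimal "<source><host name='...'/></source>" document the
# test writes out and hands it to the virsh CLI; the host value is a
# placeholder.
import subprocess
import tempfile


def find_pool_sources(source_type, host):
    """Wrap 'virsh find-storage-pool-sources <type> <srcSpec file>'."""
    src_xml = "<source><host name='%s'/></source>" % host
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as fp:
        fp.write(src_xml)
        src_spec = fp.name
    cmd = ["virsh", "find-storage-pool-sources", source_type, src_spec]
    return subprocess.run(cmd, capture_output=True, text=True, check=False)


# Example (placeholder host):
# result = find_pool_sources("iscsi", "192.168.122.1")
# print(result.stdout)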
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do the following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after starting it (7) Run vol-list with the pool (8) Delete pool For negative cases, redo the failed step to let the case continue. Run cleanup at last to restore the env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) == "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True} def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Found pool '%s' in pool list.", pool_name) else: logging.debug("Did not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpected pool '%s' exists." 
% pool_name) if not expect_error and not found: test.fail("Expected pool '%s' to exist, but it was not found." % pool_name) # Run Testcase kwargs = {'source_format': params.get('pool_source_format', 'ext4')} try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utlv.define_pool(pool_name, pool_type, pool_target, cleanup_env, **kwargs) utlv.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo in the negative case to let the case continue result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, define_error) if define_error: # Redo in the negative case to let the case continue result = virsh.pool_define(pool_xml) utlv.check_exit_status(result) # Step (3) # '--overwrite/--no-overwrite' just for fs/disk/logical type pool # disk/fs pool: as the prepare step already makes a label and creates a filesystem # for the disk, using '--overwrite' is necessary # logical pool: building the pool will fail if the VG already exists, BZ#1373711 if pool_type != "logical": option = '' if pool_type in ['disk', 'fs']: option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result, build_error) if build_error: # Redo in the negative case to let the case continue result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # For iSCSI pool, we need to discover targets before starting the pool if pool_type == 'iscsi': cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1' process.run(cmd, shell=True) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, start_error) if start_error: # Redo in the negative case to let the case continue result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: test.fail("Expected failure, but the command succeeded.") else: if not destroy_error: test.fail("Pool %s destroy failed, not expected." % pool_name) else: # Redo in the negative case to let the case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool %s failed." 
% pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) utlv.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool %s failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): test.fail("The target path '%s' still exists." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.run(cmd, shell=True).stdout_text lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name, shell=True) if cleanup_env[1]: utlv.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utlv.setup_or_cleanup_nfs( False, restore_selinux=cleanup_env[3])
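# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the pool lifecycle the
# ACL test walks through (define -> build -> start -> refresh -> vol-list ->
# destroy -> delete -> undefine) maps onto plain virsh subcommands.  The pool
# name and target path are placeholders; a 'dir' pool is assumed because it
# needs no extra source setup.
import subprocess


def virsh_cmd(*args):
    """Run one virsh subcommand and return its CompletedProcess."""
    return subprocess.run(["virsh"] + list(args), check=False)


def dir_pool_lifecycle(pool_name="demo_pool", target="/var/lib/libvirt/demo"):
    virsh_cmd("pool-define-as", pool_name, "dir", "--target", target)
    virsh_cmd("pool-build", pool_name)
    virsh_cmd("pool-start", pool_name)
    virsh_cmd("pool-refresh", pool_name)
    virsh_cmd("vol-list", pool_name)
    virsh_cmd("pool-destroy", pool_name)
    virsh_cmd("pool-delete", pool_name)
    virsh_cmd("pool-undefine", pool_name)


# Example:
# dir_pool_lifecycle()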
def run(test, params, env): """ Test virsh {at|de}tach-disk command. The command can attach new disk/detach disk. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh attach/detach-disk operation. 3.Recover test environment. 4.Confirm the test result. """ def check_vm_partition(vm, device, os_type, target_name, old_parts): """ Check VM disk's partition. :param vm. VM guest. :param os_type. VM's operation system type. :param target_name. Device target type. :return: True if check successfully. """ logging.info("Checking VM partittion...") if vm.is_dead(): vm.start() try: attached = False if os_type == "linux": session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.debug("Added parts: %s" % added_parts) for i in range(len(added_parts)): if device == "disk": if target_name.startswith("vd"): if added_parts[i].startswith("vd"): attached = True elif target_name.startswith("hd") or target_name.startswith("sd"): if added_parts[i].startswith("sd"): attached = True elif device == "cdrom": if added_parts[i].startswith("sr"): attached = True session.close() return attached except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def acpiphp_module_modprobe(vm, os_type): """ Add acpiphp module if VM's os type is rhle5.* :param vm. VM guest. :param os_type. VM's operation system type. :return: True if operate successfully. """ if vm.is_dead(): vm.start() try: if os_type == "linux": session = vm.wait_for_login() s_rpm, _ = session.cmd_status_output( "rpm --version") # If status is different from 0, this # guest OS doesn't support the rpm package # manager if s_rpm: session.close() return True _, o_vd = session.cmd_status_output( "rpm -qa | grep redhat-release") if o_vd.find("5Server") != -1: s_mod, o_mod = session.cmd_status_output( "modprobe acpiphp") del o_mod if s_mod != 0: session.close() return False session.close() return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_shareable(at_with_shareable, test_twice): """ check if current libvirt version support shareable option at_with_shareable: True or False. Whether attach disk with shareable option test_twice: True or False. Whether perform operations twice return: True or cancel the test """ if at_with_shareable or test_twice: if libvirt_version.version_compare(3, 9, 0): return True else: test.cancel("Current libvirt version doesn't support shareable feature") # Get test command. 
test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk") vm_ref = params.get("at_dt_disk_vm_ref", "name") at_options = params.get("at_dt_disk_at_options", "") dt_options = params.get("at_dt_disk_dt_options", "") at_with_shareable = "yes" == params.get("at_with_shareable", 'no') pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running") status_error = "yes" == params.get("status_error", 'no') no_attach = params.get("at_dt_disk_no_attach", 'no') os_type = params.get("os_type", "linux") qemu_file_lock = params.get("qemu_file_lock", "") if qemu_file_lock: if utils_misc.compare_qemu_version(2, 9, 0): logging.info('From qemu-kvm-rhev 2.9.0:' 'QEMU image locking, which should prevent multiple ' 'runs of QEMU or qemu-img when a VM is running.') if test_cmd == "detach-disk" or pre_vm_state == "shut off": test.cancel('This case is not supported.') else: logging.info('The expect result is failure as opposed with succeed') status_error = True # Disk specific attributes. device = params.get("at_dt_disk_device", "disk") device_source_name = params.get("at_dt_disk_device_source", "attach.img") device_source_format = params.get("at_dt_disk_device_source_format", "raw") device_target = params.get("at_dt_disk_device_target", "vdd") device_disk_bus = params.get("at_dt_disk_bus_type", "virtio") source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes") create_img = "yes" == params.get("at_dt_disk_create_image", "yes") test_twice = "yes" == params.get("at_dt_disk_test_twice", "no") test_type = "yes" == params.get("at_dt_disk_check_type", "no") test_audit = "yes" == params.get("at_dt_disk_check_audit", "no") test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no") test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no") restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no") detach_disk_with_print_xml = "yes" == params.get("detach_disk_with_print_xml", "no") vg_name = params.get("at_dt_disk_vg", "vg_test_0") lv_name = params.get("at_dt_disk_lv", "lv_test_0") serial = params.get("at_dt_disk_serial", "") address = params.get("at_dt_disk_address", "") address2 = params.get("at_dt_disk_address2", "") cache_options = params.get("cache_options", "") time_sleep = params.get("time_sleep", 3) if check_shareable(at_with_shareable, test_twice): at_options += " --mode shareable" if serial: at_options += (" --serial %s" % serial) if address2: at_options_twice = at_options + (" --address %s" % address2) if address: at_options += (" --address %s" % address) if cache_options: if cache_options.count("directsync"): if not libvirt_version.version_compare(1, 0, 0): test.cancel("'directsync' cache option doesn't " "support in current libvirt version.") at_options += (" --cache %s" % cache_options) vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Create virtual device file. 
device_source_path = os.path.join(data_dir.get_tmp_dir(), device_source_name) if test_block_dev: device_source = libvirt.setup_or_cleanup_iscsi(True) if not device_source: # We should skip this case test.cancel("Can not get iscsi device name in host") if test_logcial_dev: lv_utils.vg_create(vg_name, device_source) device_source = libvirt.create_local_disk("lvm", size="10M", vgname=vg_name, lvname=lv_name) logging.debug("New created volume: %s", lv_name) else: if source_path and create_img: device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1G", disk_format=device_source_format) else: device_source = device_source_name # if we are testing audit, we need to start audit servcie first. if test_audit: auditd_service = Factory.create_service("auditd") if not auditd_service.status(): auditd_service.start() logging.info("Auditd service status: %s" % auditd_service.status()) # If we are testing cdrom device, we need to detach hdc in VM first. if device == "cdrom": if vm.is_alive(): vm.destroy(gracefully=False) s_detach = virsh.detach_disk(vm_name, device_target, "--config") if not s_detach: logging.error("Detach hdc failed before test.") # If we are testing detach-disk, we need to attach certain device first. if test_cmd == "detach-disk" and no_attach != "yes": s_at_options = "--driver qemu --config" #Since lock feature is introduced in libvirt 3.9.0 afterwards, disk shareable options #need be set if disk needs be attached multitimes if check_shareable(at_with_shareable, test_twice): s_at_options += " --mode shareable" s_attach = virsh.attach_disk(vm_name, device_source, device_target, s_at_options, debug=True).exit_status if s_attach != 0: logging.error("Attaching device failed before testing detach-disk") else: logging.debug("Attaching device succeeded before testing detach-disk") if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1", disk_format=device_source_format) s_attach = virsh.attach_disk(vm_name, device_source, device_target2, s_at_options).exit_status if s_attach != 0: logging.error("Attaching device failed before testing " "detach-disk test_twice") vm.start() vm.wait_for_login() # Add acpiphp module before testing if VM's os type is rhle5.* if not acpiphp_module_modprobe(vm, os_type): test.error("Add acpiphp module failed before test.") # Turn VM into certain state. if pre_vm_state == "paused": logging.info("Suspending %s..." % vm_name) if vm.is_alive(): vm.pause() elif pre_vm_state == "shut off": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False) # Get disk count before test. disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name) # Test. domid = vm.get_id() domuuid = vm.get_uuid() # Confirm how to reference a VM. if vm_ref == "name": vm_ref = vm_name elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref == "uuid": vm_ref = domuuid else: vm_ref = "" if test_cmd == "attach-disk": status = virsh.attach_disk(vm_ref, device_source, device_target, at_options, debug=True).exit_status elif test_cmd == "detach-disk": # For detach disk with print-xml option, it only print information,and not actual disk detachment. 
if detach_disk_with_print_xml and libvirt_version.version_compare(4, 5, 0): ret = virsh.detach_disk(vm_ref, device_target, at_options) libvirt.check_exit_status(ret) cmd = ("echo \"%s\" | grep -A 16 %s" % (ret.stdout.strip(), device_source_name)) if process.system(cmd, ignore_status=True, shell=True): test.error("Check disk with source image name failed") status = virsh.detach_disk(vm_ref, device_target, dt_options, debug=True).exit_status if restart_libvirtd: libvirtd_serv = utils_libvirtd.Libvirtd() libvirtd_serv.restart() if test_twice: device_target2 = params.get("at_dt_disk_device_target2", device_target) device_source = libvirt.create_local_disk( "file", path=device_source_path, size="1G", disk_format=device_source_format) if test_cmd == "attach-disk": if address2: at_options = at_options_twice status = virsh.attach_disk(vm_ref, device_source, device_target2, at_options, debug=True).exit_status elif test_cmd == "detach-disk": status = virsh.detach_disk(vm_ref, device_target2, dt_options, debug=True).exit_status # Resume guest after command. On newer libvirt this is fixed as it has # been a bug. The change in xml file is done after the guest is resumed. if pre_vm_state == "paused": vm.resume() time.sleep(5) # Check audit log check_audit_after_cmd = True if test_audit: grep_audit = ('grep "%s" /var/log/audit/audit.log' % test_cmd.split("-")[0]) cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"' % device_source) if process.run(cmd, shell=True).exit_status: logging.error("Audit check failed") check_audit_after_cmd = False # Need wait a while for xml to sync time.sleep(float(time_sleep)) # Check disk count after command. check_count_after_cmd = True disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name) if test_cmd == "attach-disk": if disk_count_after_cmd == disk_count_before_cmd: check_count_after_cmd = False elif test_cmd == "detach-disk": if disk_count_after_cmd < disk_count_before_cmd: check_count_after_cmd = False # Recover VM state. if pre_vm_state == "shut off": vm.start() # Check in VM after command. check_vm_after_cmd = True check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target, old_parts) # Check disk type after attach. check_disk_type = True if test_type: if test_block_dev: check_disk_type = vm_xml.VMXML.check_disk_type(vm_name, device_source, "block") else: check_disk_type = vm_xml.VMXML.check_disk_type(vm_name, device_source, "file") # Check disk serial after attach. check_disk_serial = True if serial: disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target) if serial != disk_serial: check_disk_serial = False # Check disk address after attach. check_disk_address = True if address: disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target) if address != disk_address: check_disk_address = False # Check multifunction address after attach. check_disk_address2 = True if address2: disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2) if address2 != disk_address2: check_disk_address2 = False # Check disk cache option after attach. 
check_cache_after_cmd = True if cache_options: disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target, "driver", "cache") if cache_options == "default": if disk_cache is not None: check_cache_after_cmd = False elif disk_cache != cache_options: check_cache_after_cmd = False # Eject cdrom test eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no") save_vm = "yes" == params.get("at_dt_disk_save_vm", "no") save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") try: if eject_cdrom: eject_params = {'type_name': "file", 'device_type': "cdrom", 'target_dev': device_target, 'target_bus': device_disk_bus} eject_xml = libvirt.create_disk_xml(eject_params) with open(eject_xml) as eject_file: logging.debug("Eject CDROM by XML: %s", eject_file.read()) # Run the command twice to make sure the cdrom tray opens first (BZ#892289) # Open tray virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True) # Add time sleep between two attach commands. if time_sleep: time.sleep(float(time_sleep)) # Eject cdrom result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True) if result.exit_status != 0: test.fail("Eject CDROM failed") if vm_xml.VMXML.check_disk_exist(vm_name, device_source): test.fail("Found %s after eject" % device_source) # Save and restore VM if save_vm: result = virsh.save(vm_name, save_file, debug=True) libvirt.check_exit_status(result) result = virsh.restore(save_file, debug=True) libvirt.check_exit_status(result) if vm_xml.VMXML.check_disk_exist(vm_name, device_source): test.fail("Found %s after restore" % device_source) # Destroy VM. vm.destroy(gracefully=False) # Check disk count after VM shutdown (with --config). check_count_after_shutdown = True inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_count_after_shutdown = len(inactive_vmxml.get_disk_all()) if test_cmd == "attach-disk": if disk_count_after_shutdown == disk_count_before_cmd: check_count_after_shutdown = False elif test_cmd == "detach-disk": if disk_count_after_shutdown < disk_count_before_cmd: check_count_after_shutdown = False finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.debug("Restore the VM XML") backup_xml.sync() if os.path.exists(save_file): os.remove(save_file) if test_block_dev: if test_logcial_dev: libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % device_source, shell=True, ignore_status=True) libvirt.setup_or_cleanup_iscsi(False) else: libvirt.delete_local_disk("file", device_source) # Check results. if status_error: if not status: test.fail("virsh %s exited with an unexpected value." % test_cmd) else: if status: test.fail("virsh %s failed." 
% test_cmd) if test_cmd == "attach-disk": if at_options.count("config"): if not check_count_after_shutdown: test.fail("Cannot see config attached device " "in xml file after VM shutdown.") if not check_disk_serial: test.fail("Serial set failed after attach") if not check_disk_address: test.fail("Address set failed after attach") if not check_disk_address2: test.fail("Address(multifunction) set failed" " after attach") else: if not check_count_after_cmd: test.fail("Cannot see device in xml file" " after attach.") if not check_vm_after_cmd: test.fail("Cannot see device in VM after" " attach.") if not check_disk_type: test.fail("Check disk type failed after" " attach.") if not check_audit_after_cmd: test.fail("Audit hotplug failure after attach") if not check_cache_after_cmd: test.fail("Check cache failure after attach") if at_options.count("persistent"): if not check_count_after_shutdown: test.fail("Cannot see device attached " "with persistent after " "VM shutdown.") else: if check_count_after_shutdown: test.fail("See non-config attached device " "in xml file after VM shutdown.") elif test_cmd == "detach-disk": if dt_options.count("config"): if check_count_after_shutdown: test.fail("See config detached device in " "xml file after VM shutdown.") else: if check_count_after_cmd: test.fail("See device in xml file " "after detach.") if check_vm_after_cmd: test.fail("See device in VM after detach.") if not check_audit_after_cmd: test.fail("Audit hotunplug failure " "after detach") if dt_options.count("persistent"): if check_count_after_shutdown: test.fail("See device deattached " "with persistent after " "VM shutdown.") else: if not check_count_after_shutdown: test.fail("See non-config detached " "device in xml file after " "VM shutdown.") else: test.error("Unknown command %s." % test_cmd)
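# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the hotplug/hot-unplug
# paths this test drives correspond to plain 'virsh attach-disk' and
# 'virsh detach-disk' invocations.  The domain name, image path and target
# below are placeholders, and the image is assumed to exist already (e.g.
# created with 'qemu-img create' as the test does).
import subprocess


def attach_disk(domain, source, target, extra=("--driver", "qemu", "--live")):
    """Hotplug a disk image into a running guest."""
    cmd = ["virsh", "attach-disk", domain, source, target] + list(extra)
    return subprocess.run(cmd, check=False)


def detach_disk(domain, target, extra=("--live",)):
    """Hot-unplug the disk that was attached at the given target."""
    cmd = ["virsh", "detach-disk", domain, target] + list(extra)
    return subprocess.run(cmd, check=False)


# Example (placeholder values):
# attach_disk("avocado-vt-vm1", "/var/lib/libvirt/images/attach.img", "vdd")
# detach_disk("avocado-vt-vm1", "vdd")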