def check_event_value(vm_name, perf_option, event):
    """
    Check domstats output and whether the event has a value as expected:
    1. if perf_option == --disable, there is no value/line
    2. if perf_option == --enable, there is a value/line

    :param vm_name: Domain name or id
    :param perf_option: --enable or --disable
    :param event: perf event name
    """
    logging.debug("check_event_value: vm_name=%s, perf_option=%s, event=%s",
                  vm_name, perf_option, event)
    ret = False
    result = virsh.domstats(vm_name, "--perf", ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    output = result.stdout.strip()
    logging.debug("domstats output is %s", output)

    if perf_option == '--enable':
        for line in output.split('\n'):
            if '.' in line and event == (line.split('.')[1]).split('=')[0]:
                ret = True
    else:
        ret = True
        for line in output.split('\n'):
            if '.' in line and event == (line.split('.')[1]).split('=')[0]:
                ret = False
    return ret
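# A minimal, self-contained sketch (not part of the test) of the parsing rule
# used above: a perf event counts as "reported" when a "perf.<event>=<n>" line
# appears in `virsh domstats --perf` output. The sample output is illustrative.
def _event_reported(domstats_output, event):
    """Return True if a 'perf.<event>=' line occurs in the domstats output."""
    for line in domstats_output.splitlines():
        if '.' in line and event == line.split('.')[1].split('=')[0]:
            return True
    return False

# Example:
#   sample = "Domain: 'vm1'\n  perf.cmt=12345\n  perf.mbmt=0"
#   _event_reported(sample, "cmt")   -> True
#   _event_reported(sample, "cache") -> False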
def make_snapshot():
    """
    Make external snapshots.

    :return: external snapshot path list
    """
    logging.info("Making snapshot...")
    first_disk_source = vm.get_first_disk_devices()['source']
    snapshot_path_list = []
    snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
    snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
    snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
    snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
    snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
    snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")
    # Attempt to take different types of snapshots.
    snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                            "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                            "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                            "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata"
                                  % (snapshot4_file, snapshot4_disk_file),
                            "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata"
                                  % (snapshot5_file, snapshot5_disk_file)}
    for snapshot_name in sorted(snapshots_param_dict.keys()):
        ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)
        if snapshot_name != 's4' and snapshot_name != 's5':
            snapshot_path_list.append(
                first_disk_source.replace('qcow2', snapshot_name))
    return snapshot_path_list
def check_pool_list(pool_name, option="--all", expect_error=False):
    """
    Check pool by running pool-list command with given option.

    :param pool_name: Name of the pool
    :param option: option for pool-list command
    :param expect_error: Boolean value, expect command success or fail
    """
    found = False
    # Get the list stored in a variable
    if list_dumpxml_acl:
        result = virsh.pool_list(option, **acl_dargs)
    else:
        result = virsh.pool_list(option, ignore_status=True)
    libvirt.check_exit_status(result, False)
    output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
    for item in output:
        if pool_name in item[0]:
            found = True
            break
    if found:
        logging.debug("Found pool '%s' in pool list.", pool_name)
    else:
        logging.debug("Did not find pool '%s' in pool list.", pool_name)
    if expect_error and found:
        raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
    if not expect_error and not found:
        raise error.TestFail("Expected pool '%s' does not exist." % pool_name)
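# Illustrative sketch (the table layout is an assumption based on a typical
# `virsh pool-list --all` output) of how the regex above pulls
# (name, state, autostart) triples out of the command output.
import re

_sample = """ Name      State    Autostart
-----------------------------------
 default   active   yes
 images    inactive no
"""
_rows = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", _sample)
# _rows -> [('Name', 'State', 'Autostart'), ('default', 'active', 'yes'),
#           ('images', 'inactive', 'no')]
# The header row is also captured, which is why callers only compare the
# pool name against item[0].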
def check_result(result, status_error):
    """
    Check virt-v2v command result
    """
    libvirt.check_exit_status(result, status_error)
    output = result.stdout + result.stderr
    if not status_error:
        if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                            timeout=v2v_timeout):
            raise exceptions.TestFail('Import VM failed')
        # Check guest following the checkpoint document after conversion
        logging.info('Checking common checkpoints for v2v')
        vmchecker = VMChecker(test, params, env)
        params['vmchecker'] = vmchecker
        ret = vmchecker.run()
        if len(ret) == 0:
            logging.info("All common checkpoints passed")
        # Check specific checkpoints
        if checkpoint == 'cdrom':
            virsh_session = utils_sasl.VirshSessionSASL(params)
            virsh_session_id = virsh_session.get_id()
            check_device_exist('cdrom', virsh_session_id)
        # Merge 2 error lists
        error_list.extend(vmchecker.errors)
    if len(error_list):
        raise exceptions.TestFail('%d checkpoints failed: %s'
                                  % (len(error_list), error_list))
def check_result(result, status_error):
    """
    Check virt-v2v command result
    """
    libvirt.check_exit_status(result, status_error)
    output = result.stdout + result.stderr
    if skip_check:
        logging.info('Skip checking vm after conversion')
    elif not status_error:
        if output_mode == 'rhev':
            if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                timeout=v2v_timeout):
                test.fail('Import VM failed')
        if output_mode == 'libvirt':
            try:
                virsh.start(vm_name, debug=True, ignore_status=False)
            except Exception as e:
                test.fail('Start vm failed: %s' % str(e))
        # Check guest following the checkpoint document after conversion
        vmchecker = VMChecker(test, params, env)
        params['vmchecker'] = vmchecker
        if params.get('skip_vm_check') != 'yes':
            if checkpoint != 'win2008r2_ostk':
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            if checkpoint == 'win2008r2_ostk':
                check_BSOD()
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
def trigger_hpt_resize(session):
    """
    Check the HPT order file and dmesg

    :param session: the session to guest
    :raise: test.fail if required message is not found
    """
    hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
    hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
    hpt_order = int(hpt_order)
    logging.info('Current hpt_order is %d', hpt_order)
    hpt_order += 1
    cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
    cmd_result = session.cmd_status_output(cmd)
    result = process.CmdResult(stderr=cmd_result[1],
                               stdout=cmd_result[1],
                               exit_status=cmd_result[0])
    libvirt.check_exit_status(result)
    dmesg = session.cmd('dmesg')
    dmesg_content = params.get('dmesg_content').split('|')
    for content in dmesg_content:
        if content % hpt_order not in dmesg:
            test.fail("'%s' is missing in dmesg" % (content % hpt_order))
        else:
            logging.info("'%s' is found in dmesg", content % hpt_order)
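# Illustration of the dmesg check above: 'dmesg_content' is a '|'-separated
# list of printf-style templates, each filled with the new HPT order and then
# required to appear in the guest dmesg. The template text below is a made-up
# example, not the real cfg value.
_templates = ("example HPT resize requested, new order %d|"
              "example HPT resize completed, order %d").split('|')
_hpt_order = 19
_expected = [t % _hpt_order for t in _templates]
# _expected -> ['example HPT resize requested, new order 19',
#               'example HPT resize completed, order 19']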
def test_pmsuspend(vm_name):
    """
    Test pmsuspend command.
    """
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    # Create swap partition if necessary.
    if not vm.has_swap():
        swap_path = os.path.join(test.tmpdir, 'swap.img')
        vm.create_swap_partition(swap_path)
    ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
    libvirt.check_exit_status(ret)
    # Wait for the vm to shut down
    if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
        test.fail("vm is still alive after S4 operation")
    # Wait for vm and qemu-ga service to start
    vm.start()
    # Prepare guest agent and start guest
    try:
        vm.prepare_guest_agent()
    except (remote.LoginError, virt_vm.VMError) as detail:
        test.fail("failed to prepare agent:\n%s" % detail)
def make_relative_path_backing_files():
    """
    Create backing chain files of relative path.

    :return: absolute path of top active file
    """
    first_disk_source = get_first_disk_source()
    basename = os.path.basename(first_disk_source)
    root_dir = os.path.dirname(first_disk_source)
    cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
    ret = process.run(cmd, shell=True)
    libvirt.check_exit_status(ret)
    # Make three external relative path backing files.
    backing_file_dict = collections.OrderedDict()
    backing_file_dict["b"] = "../%s" % basename
    backing_file_dict["c"] = "../b/b.img"
    backing_file_dict["d"] = "../c/c.img"
    for key, value in list(backing_file_dict.items()):
        backing_file_path = os.path.join(root_dir, key)
        cmd = ("cd %s && qemu-img create -f qcow2 "
               "-o backing_file=%s,backing_fmt=qcow2 %s.img"
               % (backing_file_path, value, key))
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)
    return os.path.join(backing_file_path, "d.img")
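# For reference, a standalone sketch of the qemu-img commands the helper above
# generates, assuming the first disk source is
# /var/lib/libvirt/images/base.qcow2 (the path is illustrative only). The
# result is a relative backing chain base <- b/b.img <- c/c.img <- d/d.img.
import collections
import os

_root_dir, _basename = "/var/lib/libvirt/images", "base.qcow2"
_chain = collections.OrderedDict([("b", "../%s" % _basename),
                                  ("c", "../b/b.img"),
                                  ("d", "../c/c.img")])
for _key, _backing in _chain.items():
    print("cd %s && qemu-img create -f qcow2 "
          "-o backing_file=%s,backing_fmt=qcow2 %s.img"
          % (os.path.join(_root_dir, _key), _backing, _key))
# Top of the chain: /var/lib/libvirt/images/d/d.img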
def create_luks_secret(vol_path, password, test):
    """
    Create secret for luks encryption

    :param vol_path: volume path.
    :param password: password used to generate the secret value.
    :param test: test object, used to report errors.
    :return: secret id if create successfully.
    """
    sec_xml = secret_xml.SecretXML("no", "yes")
    sec_xml.description = "volume secret"
    sec_xml.usage = 'volume'
    sec_xml.volume = vol_path
    sec_xml.xmltreefile.write()
    ret = virsh.secret_define(sec_xml.xml)
    utlv.check_exit_status(ret)
    try:
        encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
    except IndexError:
        test.error("Fail to get newly created secret uuid")
    logging.debug("Secret uuid %s", encryption_uuid)
    # Set secret value.
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(password.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string)
    utlv.check_exit_status(ret)
    return encryption_uuid
def storagevol_validate(pool_name, file=None, **virsh_dargs):
    """
    Test for schema storagevol
    """
    if pool_name is None:
        raise error.TestNAError("None pool is specified.")
    # Confirm the storage pool exists.
    found = False
    result = virsh.pool_list(ignore_status=True)
    output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
    for item in output[1:]:
        if pool_name == item[0]:
            found = True
            break
    if not found:
        raise error.TestNAError("Make sure the storagepool %s exists!" % pool_name)
    # Get volume name
    cmd_result = virsh.vol_list(pool_name, **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                              str(cmd_result.stdout))[1][0]
    except IndexError:
        raise error.TestError("Fail to get volume name")
    if vol_name is not None:
        cmd_result = virsh.vol_dumpxml(vol_name, pool_name, to_file=file)
        libvirt.check_exit_status(cmd_result)
def check_disk_save_restore(save_file, device_targets, startup_policy):
    """
    Check domain save and restore operation.
    """
    # Save the domain.
    ret = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
    # Restore the domain.
    restore_error = False
    # Check disk startup policy option
    if "optional" in startup_policy:
        os.remove(disks[0]["source"])
        restore_error = True
    ret = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(ret, restore_error)
    if restore_error:
        return
    # Connect to the domain and check disk.
    try:
        session = vm.wait_for_login()
        cmd = ("ls /dev/%s && mkfs.ext3 -F /dev/%s && mount /dev/%s"
               " /mnt && ls /mnt && touch /mnt/test && umount /mnt"
               % (device_targets[0], device_targets[0], device_targets[0]))
        s, o = session.cmd_status_output(cmd)
        if s:
            session.close()
            raise error.TestError("Failed to read/write disk in VM:"
                                  " %s" % o)
        session.close()
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
        raise error.TestError(str(e))
def secret_validate(file=None, **virsh_dargs):
    """
    Test for schema secret
    """
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")
    ephemeral = "no"
    private = "no"
    secret_xml_obj = SecretXML(ephemeral, private)
    status, uuid = commands.getstatusoutput("uuidgen")
    if status:
        raise error.TestNAError("Failed to generate valid uuid")
    secret_xml_obj.uuid = uuid
    secret_xml_obj.volume = volume_path
    secret_xml_obj.usage = "volume"
    secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml")
    cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)
    cmd_result = virsh.secret_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        uuid = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                          str(cmd_result.stdout))[1][0]
    except IndexError:
        raise error.TestError("Fail to get secret uuid")
    if uuid:
        try:
            virsh.secret_dumpxml(uuid, to_file=file, **virsh_dargs)
        except error.CmdError as e:
            raise error.TestError(str(e))
def check_state(expected_state):
    result = virsh.domstate(vm_name, uri=uri)
    utlv.check_exit_status(result)
    vm_state = result.stdout.strip()
    if vm_state == expected_state:
        logging.info("Get expected state: %s", vm_state)
    else:
        raise TestFail("Get unexpected state: %s" % vm_state)
def check_snapshot(bgjob=None):
    """
    Do snapshot operation and check the results
    """
    snapshot_name1 = "snap.s1"
    snapshot_name2 = "snap.s2"
    if not snapshot_vm_running:
        vm.destroy(gracefully=False)
    ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
    libvirt.check_exit_status(ret)
    snap_lists = virsh.snapshot_list(vm_name)
    if snapshot_name1 not in snap_lists:
        test.fail("Snapshot %s doesn't exist" % snapshot_name1)

    if snapshot_vm_running:
        options = "--force"
    else:
        options = ""
    ret = virsh.snapshot_revert(
        vm_name, ("%s %s" % (snapshot_name1, options)))
    libvirt.check_exit_status(ret)
    ret = virsh.dumpxml(vm_name)
    if ret.stdout.count("<rng model="):
        test.fail("Found rng device in xml")

    if snapshot_with_rng:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if bgjob:
            bgjob.kill_func()
        modify_rng_xml(params, False)

    # Start the domain before disk-only snapshot
    if vm.is_dead():
        # Add random server
        if params.get("backend_type") == "tcp":
            cmd = "cat /dev/random | nc -4 -l localhost 1024"
            bgjob = utils.AsyncJob(cmd)
        vm.start()
        vm.wait_for_login().close()
    err_msgs = ("live disk snapshot not supported"
                " with this QEMU binary")
    ret = virsh.snapshot_create_as(vm_name,
                                   "%s --disk-only" % snapshot_name2)
    if ret.exit_status:
        if ret.stderr.count(err_msgs):
            test.skip(err_msgs)
        else:
            test.fail("Failed to create external snapshot")
    snap_lists = virsh.snapshot_list(vm_name)
    if snapshot_name2 not in snap_lists:
        test.fail("Failed to check snapshot list")
    ret = virsh.domblklist(vm_name)
    if not ret.stdout.count(snapshot_name2):
        test.fail("Failed to find snapshot disk")
def run(test, params, env): """ Test svirt in virt-clone. """ VIRT_CLONE = None try: VIRT_CLONE = utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: raise error.TestNAError("No virt-clone command found.") # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing") # Get variables about seclabel for VM. sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic") sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux") sec_label = params.get("svirt_virt_clone_vm_sec_label", None) sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes") sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label, 'relabel': sec_relabel} # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Get varialbles about image. img_label = params.get('svirt_virt_clone_disk_label') # Label the disks of VM with img_label. disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in disks.values(): disk_path = disk['source'] backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file( filename=disk_path) utils_selinux.set_context_of_file(filename=disk_path, context=img_label) # Set selinux of host. backup_sestatus = utils_selinux.get_status() utils_selinux.set_status(host_sestatus) # Set the context of the VM. vmxml.set_seclabel([sec_dict]) vmxml.sync() clone_name = ("%s-clone" % vm.name) try: cmd = ("%s --original %s --name %s --auto-clone" % (VIRT_CLONE, vm.name, clone_name)) cmd_result = utils.run(cmd, ignore_status=True) utils_libvirt.check_exit_status(cmd_result, status_error) finally: # clean up for path, label in backup_labels_of_disks.items(): utils_selinux.set_context_of_file(filename=path, context=label) backup_xml.sync() utils_selinux.set_status(backup_sestatus) if not virsh.domstate(clone_name).exit_status: libvirt_vm.VM(clone_name, params, None, None).remove_with_storage()
def run(test, params, env): """ Test domiftune tuning 1) Positive testing 1.1) get the current domiftune parameters for a running guest 1.2) set the current domiftune parameters for a running guest 2) Negative testing 2.1) get domiftune parameters 2.2) set domiftune parameters """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) status_error = params.get("status_error", "no") start_vm = params.get("start_vm", "yes") change_parameters = params.get("change_parameters", "no") interface_ref = params.get("interface_ref", "name") interface = [] if vm and not vm.is_alive(): vm.start() if vm and vm.is_alive(): virt_xml_obj = vm_xml.VMXML(virsh_instance=virsh) interface = virt_xml_obj.get_iface_dev(vm_name) if_mac = interface[0] # Get interface name vmxml = virt_xml_obj.new_from_dumpxml(vm_name) if_node = vmxml.get_iface_all().get(if_mac) if_name = if_node.find('target').get('dev') if interface_ref == "name": interface = if_name if interface_ref == "mac": interface = if_mac logging.debug("the interface is %s", interface) test_dict = dict(params) test_dict['vm'] = vm if interface: test_dict['iface_dev'] = interface if start_vm == "no" and vm and vm.is_alive(): vm.destroy() # positive and negative testing ######### if change_parameters == "no": get_domiftune_parameter(test_dict, test) else: set_domiftune_parameter(test_dict, test) ret = virsh.domiftune(vm_name, interface, 'current', '0', '0') libvirt.check_exit_status(ret)
def run(test, params, env): """ Test the command virsh domcapabilities """ target_uri = params.get("target_uri", "default") if target_uri.count("EXAMPLE.COM"): raise error.TestNAError("Please replace '%s' with valid uri" % target_uri) connect_uri = libvirt_vm.normalize_connect_uri(target_uri) virsh_options = params.get("virsh_options", "") virttype = params.get("virttype_value", "") emulatorbin = params.get("emulatorbin_value", "") arch = params.get("arch_value", "") machine = params.get("machine_value", "") option_dict = {'arch': arch, 'virttype': virttype, 'emulatorbin': emulatorbin, 'machine': machine} options_list = [option_dict] extra_option = params.get("extra_option", "") # Get --virttype, --emulatorbin, --arch and --machine values from # virsh capabilities output, then assemble option for testing # This will ignore the virttype, emulatorbin, arch and machine values if virsh_options == "AUTO": options_list = [] capa_xml = capability_xml.CapabilityXML() guest_capa = capa_xml.get_guest_capabilities() for arch_prop in guest_capa.values(): for arch in arch_prop.keys(): machine_list = arch_prop[arch]['machine'] virttype_list = [] emulatorbin_list = [arch_prop[arch]['emulator']] for key in arch_prop[arch].keys(): if key.startswith("domain_"): virttype_list.append(key[7:]) if arch_prop[arch][key].values(): emulatorbin_list.append(arch_prop[arch][key]['emulator']) for virttype in virttype_list: for emulatorbin in emulatorbin_list: for machine in machine_list: option_dict = {'arch': arch, 'virttype': virttype, 'emulatorbin': emulatorbin, 'machine': machine} options_list.append(option_dict) # Run test cases for option in options_list: result = virsh.domcapabilities(virttype=option['virttype'], emulatorbin=option['emulatorbin'], arch=option['arch'], machine=option['machine'], options=extra_option, uri=connect_uri, ignore_status=True, debug=True) # Check status_error status_error = "yes" == params.get("status_error", "no") utlv.check_exit_status(result, status_error)
def check_save_restore(save_file):
    """
    Test domain save and restore.
    """
    # Save the domain.
    ret = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
    # Restore the domain.
    ret = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
def dump_nodedev_xml(dev_name, dev_opt="", **dargs):
    """
    Do dumpxml and check the result.

    step1. execute nodedev-dumpxml command.
    step2. compare info in xml with info in sysfs.

    :param dev_name: name of device.
    :param dev_opt: command extra options
    :param dargs: extra dict args
    """
    result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
    libvirt.check_exit_status(result)
    logging.debug('Executing "virsh nodedev-dumpxml %s" finished.', dev_name)
    # Compare info in xml with info in sysfs.
    nodedevice_xml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
    if not nodedevice_xml.validates:
        test.error("nodedev xml of %s is not validated." % (dev_name))
    # Get the dict of key to value in xml.
    # nodedev_dict_xml contains all the keys and values in xml that need checking.
    nodedev_dict_xml = nodedevice_xml.get_key2value_dict()
    # Get the dict of key to path in sysfs.
    # nodedev_syspath_dict contains all the keys and the path of the file which
    # holds the information for each key.
    nodedev_syspath_dict = nodedevice_xml.get_key2syspath_dict()
    # Get the values contained in files.
    # nodedev_dict_sys contains all the keys and values in sysfs.
    nodedev_dict_sys = {}
    for key, filepath in list(nodedev_syspath_dict.items()):
        with open(filepath, 'r') as f:
            value = f.readline().rstrip('\n')
        nodedev_dict_sys[key] = value
    # Compare the value in xml and in syspath.
    for key in nodedev_dict_xml:
        xml_value = nodedev_dict_xml.get(key)
        sys_value = nodedev_dict_sys.get(key)
        if not xml_value == sys_value:
            if (key == 'numa_node' and
                    not libvirt_version.version_compare(1, 2, 5)):
                logging.warning("key: %s in xml is not supported yet" % key)
            else:
                test.error("key: %s in xml is %s, "
                           "but in sysfs is %s." % (key, xml_value, sys_value))
        else:
            continue
    logging.debug("Compare info in xml and info in sysfs finished "
                  "for device %s.", dev_name)
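# A self-contained sketch of the sysfs comparison idea used above: each XML key
# maps to a sysfs file, and the first line of that file is expected to equal
# the XML value. The example mapping in the comment is hypothetical.
def _read_sysfs_values(key2syspath):
    """Return {key: first line of the mapped sysfs file, newline stripped}."""
    values = {}
    for key, path in key2syspath.items():
        with open(path, 'r') as handle:
            values[key] = handle.readline().rstrip('\n')
    return values

# Example (hypothetical mapping):
#   _read_sysfs_values({'vendor_id': '/sys/bus/pci/devices/0000:00:1f.3/vendor'})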
def set_secret_value(password, encryption_uuid):
    """
    Generate secret string and set secret value.

    :param password: password for encryption
    :param encryption_uuid: uuid of secret
    """
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(password.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string)
    libvirt.check_exit_status(ret)
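# Standalone illustration of the secret-string encoding used above: the raw
# password is base64-encoded before being handed to `virsh secret-set-value`.
# The password literal in the comment is only an example.
import base64
import locale

def _make_secret_string(password):
    """Return the base64 text form of a password, as virsh expects it."""
    encoding = locale.getpreferredencoding()
    return base64.b64encode(password.encode(encoding)).decode(encoding)

# _make_secret_string("redhat") -> 'cmVkaGF0'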
def redefine_new_xml():
    if restore_state == "running":
        option = "--running"
    elif restore_state == "paused":
        option = "--paused"
    else:
        raise exceptions.TestFail("Unknown save-image-define option")

    cmd_result = virsh.save_image_define(vm_save, xmlfile, option, debug=True)
    libvirt.check_exit_status(cmd_result)
def check_dom_iothread():
    """
    Check iothread by qemu-monitor-command.
    """
    ret = virsh.qemu_monitor_command(vm_name,
                                     '{"execute": "query-iothreads"}',
                                     "--pretty")
    libvirt.check_exit_status(ret)
    logging.debug("Domain iothreads: %s", ret.stdout)
    iothreads_ret = json.loads(ret.stdout)
    if len(iothreads_ret['return']) != int(dom_iothreads):
        raise error.TestFail("Failed to check domain iothreads")
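# Minimal sketch of the QMP reply parsing above; the JSON payload is an example
# of the shape `query-iothreads` typically returns, not captured output.
import json

_sample_reply = '''{"return": [{"id": "iothread1", "thread-id": 1234},
                               {"id": "iothread2", "thread-id": 1235}]}'''
_iothreads = json.loads(_sample_reply)["return"]
assert len(_iothreads) == 2   # compared against <iothreads> in the domain XML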
def get_image_xml():
    # Invoke save-image-dumpxml
    cmd_result = virsh.save_image_dumpxml(vm_save, debug=True)
    libvirt.check_exit_status(cmd_result)

    xml = cmd_result.stdout.strip()
    match_string = "<name>%s</name>" % vm_name
    if not re.search(match_string, xml):
        raise exceptions.TestFail("The xml from saved state file "
                                  "is invalid")
    return xml
def nodedev_validate(file=None, **virsh_dargs):
    """
    Test for schema nodedev
    """
    # Get dev name
    cmd_result = virsh.nodedev_list()
    libvirt.check_exit_status(cmd_result)

    dev_name = cmd_result.stdout.strip().splitlines()[1]
    if dev_name:
        cmd_result = virsh.nodedev_dumpxml(dev_name, to_file=file)
        libvirt.check_exit_status(cmd_result)
def check_save_restore(vm_name):
    """
    Do save/restore operation and check status
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    try:
        result = virsh.save(vm_name, save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
        result = virsh.restore(save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
    finally:
        os.remove(save_file)
def check_save_restore():
    """
    Test save and restore operation
    """
    save_file = os.path.join(test.tmpdir, "%s.save" % vm_name)
    ret = virsh.save(vm_name, save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
    ret = virsh.restore(save_file, **virsh_dargs)
    libvirt.check_exit_status(ret)
    if os.path.exists(save_file):
        os.remove(save_file)
    # Login to check vm status
    vm.wait_for_login().close()
def check_result(result, expect_status, expect_output=None):
    """
    Check virsh metadata command
    """
    utlv.check_exit_status(result, expect_status)
    if result.exit_status == 0 and expect_output:
        expect_output = pretty_xml(expect_output)
        logging.debug("Expect metadata: %s", expect_output)
        output = result.stdout.strip()
        output = pretty_xml(output)
        logging.debug("Command get metadata: %s", output)
        if output != expect_output:
            raise error.TestFail("Metadata is not expected")
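# `pretty_xml` is a helper defined elsewhere in this test; a minimal equivalent,
# normalizing both XML strings before comparison, could look like this sketch.
from xml.dom import minidom

def _pretty_xml(xml_str):
    """Re-serialize an XML string with uniform indentation for comparison."""
    return minidom.parseString(xml_str).toprettyxml(indent="  ")

# With that, "<app><owner>me</owner></app>" and a dumped metadata element that
# differs only in whitespace compare equal after both pass through _pretty_xml.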
def offline_pin_and_check(vm, vcpu, cpu_list):
    """
    Edit domain xml to pin vcpu and check the result.
    """
    cputune = vm_xml.VMCPUTuneXML()
    cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    vmxml.cputune = cputune
    vmxml.sync()
    cmdResult = virsh.start(vm.name, debug=True)
    libvirt.check_exit_status(cmdResult, status_error)
    pid = vm.get_pid()
    vcpu_pid = vm.get_vcpus_pid()[vcpu]
    check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
def nwfilter_validate(file=None, **virsh_dargs):
    """
    Test for schema nwfilter
    """
    cmd_result = virsh.nwfilter_list(**virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        uuid = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                          str(cmd_result.stdout))[1][0]
    except IndexError:
        raise error.TestError("Fail to get nwfilter uuid")
    if uuid:
        cmd_result = virsh.nwfilter_dumpxml(uuid, to_file=file, **virsh_dargs)
        libvirt.check_exit_status(cmd_result)
def network_validate(net_name, file=None, **virsh_dargs):
    """
    Test for schema network
    """
    if net_name is None:
        raise error.TestNAError("None network is specified.")
    # Confirm the network exists.
    output = virsh.net_list("--all").stdout.strip()
    if not re.search(net_name, output):
        raise error.TestNAError("Make sure the network exists!!")
    cmd_result = virsh.net_dumpxml(net_name, to_file=file)
    libvirt.check_exit_status(cmd_result)
def run(test, params, env): """ Test virsh {at|de}tach-device command. The command can attach new disk/detach device. 1.Prepare test environment,destroy or suspend a VM. 2.Perform virsh attach/detach-device operation. 3.Recover test environment. 4.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) pre_vm_state = params.get("at_dt_device_pre_vm_state", "running") virsh_dargs = {'debug': True, 'ignore_status': True} def is_attached(vmxml_devices, disk_type, source_file, target_dev): """ Check attached device and disk exist or not. :param vmxml_devices: VMXMLDevices instance :param disk_type: disk's device type: cdrom or floppy :param source_file : disk's source file to check :param target_dev : target device name :return: True/False if backing file and device found """ disks = vmxml_devices.by_device_tag('disk') for disk in disks: if disk.device != disk_type: continue if disk.target['dev'] != target_dev: continue if disk.xmltreefile.find('source') is not None: if disk.source.attrs['file'] != source_file: continue else: continue # All three conditions met logging.debug("Find %s in given disk XML", source_file) return True logging.debug("Not find %s in gievn disk XML", source_file) return False def check_result(disk_source, disk_type, disk_target, flags, attach=True): """ Check the test result of attach/detach-device command. """ vm_state = pre_vm_state active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) active_attached = is_attached(active_vmxml.devices, disk_type, disk_source, disk_target) if vm_state != "transient": inactive_vmxml = vm_xml.VMXML.new_from_dumpxml( vm_name, options="--inactive") inactive_attached = is_attached(inactive_vmxml.devices, disk_type, disk_source, disk_target) if flags.count("config") and not flags.count("live"): if vm_state != "transient": if attach: if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --config options used for" " attachment") if vm_state != "shutoff": if active_attached: raise exceptions.TestFail( "Active domain XML updated " "when --config options used " "for attachment") else: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --config options used for" " detachment") elif flags.count("live") and not flags.count("config"): if attach: if vm_state in ["paused", "running", "transient"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live options used for" " attachment") if vm_state in ["paused", "running"]: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated " "when --live options used for" " attachment") else: if vm_state in ["paused", "running", "transient"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live options used for" " detachment") elif flags.count("live") and flags.count("config"): if attach: if vm_state in ["paused", "running"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live --config options" " used for attachment") if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --live --config options " "used for attachment") else: if vm_state in ["paused", "running"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated " "when --live --config options " "used for detachment") if inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated " "when --live --config options" " used for 
detachment") elif flags.count("current") or flags == "": if attach: if vm_state in ["paused", "running", "transient"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --current options used " "for attachment") if vm_state in ["paused", "running"]: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated " "when --current options used " "for live attachment") if vm_state == "shutoff" and not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --current options used for" " attachment") else: if vm_state in ["paused", "running", "transient"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --current options used " "for detachment") if vm_state == "shutoff" and inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --current options used for" " detachment") vm_ref = params.get("at_dt_device_vm_ref", "name") at_status_error = "yes" == params.get("at_status_error", 'no') dt_status_error = "yes" == params.get("dt_status_error", 'no') # Disk specific attributes. at_options = params.get("at_dt_device_at_options", "") dt_options = params.get("at_dt_device_dt_options", "") device = params.get("at_dt_device_device", "disk") device_source_name = params.get("at_dt_device_source", "attach.img") device_target = params.get("at_dt_device_target", "vdd") if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Turn VM into certain state. if pre_vm_state == "running": logging.info("Starting %s..." % vm_name) if vm.is_dead(): vm.start() vm.wait_for_login().close() elif pre_vm_state == "shutoff": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False) elif pre_vm_state == "paused": logging.info("Pausing %s..." % vm_name) if vm.is_dead(): vm.start() vm.wait_for_login().close() if not vm.pause(): raise exceptions.TestSkipError("Cann't pause the domain") elif pre_vm_state == "transient": logging.info("Creating %s..." % vm_name) vm.undefine() if virsh.create(backup_xml.xml, **virsh_dargs).exit_status: backup_xml.define() raise exceptions.TestSkipError("Cann't create the domain") vm.wait_for_login().close() # Test. domid = vm.get_id() domuuid = vm.get_uuid() # Confirm how to reference a VM. if vm_ref == "name": vm_ref = vm_name elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref == "uuid": vm_ref = domuuid else: vm_ref = "" try: device_source = os.path.join(data_dir.get_tmp_dir(), device_source_name) libvirt.create_local_disk("file", device_source, "1") # Get disk xml file. disk_params = { 'type_name': "file", 'device_type': device, 'target_dev': device_target, 'target_bus': "virtio", 'source_file': device_source, 'driver_name': "qemu", 'driver_type': "raw" } disk_xml = libvirt.create_disk_xml(disk_params) # Attach the disk. ret = virsh.attach_device(vm_ref, disk_xml, flagstr=at_options, debug=True) libvirt.check_exit_status(ret, at_status_error) # Check if command take affect config file. if vm.is_paused(): vm.resume() vm.wait_for_login().close() #Sleep a while for vm is stable time.sleep(3) if not ret.exit_status: check_result(device_source, device, device_target, at_options) # Detach the disk. 
if pre_vm_state == "paused": if not vm.pause(): raise exceptions.TestFail("Cann't pause the domain") disk_xml = libvirt.create_disk_xml(disk_params) ret = virsh.detach_device(vm_ref, disk_xml, flagstr=dt_options, debug=True) libvirt.check_exit_status(ret, dt_status_error) # Check if command take affect config file. if vm.is_paused(): vm.resume() vm.wait_for_login().close() #Sleep a while for vm is stable time.sleep(3) if not ret.exit_status: check_result(device_source, device, device_target, dt_options, False) # Try to start vm at last. if vm.is_dead(): vm.start() vm.wait_for_login().close() finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if os.path.exists(device_source): os.remove(device_source)
def run(test, params, env): """ Test command: virsh blockcopy. This command can copy a disk backing image chain to dest. 1. Positive testing 1.1 Copy a disk to a new image file. 1.2 Reuse existing destination copy. 1.3 Valid blockcopy timeout and bandwidth test. 2. Negative testing 2.1 Copy a disk to a non-exist directory. 2.2 Copy a disk with invalid options. 2.3 Do block copy for a persistent domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) target = params.get("target_disk", "") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_source_protocol = params.get("disk_source_protocol") disk_type = params.get("disk_type") pool_name = params.get("pool_name") image_size = params.get("image_size") emu_image = params.get("emulated_image") copy_to_nfs = "yes" == params.get("copy_to_nfs", "no") mnt_path_name = params.get("mnt_path_name") options = params.get("blockcopy_options", "") bandwidth = params.get("blockcopy_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") reuse_external = "yes" == params.get("reuse_external", "no") persistent_vm = params.get("persistent_vm", "no") status_error = "yes" == params.get("status_error", "no") active_error = "yes" == params.get("active_error", "no") active_snap = "yes" == params.get("active_snap", "no") active_save = "yes" == params.get("active_save", "no") check_state_lock = "yes" == params.get("check_state_lock", "no") with_shallow = "yes" == params.get("with_shallow", "no") with_blockdev = "yes" == params.get("with_blockdev", "no") setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit') bug_url = params.get("bug_url", "") timeout = int(params.get("timeout", 1200)) rerun_flag = 0 blkdev_n = None back_n = 'blockdev-backing-iscsi' snapshot_external_disks = [] # Skip/Fail early if with_blockdev and not libvirt_version.version_compare(1, 2, 13): raise exceptions.TestSkipError("--blockdev option not supported in " "current version") if not target: raise exceptions.TestSkipError("Require target disk to copy") if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("API acl test not supported in current" " libvirt version") if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1): raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url) if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3): raise exceptions.TestSkipError("--bytes option not supported in " "current version") # Check the source disk if vm_xml.VMXML.check_disk_exist(vm_name, target): logging.debug("Find %s in domain %s", target, vm_name) else: raise exceptions.TestFail("Can't find %s in domain %s" % (target, vm_name)) original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) tmp_dir = data_dir.get_tmp_dir() # Prepare dest path params dest_path = params.get("dest_path", "") dest_format = params.get("dest_format", "") # Ugh... this piece of chicanery brought to you by the QemuImg which # will "add" the 'dest_format' extension during the check_format code. # So if we create the file with the extension and then remove it when # doing the check_format later, then we avoid erroneous failures. 
dest_extension = "" if dest_format != "": dest_extension = ".%s" % dest_format # Prepare for --reuse-external option if reuse_external: options += "--reuse-external --wait" # Set rerun_flag=1 to do blockcopy twice, and the first time created # file can be reused in the second time if no dest_path given # This will make sure the image size equal to original disk size if dest_path == "/path/non-exist": if os.path.exists(dest_path) and not os.path.isdir(dest_path): os.remove(dest_path) else: rerun_flag = 1 # Prepare other options if dest_format == "raw": options += " --raw" if with_blockdev: options += " --blockdev" if len(bandwidth): options += " --bandwidth %s" % bandwidth if bandwidth_byte: options += " --bytes" if with_shallow: options += " --shallow" # Prepare acl options uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout} libvirtd_utl = utils_libvirtd.Libvirtd() libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd_utl.restart() def check_format(dest_path, dest_extension, expect): """ Check the image format :param dest_path: Path of the copy to create :param expect: Expect image format """ # And now because the QemuImg will add the extension for us # we have to remove it here. path_noext = dest_path.strip(dest_extension) params['image_name'] = path_noext params['image_format'] = expect image = qemu_storage.QemuImg(params, "/", path_noext) if image.get_format() == expect: logging.debug("%s format is %s", dest_path, expect) else: raise exceptions.TestFail("%s format is not %s" % (dest_path, expect)) def _blockjob_and_libvirtd_chk(cmd_result): """ Raise TestFail when blockcopy fail with block-job-complete error or blockcopy hang with state change lock. This is a specific bug verify, so ignore status_error here. """ bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592" err_msg = "internal error: unable to execute QEMU command" err_msg += " 'block-job-complete'" if err_msg in cmd_result.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url_) err_pattern = "Timed out during operation: cannot acquire" err_pattern += " state change lock" ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error") if ret: raise exceptions.TestFail("Hit on bug: %s" % bug_url_) def _make_snapshot(): """ Make external disk snapshot """ snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "blockcopy_snap" snap_xml.snap_name = snapshot_name snap_xml.description = "blockcopy snapshot" # Add all disks into xml file. 
vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] src_disk_xml = disks[0] disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if disk_xml.source.attrs.has_key('file'): new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap") snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif (disk_xml.source.attrs.has_key('dev') or disk_xml.source.attrs.has_key('name') or disk_xml.source.attrs.has_key('pool')): if (disk_xml.type_name == 'block' or disk_source_protocol == 'iscsi'): disk_xml.type_name = 'block' if new_attrs.has_key('name'): del new_attrs['name'] del new_attrs['protocol'] elif new_attrs.has_key('pool'): del new_attrs['pool'] del new_attrs['volume'] del new_attrs['mode'] back_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size="1G", emulated_image=back_n) emulated_iscsi.append(back_n) cmd = "qemu-img create -f qcow2 %s 1G" % back_path process.run(cmd, shell=True) new_attrs.update({'dev': back_path}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: raise exceptions.TestFail(snapshot_result.stderr) snap_path = '' save_path = '' emulated_iscsi = [] nfs_cleanup = False try: # Prepare dest_path tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img") tmp_file += dest_extension if not dest_path: if with_blockdev: blkdev_n = 'blockdev-iscsi' dest_path = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=image_size, emulated_image=blkdev_n) emulated_iscsi.append(blkdev_n) # Make sure the new disk show up utils_misc.wait_for(lambda: os.path.exists(dest_path), 5) else: if copy_to_nfs: tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name) dest_path = os.path.join(tmp_dir, tmp_file) # Domain disk replacement with desire type if replace_vm_disk: # Calling 'set_vm_disk' is bad idea as it left lots of cleanup jobs # after test, such as pool, volume, nfs, iscsi and so on # TODO: remove this function in the future if disk_source_protocol == 'iscsi': emulated_iscsi.append(emu_image) if disk_source_protocol == 'netfs': nfs_cleanup = True utl.set_vm_disk(vm, params, tmp_dir, test) new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if with_shallow: _make_snapshot() # Prepare transient/persistent vm if persistent_vm == "no" and vm.is_persistent(): vm.undefine() elif persistent_vm == "yes" and not vm.is_persistent(): new_xml.define() # Run blockcopy command to create destination file if rerun_flag == 1: options1 = "--wait %s --finish --verbose" % dest_format if with_blockdev: options1 += " --blockdev" if with_shallow: options1 += " --shallow" cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1, **extra_dict) status = cmd_result.exit_status if status != 0: raise exceptions.TestFail("Run blockcopy 
command fail: %s" % cmd_result.stdout + cmd_result.stderr) elif not os.path.exists(dest_path): raise exceptions.TestFail("Cannot find the created copy") # Run the real testing command cmd_result = virsh.blockcopy(vm_name, target, dest_path, options, **extra_dict) # check BZ#1197592 _blockjob_and_libvirtd_chk(cmd_result) status = cmd_result.exit_status if not libvirtd_utl.is_running(): raise exceptions.TestFail("Libvirtd service is dead") if not status_error: if status == 0: ret = utils_misc.wait_for( lambda: check_xml(vm_name, target, dest_path, options), 5) if not ret: raise exceptions.TestFail("Domain xml not expected after" " blockcopy") if options.count("--bandwidth"): if options.count('--bytes'): bandwidth += 'B' else: bandwidth += 'M' if not utl.check_blockjob(vm_name, target, "bandwidth", bandwidth): raise exceptions.TestFail("Check bandwidth failed") val = options.count("--pivot") + options.count("--finish") # Don't wait for job finish when using --byte option val += options.count('--bytes') if val == 0: try: finish_job(vm_name, target, timeout) except JobTimeout, excpt: raise exceptions.TestFail("Run command failed: %s" % excpt) if options.count("--raw") and not with_blockdev: check_format(dest_path, dest_extension, dest_format) if active_snap: snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path ret = virsh.snapshot_create_as(vm_name, snap_opt, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if active_save: save_path = "%s/%s.save" % (tmp_dir, vm_name) ret = virsh.save(vm_name, save_path, ignore_status=True, debug=True) utl.check_exit_status(ret, active_error) if check_state_lock: # Run blockjob pivot in subprocess as it will hang # for a while, run blockjob info again to check # job state command = "virsh blockjob %s %s --pivot" % (vm_name, target) session = aexpect.ShellSession(command) ret = virsh.blockjob(vm_name, target, "--info") err_info = "cannot acquire state change lock" if err_info in ret.stderr: raise exceptions.TestFail("Hit on bug: %s" % bug_url) utl.check_exit_status(ret, status_error) session.close() else: raise exceptions.TestFail(cmd_result.stdout + cmd_result.stderr) else:
if output.splitlines()[i].split()[-1] != expect_vcpu_num[j]: raise error.TestFail("Virsh vcpucount output is unexpected") except IndexError, detail: raise error.TestFail(detail) logging.debug("Command vcpucount check pass") # Check virsh vcpuinfo output, (1) count vcpu number, if domain is # alive, vcpu number(current) correspond to expect_vcpu_num[3], # otherwise, it correspond to expect_vcpu_num[2]; (2) get cpus affinity, # and check them in virsh vcpupin command if vm.is_alive(): i = 3 else: i = 2 result = virsh.vcpuinfo(vm.name, ignore_status=True, debue=True) libvirt.check_exit_status(result) output = result.stdout.strip() vcpuinfo_num = len(output.split("\n\n")) logging.debug("Get %s vcpus in virsh vcpuinfo output", vcpuinfo_num) if vcpuinfo_num != int(expect_vcpu_num[i]): raise error.TestFail("Vcpu number in virsh vcpuinfo is unexpected") vcpuinfo_affinity = re.findall('CPU Affinity: +([-y]+)', output) logging.debug("Command vcpuinfo check pass") # Check vcpu number in domain XML, if setvcpu with '--config' option, # or domain is dead, vcpu number correspond to expect_vcpu_num[2], # otherwise, it correspond to expect_vcpu_num[3] dumpxml_option = "" if setvcpu_option == "--config" or vm.is_dead(): dumpxml_option = "--inactive" i = 2
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." 
% (pin_cpu_list, host_cpu_count)) cpu_max = int(host_cpu_count) - 1 if pin_cpu_list == "x": pin_cpu_list = str(cpu_max) if pin_cpu_list == "x-y": pin_cpu_list = "0-%s" % cpu_max elif pin_cpu_list == "x,y": pin_cpu_list = "0,%s" % cpu_max elif pin_cpu_list == "x-y,^z": pin_cpu_list = "0-%s,^%s" % (cpu_max, cpu_max) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check 
vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu if vcpu_unplug: # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debue=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option)
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot: libvirt.check_result(ret, expected_fails=unsupported_err) elif test_disk_readonly: if libvirt_version.version_compare(6, 0, 0): libvirt.check_result(ret) else: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = secret_list_result.stdout_text.strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") rbd_blockcopy = "yes" == params.get("rbd_blockcopy", "no") enable_slice = "yes" == params.get("enable_slice", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = 
"yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # After libvirt 6.0.0, blockcopy rbd backend feature is support. if rbd_blockcopy and not libvirt_version.version_compare(6, 0, 0): test.cancel( "blockcopy rbd backend is not supported in current libvirt version" ) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. 
san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. 
san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) elif rbd_blockcopy: # Create one disk to attach to VM as second disk device second_disk_params = {} disk_size = params.get("virt_disk_device_size", "50M") device_source = libvirt.create_local_disk("file", img_file, disk_size, disk_format="qcow2") second_disk_params.update({"source_file": device_source}) second_disk_params.update({"driver_type": "qcow2"}) second_xml_file = libvirt.create_disk_xml(second_disk_params) opts = params.get("attach_option", "--config") ret = virsh.attach_device(vm_name, second_xml_file, flagstr=opts, debug=True) libvirt.check_result(ret) else: # Create an local image and make FS on it. 
disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # After block-dev introduced, qemu-img: warning: RBD options encoded in the filename as keyvalue pairs is deprecated if libvirt_version.version_compare(6, 0, 0): test.cancel( "qemu-img: warning: RBD options encoded in the filename as keyvalue pairs in json format is deprecated" ) # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif rbd_blockcopy: if enable_slice: disk_cmd = ("rbd -m %s %s create %s --size 400M 2> /dev/null" % (mon_host, key_opt, disk_src_name)) process.run(disk_cmd, ignore_status=False, shell=True) slice_dict = { "slice_type": "storage", "slice_offset": "12345", "slice_size": "105185280" } params.update({"disk_slice": slice_dict}) logging.debug( 'create one volume on ceph backend storage for slice testing' ) # Create one file on VM before doing blockcopy try: session = vm.wait_for_login() cmd = ( "mkfs.ext4 -F /dev/{0} && mount /dev/{0} /mnt && ls /mnt && (sleep 3;" " touch /mnt/rbd_blockcopyfile; umount /mnt)".format( targetdev)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info( "touch one file in new added disk in VM:\n, %s, %s", s, o) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) # Create rbd backend xml rbd_blockcopy_xml_file = libvirt.create_disk_xml(params) logging.debug("The rbd blockcopy xml is: %s" % rbd_blockcopy_xml_file) dest_path = " --xml %s" % rbd_blockcopy_xml_file options1 = params.get("rbd_pivot_option", " --wait --verbose --transient-job --pivot") extra_dict = {'debug': True} cmd_result = virsh.blockcopy(vm_name, targetdev, dest_path, options1, **extra_dict) libvirt.check_exit_status(cmd_result) elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # After block-dev introduced in libvirt 6.0.0 afterwards, file=rbd:* format information is not provided from qemu output if libvirt_version.version_compare(6, 0, 0): test_qemu_cmd = False # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not 
check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly and not libvirt_version.version_compare(6, 0, 0): snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Check rbd blockcopy inside VM if rbd_blockcopy: try: session = vm.wait_for_login() cmd = ( "mount /dev/{0} /mnt && ls /mnt/rbd_blockcopyfile && (sleep 3;" " umount /mnt)".format(targetdev)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info( "list one file in new rbd backend disk in VM:\n, %s, %s", s, o) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) debug_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) def _check_slice_in_xml(): """ Check slice attribute in disk xml. """ debug_vmxml = virsh.dumpxml(vm_name, "", debug=True).stdout.strip() if 'slices' in debug_vmxml: return True else: return False if enable_slice: if not _check_slice_in_xml(): test.fail("Failed to find slice attribute in VM xml") # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start. " "Error: %s" % str(details)) finally: # Remove the ceph config file if it was created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format( mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files.
if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secret secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
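The rbd test above extracts the new secret UUID from `virsh secret-define` output with a fairly opaque regex (r".+\S+(\ +\S+)\ +.+\S+"); a simpler sketch that keys on the UUID format in the "Secret <uuid> created" message instead (function name is illustrative only):

import re


def parse_secret_uuid(define_stdout):
    """Return the UUID from a 'Secret <uuid> created' message, or None."""
    match = re.search(r"[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}",
                      define_stdout)
    return match.group(0) if match else None


# e.g. parse_secret_uuid(virsh.secret_define(sec_xml.xml).stdout.strip())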
def vm_stress_events(self, event, vm): """ Stress events :param event: event name :param vm: vm object """ dargs = {'ignore_status': True, 'debug': True} for itr in range(self.iterations): if "vcpupin" in event: for vcpu in range(int(self.current_vcpu)): result = virsh.vcpupin(vm.name, vcpu, random.choice(self.host_cpu_list), **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "emulatorpin" in event: for vcpu in range(int(self.current_vcpu)): result = virsh.emulatorpin( vm.name, random.choice(self.host_cpu_list), **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "suspend" in event: result = virsh.suspend(vm.name, **dargs) if not self.ignore_status: libvirt.check_exit_status(result) time.sleep(self.event_sleep_time) result = virsh.resume(vm.name, **dargs) if not self.ignore_status: libvirt.check_exit_status(result) elif "cpuhotplug" in event: result = virsh.setvcpus(vm.name, self.max_vcpu, "--live", **dargs) if not self.ignore_status: libvirt.check_exit_status(result) exp_vcpu = { 'max_config': self.max_vcpu, 'max_live': self.max_vcpu, 'cur_config': self.current_vcpu, 'cur_live': self.max_vcpu, 'guest_live': self.max_vcpu } utils_hotplug.check_vcpu_value(vm, exp_vcpu, option="--live") time.sleep(self.event_sleep_time) result = virsh.setvcpus(vm.name, self.current_vcpu, "--live", **dargs) if not self.ignore_status: libvirt.check_exit_status(result) exp_vcpu = { 'max_config': self.max_vcpu, 'max_live': self.max_vcpu, 'cur_config': self.current_vcpu, 'cur_live': self.current_vcpu, 'guest_live': self.current_vcpu } utils_hotplug.check_vcpu_value(vm, exp_vcpu, option="--live") elif "reboot" in event: vm.reboot() else: raise NotImplementedError
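vm_stress_events() above handles a single event for a single VM; a possible driver, assuming the test config passes a comma-separated event list and one worker thread per VM/event pair (names and structure are illustrative, not part of the original test class):

import threading


def run_stress_events(self, vms, events):
    """Fire each configured event against each VM in its own thread."""
    threads = []
    for vm in vms:
        for event in events.split(","):
            worker = threading.Thread(target=self.vm_stress_events,
                                      args=(event.strip(), vm))
            worker.start()
            threads.append(worker)
    for worker in threads:
        worker.join()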
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") acl_dargs = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True } def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpected pool '%s' exist." 
% pool_name) if not expect_error and not found: test.fail("Expected pool '%s' to exist, but it was not found." % pool_name) # Run Testcase kwargs = {'source_format': params.get('pool_source_format', 'ext4')} try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utlv.define_pool(pool_name, pool_type, pool_target, cleanup_env, **kwargs) utlv.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under the negative case so the test can continue result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, define_error) if define_error: # Redo under the negative case so the test can continue result = virsh.pool_define(pool_xml) utlv.check_exit_status(result) # Step (3) # '--overwrite/--no-overwrite' is only for fs/disk/logical type pools # disk/fs pool: the prepare step has already labeled the disk and created a # filesystem on it, so '--overwrite' is necessary # logical pool: build pool will fail if the VG already exists, BZ#1373711 if pool_type != "logical": option = '' if pool_type in ['disk', 'fs']: option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result, build_error) if build_error: # Redo under the negative case so the test can continue result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # For an iSCSI pool, we need to discover targets before starting the pool if pool_type == 'iscsi': cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1' process.run(cmd, shell=True) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, start_error) if start_error: # Redo under the negative case so the test can continue result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: test.fail("Expected failure, but the command ran successfully.") else: if not destroy_error: test.fail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under the negative case so the test can continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool %s failed."
% pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) utlv.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool %s failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): test.fail("The target path '%s' still exists." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.run(cmd, shell=True).stdout_text lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name, shell=True) if cleanup_env[1]: utlv.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
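In the ACL variants above, the only difference from the plain calls is that acl_dargs routes the virsh command through the unprivileged user and the given URI, which is what the polkit rules are evaluated against. A minimal sketch, with illustrative user name and paths:

import logging

from virttest import virsh

acl_dargs = {'uri': 'qemu:///system',
             'unprivileged_user': 'testacl',   # illustrative user name
             'debug': True}
result = virsh.pool_define('/tmp/pool.xml.tmp', **acl_dargs)
if result.exit_status:
    logging.debug("pool-define was denied for the unprivileged user: %s",
                  result.stderr.strip())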
def run(test, params, env): """ Test disk encryption option. 1.Prepare test environment, destroy or suspend a VM. 2.Prepare tgtd and secret config. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def check_save_restore(save_file): """ Test domain save and restore. """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Test domain snapshot operation. """ snapshot1 = "s1" snapshot2 = "s2" ret = virsh.snapshot_create_as(vm_name, snapshot1) libvirt.check_exit_status(ret) ret = virsh.snapshot_create_as( vm_name, "%s --disk-only --diskspec vda," "file=/tmp/testvm-snap1" % snapshot2) libvirt.check_exit_status(ret, True) ret = virsh.snapshot_create_as( vm_name, "%s --memspec file=%s,snapshot=external" " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2)) libvirt.check_exit_status(ret, True) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] elif target.startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Cann't see added partition in VM") return False utils_disk.linux_disk_check(session, added_part) session.close() return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_qemu_cmd(): """ Check qemu-kvm command line options """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.system(cmd, ignore_status=True, shell=True): test.fail("Can't see disk option '%s' " "in command line" % cmd) def check_auth_plaintext(vm_name, password): """ Check if libvirt passed the plaintext of the chap authentication password to qemu. :param vm_name: The name of vm to be checked. :param password: The plaintext of password used for chap authentication. :return: True if using plaintext, False if not. """ cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s" % (vm_name, password)) return process.system(cmd, ignore_status=True, shell=True) == 0 # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") # Controller specific attributes. 
cntlr_type = params.get('controller_type', None) cntlr_model = params.get('controller_model', None) cntlr_index = params.get('controller_index', None) controller_addr_options = params.get('controller_addr_options', None) driver_iothread = params.get("driver_iothread") # iscsi options. iscsi_target = params.get("iscsi_target") iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") emulated_size = params.get("iscsi_image_size", "1") uuid = params.get("uuid", "") auth_uuid = "yes" == params.get("auth_uuid", "") auth_usage = "yes" == params.get("auth_usage", "") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error", "no") test_save_snapshot = "yes" == params.get("test_save_snapshot", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") secret_uuid = "" # Start vm and get all partions in vm. if device == "lun": if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: chap_user = "" chap_passwd = "" if auth_uuid or auth_usage: auth_place_in_location = params.get("auth_place_in_location") if 'source' in auth_place_in_location and not libvirt_version.version_compare( 3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version" ) auth_type = params.get("auth_type") secret_usage_target = params.get("secret_usage_target") secret_usage_type = params.get("secret_usage_type") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "iSCSI secret" sec_xml.auth_type = auth_type sec_xml.auth_username = chap_user sec_xml.usage = secret_usage_type sec_xml.target = secret_usage_target sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid == "": test.error("Failed to get secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode( chap_passwd.encode(encoding)).decode(encoding) ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # If we use qcow2 disk format, should format iscsi disk first. if device_format == "qcow2": cmd = ( "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size)) process.run(cmd, shell=True) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} # For lun type device, iothread attribute need to be set in controller. if driver_iothread and device != "lun": driver_dict.update({"iothread": driver_iothread}) vmxml.iothreads = int(driver_iothread) elif driver_iothread: vmxml.iothreads = int(driver_iothread) disk_xml.driver = driver_dict # Check if we want to use a faked uuid. 
if not uuid: uuid = secret_uuid auth_dict = {} if auth_uuid: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_uuid": uuid } elif auth_usage: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_usage": secret_usage_target } disk_source = disk_xml.new_disk_source( **{ "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] }) if auth_dict: disk_auth = disk_xml.new_auth(**auth_dict) if 'source' in auth_place_in_location: disk_source.auth = disk_auth if 'disk' in auth_place_in_location: disk_xml.auth = disk_auth disk_xml.source = disk_source if device != "lun": device_str = "serial_" + device_target disk_xml.serial = device_str # Sync VM xml. vmxml.add_device(disk_xml) # After virtio 1.0 is enabled, lun type device need use virtio-scsi # instead of virtio, so additional controller is needed. # Add controller. if device == "lun": ctrl = Controller(type_name=cntlr_type) if cntlr_model is not None: ctrl.model = cntlr_model if cntlr_index is not None: ctrl.index = cntlr_index ctrl_addr_dict = {} for addr_option in controller_addr_options.split(','): if addr_option != "": addr_part = addr_option.split('=') ctrl_addr_dict.update( {addr_part[0].strip(): addr_part[1].strip()}) ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict) # If driver_iothread is true, need add iothread attribute in controller. if driver_iothread: ctrl_driver_dict = {} ctrl_driver_dict.update({"iothread": driver_iothread}) ctrl.driver = ctrl_driver_dict logging.debug("Controller XML is:%s", ctrl) if cntlr_type: vmxml.del_controller(cntlr_type) else: vmxml.del_controller("scsi") vmxml.add_device(ctrl) try: # Start the VM and check status. vmxml.sync() vm.start() if status_error: test.fail("VM started unexpectedly.") # Check Qemu command line if test_qemu_cmd: check_qemu_cmd() except virt_vm.VMStartError as e: if status_error: if re.search(uuid, str(e)): pass else: test.fail("VM failed to start." "Error: %s" % str(e)) except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % xml_error) else: # Check partitions in VM. if check_partitions: if device == "lun": if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") else: session = vm.wait_for_login() added_part = utils_disk.get_disk_by_serial(device_str, session=session) if not added_part: test.fail("Unable to get disk with serial {}".format( device_str)) utils_disk.linux_disk_check(session, added_part) session.close() # Test domain save/restore/snapshot. if test_save_snapshot: save_file = os.path.join(data_dir.get_tmp_dir(), "%.save" % vm_name) check_save_restore(save_file) check_snapshot() if os.path.exists(save_file): os.remove(save_file) # Test libvirt doesn't pass the plaintext of chap password to qemu, # this function is implemented in libvirt 4.3.0-1. if (libvirt_version.version_compare(4, 3, 0) and (auth_uuid or auth_usage) and chap_passwd): if (check_auth_plaintext(vm_name, chap_passwd)): test.fail("Libvirt should not pass plaintext of chap " "password to qemu-kvm.") finally: # Close session. if 'session' in locals(): session.close() # Delete snapshots. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) # Clean up secret if secret_uuid: virsh.secret_undefine(secret_uuid)
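For reference, the auth_place_in_location switch above only changes where the <auth> element lands in the generated disk XML: directly under <disk>, or (libvirt >= 3.9.0) nested inside <source>. The fragments below are approximate illustrations with made-up target and host values, not output captured from this test:

# Approximate shape with auth as a child of <disk> (values illustrative):
AUTH_IN_DISK_XML = """
<disk type='network' device='lun'>
  <driver name='qemu' type='raw'/>
  <auth username='chap_user'>
    <secret type='iscsi' usage='libvirtiscsi'/>
  </auth>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/1'>
    <host name='127.0.0.1' port='3260'/>
  </source>
  <target dev='sdb' bus='scsi'/>
</disk>
"""

# Approximate shape with auth nested in <source> (libvirt >= 3.9.0):
AUTH_IN_SOURCE_XML = """
<disk type='network' device='lun'>
  <driver name='qemu' type='raw'/>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/1'>
    <host name='127.0.0.1' port='3260'/>
    <auth username='chap_user'>
      <secret type='iscsi' usage='libvirtiscsi'/>
    </auth>
  </source>
  <target dev='sdb' bus='scsi'/>
</disk>
"""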
def run(test, params, env): """ Test only ppc hosts """ if 'ppc64le' not in platform.machine().lower(): test.cancel('This case is for ppc only.') vm_name = params.get('main_vm', 'EXAMPLE') status_error = 'yes' == params.get('status_error', 'no') case = params.get('case', '') error_msg = params.get('error_msg', '') # Backup vm xml bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Assign address to panic device if case == 'panic_address': # Check if there is already a panic device on vm, remove it if true origin_panic = vmxml.get_devices('panic') if origin_panic: for dev in origin_panic: vmxml.del_device(dev) vmxml.sync() # Create panic device to add to vm panic_dev = Panic() panic_dev.model = 'pseries' panic_dev.addr_type = 'isa' panic_dev.addr_iobase = '0x505' logging.debug(panic_dev) vmxml.add_device(panic_dev) vmxml.sync() cmd_result = virsh.start(vm_name, debug=True, ignore_status=True) # Get Ethernet pci devices if case == 'unavail_pci_device': lspci = process.run('lspci|grep Ethernet', shell=True).stdout_text.splitlines() pci_ids = [line.split()[0] for line in lspci] logging.debug(pci_ids) max_id = max([int(pci_id.split('.')[-1]) for pci_id in pci_ids]) prefix = pci_ids[-1].split('.')[0] # Create fake pci ids for i in range(5): max_id += 1 # function must be <= 7 if max_id > 7: break new_pci_id = '.'.join([prefix, str(max_id)]) new_pci_xml = libvirt.create_hostdev_xml(new_pci_id, xmlfile=False) vmxml.add_device(new_pci_xml) vmxml.sync() logging.debug('Vm xml after adding unavailable pci devices: \n%s', vmxml) # Check result if there's a result to check if 'cmd_result' in locals(): libvirt.check_exit_status(cmd_result, status_error) if error_msg: libvirt.check_result(cmd_result, [error_msg]) finally: # In case vm disappeared after test if case == 'unavail_pci_device': virsh.define(bk_xml.xml, debug=True) else: bk_xml.sync()
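To recap the fake-PCI-address construction in the 'unavail_pci_device' case above: the test takes the bus:slot prefix of the last Ethernet device and keeps bumping the function number (which must stay <= 7) to build addresses that do not exist on the host. A standalone sketch with example lspci data:

# Example short-form lspci addresses; real values come from `lspci | grep Ethernet`.
pci_ids = ['01:00.0', '01:00.1']
max_id = max(int(pci_id.split('.')[-1]) for pci_id in pci_ids)
prefix = pci_ids[-1].split('.')[0]
fake_pci_ids = []
for _ in range(5):
    max_id += 1
    if max_id > 7:      # PCI function numbers only go up to 7
        break
    fake_pci_ids.append('.'.join([prefix, str(max_id)]))
# fake_pci_ids -> ['01:00.2', '01:00.3', '01:00.4', '01:00.5', '01:00.6']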
def run(test, params, env): """ Do test for vol-download and vol-upload Basic steps are 1. Create pool with type defined in cfg 2. Create image with writing data in it 3. Get md5 value before operation 4. Do vol-download/upload with options(offset, length) 5. Check md5 value after operation """ pool_type = params.get("vol_download_upload_pool_type") pool_name = params.get("vol_download_upload_pool_name") pool_target = params.get("vol_download_upload_pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_download_upload_vol_name") file_name = params.get("vol_download_upload_file_name") file_path = os.path.join(data_dir.get_tmp_dir(), file_name) offset = params.get("vol_download_upload_offset") length = params.get("vol_download_upload_length") capacity = params.get("vol_download_upload_capacity") allocation = params.get("vol_download_upload_allocation") frmt = params.get("vol_download_upload_format") operation = params.get("vol_download_upload_operation") create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes")) setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") b_luks_encrypt = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} sparse_option_support = "yes" == params.get("sparse_option_support", "yes") with_clusterSize = "yes" == params.get("with_clusterSize") vol_clusterSize = params.get("vol_clusterSize", "64") vol_clusterSize_unit = params.get("vol_clusterSize_unit") vol_format = params.get("vol_format", "qcow2") libvirt_version.is_libvirt_feature_supported(params) # libvirt acl polkit related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.error("API acl test not supported in current" " libvirt version.") # Destroy VM. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest", pre_disk_vol=["50M"]) # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if pool_type == "disk": vol_name = utlv.new_disk_vol_name(pool_name) if vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % vol_name utlv.update_polkit_rule(params, vol_pat, new_value) if create_vol: if b_luks_encrypt: if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS format not supported in " "current libvirt version") params['sec_volume'] = os.path.join(pool_target, vol_name) luks_sec_uuid = utlv.create_secret(params) ret = virsh.secret_set_value(luks_sec_uuid, encryption_password, encode=True) utlv.check_exit_status(ret) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(capacity) vol_arg['allocation'] = int(allocation) if with_clusterSize: vol_arg['format'] = vol_format vol_arg['clusterSize'] = int(vol_clusterSize) vol_arg['clusterSize_unit'] = vol_clusterSize_unit create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name) virsh.pool_refresh(pool_name, debug=True) vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip() # iscsi volume name is different from others if pool_type == "iscsi": # Due to BZ 1843791, the volume cannot be obtained sometimes. if len(vol_list.splitlines()) < 3: test.fail("Failed to get iscsi type volume.") vol_name = vol_list.split('\n')[2].split()[0] vol_path = virsh.vol_path(vol_name, pool_name, ignore_status=False).stdout.strip() logging.debug("vol_path is %s", vol_path) # Add command options if pool_type is not None: options = " --pool %s" % pool_name if offset is not None: options += " --offset %s" % offset offset = int(offset) else: offset = 0 if length is not None: options += " --length %s" % length length = int(length) else: length = 0 logging.debug("%s options are %s", operation, options) if operation == "upload": # write data to file write_file(file_path) # Set length for calculate the offset + length in the following # func get_pre_post_digest() and digest() if length == 0: length = 1048576 def get_pre_post_digest(): """ Get pre region and post region digest if have offset and length :return: pre digest and post digest """ # Get digest of pre region before offset if offset != 0: digest_pre = digest(vol_path, 0, offset) else: digest_pre = 0 logging.debug("pre region digest read from %s 0-%s is %s", vol_path, offset, digest_pre) # Get digest of post region after offset+length digest_post = digest(vol_path, offset + length, 0) logging.debug("post region digest read from %s %s-0 is %s", vol_path, offset + length, digest_post) return (digest_pre, digest_post) # Get pre and post digest before operation for compare (ori_pre_digest, ori_post_digest) = get_pre_post_digest() ori_digest = digest(file_path, 0, 0) logging.debug("ori digest read from %s is %s", file_path, ori_digest) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume upload result = virsh.vol_upload(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation (aft_pre_digest, 
aft_post_digest) = get_pre_post_digest() aft_digest = digest(vol_path, offset, length) logging.debug("aft digest read from %s is %s", vol_path, aft_digest) # Compare the pre and post part before and after if ori_pre_digest == aft_pre_digest and \ ori_post_digest == aft_post_digest: logging.info("file pre and aft digest match") else: test.fail("file pre or post digests do not match in %s" % operation) if operation == "download": # Write data to volume write_file(vol_path) # Record the digest value before operation ori_digest = digest(vol_path, offset, length) logging.debug("original digest read from %s is %s", vol_path, ori_digest) process.run("touch %s" % file_path, ignore_status=True, shell=True) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume download result = virsh.vol_download(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation aft_digest = digest(file_path, 0, 0) logging.debug("new digest read from %s is %s", file_path, aft_digest) if operation != "mix": if result.exit_status != 0: test.fail("Failed to %s volume: %s" % (operation, result.stderr)) # Compare the change part on volume and file if ori_digest == aft_digest: logging.info("file digests match, volume %s succeeded", operation) else: test.fail("file digests do not match, volume %s failed" % operation) if operation == "mix": target = params.get("virt_disk_device_target", "vdb") disk_file_path = os.path.join(pool_target, file_name) # Create one disk xml and attach it to VM. custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file', 'disk', target, 'virtio') ret = virsh.attach_device(vm_name, custom_disk_xml.xml, flagstr="--config", debug=True) libvirt.check_exit_status(ret) if vm.is_dead(): vm.start() # Write 100M data into disk. data_size = 100 write_disk(test, vm, target, data_size) data_size_in_bytes = data_size * 1024 * 1024 # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Download volume to local with sparse option. download_spare_file = "download-sparse.raw" download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file) options += " --sparse" result = virsh.vol_download(file_name, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) libvirt.check_exit_status(result) # Check download image size. one_g_in_bytes = 1073741824 download_img_info = utils_misc.get_image_info(download_file_path) download_disk_size = int(download_img_info['dsize']) if (download_disk_size < data_size_in_bytes or download_disk_size >= one_g_in_bytes): test.fail("download image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (download_disk_size, data_size_in_bytes)) # Create one upload sparse image file. upload_sparse_file = "upload-sparse.raw" upload_file_path = os.path.join(pool_target, upload_sparse_file) libvirt.create_local_disk('file', upload_file_path, '1', 'raw') # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Do volume upload, uploading the sparse file downloaded last time.
result = virsh.vol_upload(upload_sparse_file, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) upload_img_info = utils_misc.get_image_info(upload_file_path) upload_disk_size = int(upload_img_info['dsize']) if (upload_disk_size < data_size_in_bytes or upload_disk_size >= one_g_in_bytes): test.fail("upload image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (upload_disk_size, data_size_in_bytes)) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest") for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) if os.path.isfile(file_path): os.remove(file_path)
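# NOTE: write_file(), write_disk(), create_disk() and digest() used above are
# helpers defined (or imported) elsewhere in this module and are not shown
# here.  As a rough sketch of what the digest(path, offset, length) idea boils
# down to, a byte-range checksum can be computed with hashlib alone; the name
# range_md5 and the single-read implementation are assumptions made for this
# illustration, not the module's actual helper:
import hashlib


def range_md5(path, offset=0, length=0):
    """Return the md5 hexdigest of `length` bytes starting at `offset`;
    a length of 0 means "read to the end of the file"."""
    md5 = hashlib.md5()
    with open(path, 'rb') as handle:
        handle.seek(offset)
        # For large ranges a chunked loop would be preferable; a single read
        # keeps the sketch short.
        data = handle.read(length) if length else handle.read()
        md5.update(data)
    return md5.hexdigest()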
def run(test, params, env): """ Test SCSI3 Persistent Reservation functions. 1.Prepare iscsi backend storage. 2.Prepare disk xml. 3.Hot/cold plug the disk to vm. 4.Check if SCSI3 Persistent Reservation commands can be issued to that disk. 5.Recover test environment. 6.Confirm the test result. """ def get_delta_parts(vm, old_parts): """ Get the newly added partitions/blockdevs in vm. :param vm: The vm to be operated. :param old_parts: The original partitions/blockdevs in vm. :return: Newly added partitions/blockdevs. """ session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) new_parts = list(set(new_parts).difference(set(old_parts))) session.close() return new_parts def check_pr_cmds(vm, blk_dev): """ Check if SCSI3 Persistent Reservation commands can be used in vm. :param vm: The vm to be checked. :param blk_dev: The block device in vm to be checked. """ session = vm.wait_for_login() cmd = ( "sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&" "sg_persist --no-inquiry --in -k /dev/{0} &&" "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -r /dev/{0} &&" "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -r /dev/{0} &&" "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -k /dev/{0}".format(blk_dev)) cmd_status, cmd_output = session.cmd_status_output(cmd) session.close() if cmd_status == 127: test.error("sg3_utils not installed in test image") elif cmd_status != 0: test.fail("persistent reservation failed for /dev/%s" % blk_dev) else: logging.info("persistent reservation successful for /dev/%s" % blk_dev) def start_or_stop_qemu_pr_helper( is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"): """ Start or stop qemu-pr-helper daemon :param is_start: Set True to start, False to stop. """ service_mgr = service.ServiceManager() if is_start: service_mgr.start('qemu-pr-helper') time.sleep(2) shutil.chown(path_to_sock, "qemu", "qemu") else: service_mgr.stop('qemu-pr-helper') def ppc_controller_update(): """ Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type :return: """ if params.get('machine_type') == 'pseries' and device_bus == 'scsi': if not vmxml.get_controllers(device_bus, 'virtio-scsi'): vmxml.del_controller(device_bus) ppc_controller = Controller('controller') ppc_controller.type = device_bus ppc_controller.index = '0' ppc_controller.model = 'virtio-scsi' vmxml.add_device(ppc_controller) vmxml.sync() # Check if SCSI3 Persistent Reservations supported by # current libvirt versions. 
if not libvirt_version.version_compare(4, 4, 0): test.cancel("The <reservations> tag supported by libvirt from version " "4.4.0") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Disk specific attributes device = params.get("virt_disk_device", "lun") device_target = params.get("virt_disk_device_target", "sdb") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "block") device_bus = params.get("virt_disk_device_bus", "scsi") # Iscsi options iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") emulated_size = params.get("iscsi_image_size", "1G") auth_uuid = "yes" == params.get("auth_uuid") auth_usage = "yes" == params.get("auth_usage") # SCSI3 PR options reservations_managed = "yes" == params.get("reservations_managed", "yes") reservations_source_type = params.get("reservations_source_type", "unix") reservations_source_path = params.get("reservations_source_path", "/var/run/qemu-pr-helper.sock") reservations_source_mode = params.get("reservations_source_mode", "client") secret_uuid = "" # Case step options hotplug_disk = "yes" == params.get("hotplug_disk", "no") # Start vm and get all partitions in vm if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: chap_user = "" chap_passwd = "" if auth_uuid or auth_usage: auth_in_source = "yes" == params.get("auth_in_source", "no") if auth_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel("place auth in source is not supported in " "current libvirt version.") auth_type = params.get("auth_type", "chap") secret_usage_target = params.get("secret_usage_target", "libvirtiscsi") secret_usage_type = params.get("secret_usage_type", "iscsi") chap_user = params.get("iscsi_user", "redhat") chap_passwd = params.get("iscsi_password", "redhat") sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "iSCSI secret" sec_xml.auth_type = auth_type sec_xml.auth_username = chap_user sec_xml.usage = secret_usage_type sec_xml.target = secret_usage_target sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid == "": test.error("Failed to get secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode( str(chap_passwd).encode(encoding)).decode(encoding) ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) # Setup iscsi target blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # Add disk xml vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict auth_dict = {} if auth_uuid: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_uuid": secret_uuid } elif auth_usage: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_usage": secret_usage_target } disk_source = 
disk_xml.new_disk_source(**{"attrs": {"dev": blk_dev}}) if auth_dict: disk_auth = disk_xml.new_auth(**auth_dict) if auth_in_source: disk_source.auth = disk_auth else: disk_xml.auth = disk_auth if reservations_managed: reservations_dict = {"reservations_managed": "yes"} else: start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path) reservations_dict = { "reservations_managed": "no", "reservations_source_type": reservations_source_type, "reservations_source_path": reservations_source_path, "reservations_source_mode": reservations_source_mode } disk_source.reservations = disk_xml.new_reservations( **reservations_dict) disk_xml.source = disk_source # Update controller of ppc vms ppc_controller_update() if not hotplug_disk: vmxml.add_device(disk_xml) try: # Start the VM and check status vmxml.sync() vm.start() vm.wait_for_login().close() time.sleep(5) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result) new_parts = get_delta_parts(vm, old_parts) if len(new_parts) != 1: logging.error("Expected 1 dev added but has %s" % len(new_parts)) new_part = new_parts[0] check_pr_cmds(vm, new_part) result = virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(result) except virt_vm.VMStartError as e: test.fail("VM failed to start." "Error: %s" % str(e)) except xcepts.LibvirtXMLError as xml_error: test.fail("Failed to define VM:\n%s" % xml_error) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) # Clean up secret if secret_uuid: virsh.secret_undefine(secret_uuid) # Stop qemu-pr-helper daemon start_or_stop_qemu_pr_helper(is_start=False)
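# For reference, the reservations_dict assembled above is rendered by the Disk
# xml helpers into a <reservations> child of the disk <source>.  The snippets
# below are illustrative only; the device and socket paths are placeholders,
# not the values this test computes:
#
#   managed="yes"  ->  <source dev='/dev/sdX'>
#                        <reservations managed='yes'/>
#                      </source>
#
#   managed="no"   ->  <source dev='/dev/sdX'>
#                        <reservations managed='no'>
#                          <source type='unix'
#                                  path='/var/run/qemu-pr-helper.sock'
#                                  mode='client'/>
#                        </reservations>
#                      </source>
#
# With managed='yes' libvirt runs its own pr-helper for the domain, which is
# why start_or_stop_qemu_pr_helper() is only called on the unmanaged path.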
def run(test, params, env): """ Test command: virsh event and virsh qemu-monitor-event 1. Run virsh event/qemu-monitor-event in a new ShellSession 2. Trigger various events 3. Catch the return of virsh event and qemu-monitor-event, and check it. """ vms = [] if params.get("multi_vms") == "yes": vms = env.get_all_vms() else: vm_name = params.get("main_vm") vms.append(env.get_vm(vm_name)) event_name = params.get("event_name") event_all_option = "yes" == params.get("event_all_option", "no") event_list_option = "yes" == params.get("event_list_option", "no") event_loop = "yes" == params.get("event_loop", "no") event_timeout = params.get("event_timeout") event_option = params.get("event_option", "") status_error = "yes" == params.get("status_error", "no") qemu_monitor_test = "yes" == params.get("qemu_monitor_test", "no") signal_name = params.get("signal", None) panic_model = params.get("panic_model") addr_type = params.get("addr_type") addr_iobase = params.get("addr_iobase") disk_format = params.get("disk_format", "") event_cmd = "event" dump_path = '/var/lib/libvirt/qemu/dump' if qemu_monitor_test: event_cmd = "qemu-monitor-event" events_list = params.get("events_list") if events_list: events_list = events_list.split(",") else: events_list = [] virsh_dargs = {'debug': True, 'ignore_status': True} virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC) for dom in vms: if dom.is_alive(): dom.destroy() vmxml_backup = [] for dom in vms: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(dom.name) vmxml_backup.append(vmxml.copy()) tmpdir = data_dir.get_tmp_dir() new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name) def create_iface_xml(): """ Create interface xml file """ iface = Interface("bridge") iface.source = eval("{'bridge':'virbr0'}") iface.model = "virtio" logging.debug("Create new interface xml: %s", iface) return iface def add_disk(vm_name, init_source, target_device, extra_param, format=''): """ Add disk/cdrom for test vm :param vm_name: guest name :param init_source: source file :param target_device: target of disk device :param extra_param: additional arguments to command :param format: init_source format(qcow2 or raw) """ if not os.path.exists(new_disk): if format == "qcow2": process.run( 'qemu-img create -f qcow2 %s %s -o preallocation=full' % (new_disk, '1G'), shell=True, verbose=True) elif format == "raw": process.run('qemu-img create -f raw %s %s' % (new_disk, '1G'), shell=True, verbose=True) else: open(new_disk, 'a').close() if virsh.is_alive(vm_name) and 'cdrom' in extra_param: virsh.destroy(vm_name) virsh.attach_disk(vm_name, init_source, target_device, extra_param, **virsh_dargs) def wait_for_shutoff(vm): """ Wait for the vm to reach state shutoff :param vm: VM instance """ def is_shutoff(): state = vm.state() logging.debug("Current state: %s", state) return "shut off" in state utils_misc.wait_for(is_shutoff, timeout=90, first=1, step=1, text="Waiting for vm state to be shut off") def trigger_events(dom, events_list=[]): """ Trigger various events in events_list :param dom: the vm objects corresponding to the domain :return: the expected output that virsh event command prints out """ expected_events_list = [] save_path = os.path.join(tmpdir, "%s_event.save" % dom.name) print(dom.name) xmlfile = dom.backup_xml() try: for event in events_list: logging.debug("Current event is: %s", event) if event in [ 'start', 'restore', 'create', 'edit', 'define', 'undefine', 'crash' ]: if dom.is_alive(): dom.destroy() if event in ['create', 'define']: dom.undefine() else: if not 
dom.is_alive(): dom.start() dom.wait_for_login().close() if event == "resume": dom.pause() if event == "undefine": virsh.undefine(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Undefined Removed") elif event == "create": virsh.create(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") elif event == "destroy": virsh.destroy(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed") elif event == "define": virsh.define(xmlfile, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Defined Added") elif event == "start": virsh.start(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") expected_events_list.append("'lifecycle' for %s:" " Started Booted") dom.wait_for_login().close() elif event == "suspend": virsh.suspend(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") if not libvirt_version.version_compare(5, 3, 0): expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") elif event == "resume": virsh.resume(dom.name, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "save": virsh.save(dom.name, save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Suspended Paused") expected_events_list.append("'lifecycle' for %s:" " Stopped Saved") elif event == "restore": if not os.path.exists(save_path): logging.error("%s not exist", save_path) else: virsh.restore(save_path, **virsh_dargs) expected_events_list.append("'lifecycle' for %s:" " Started Restored") expected_events_list.append("'lifecycle' for %s:" " Resumed Snapshot") elif event == "edit": #Check whether 'description' element exists. domxml = virsh.dumpxml(dom.name).stdout.strip() find_desc = parseString(domxml).getElementsByTagName( "description") if find_desc == []: #If not exists, add one for it. logging.info("Adding <description> to guest") virsh.desc(dom.name, "--config", "Added desc for testvm", **virsh_dargs) #The edit operation is to delete 'description' element. 
edit_cmd = [r":g/<description.*<\/description>/d"] utlv.exec_virsh_edit(dom.name, edit_cmd) expected_events_list.append("'lifecycle' for %s:" " Defined Updated") elif event == "shutdown": if signal_name is None: virsh.shutdown(dom.name, **virsh_dargs) # Wait a few seconds for shutdown finish time.sleep(3) if utils_misc.compare_qemu_version(2, 9, 0): #Shutdown reason distinguished from qemu_2.9.0-9 expected_events_list.append( "'lifecycle' for %s:" " Shutdown Finished after guest request") else: os.kill(dom.get_pid(), getattr(signal, signal_name)) if utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append( "'lifecycle' for %s:" " Shutdown Finished after host request") if not utils_misc.compare_qemu_version(2, 9, 0): expected_events_list.append("'lifecycle' for %s:" " Shutdown Finished") wait_for_shutoff(dom) expected_events_list.append("'lifecycle' for %s:" " Stopped Shutdown") elif event == "crash": if not vmxml.xmltreefile.find('devices').findall('panic'): # Set panic device panic_dev = Panic() panic_dev.model = panic_model panic_dev.addr_type = addr_type panic_dev.addr_iobase = addr_iobase vmxml.add_device(panic_dev) vmxml.on_crash = "coredump-restart" vmxml.sync() logging.info("Guest xml now is: %s", vmxml) dom.start() session = dom.wait_for_login() # Stop kdump in the guest session.cmd("systemctl stop kdump", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") try: # Crash the guest session.cmd("echo c > /proc/sysrq-trigger", timeout=90) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'lifecycle' for %s:" " Crashed Panicked") expected_events_list.append("'lifecycle' for %s:" " Resumed Unpaused") elif event == "reset": virsh.reset(dom.name, **virsh_dargs) expected_events_list.append("'reboot' for %s") elif event == "vcpupin": virsh.vcpupin(dom.name, '0', '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0") elif event == "emulatorpin": virsh.emulatorpin(dom.name, '0', **virsh_dargs) expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0") elif event == "setmem": mem_size = int(params.get("mem_size", 512000)) virsh.setmem(dom.name, mem_size, **virsh_dargs) expected_events_list.append("'balloon-change' for %s:") elif event == "device-added-removed": add_disk(dom.name, new_disk, 'vdb', '') expected_events_list.append("'device-added' for %s:" " virtio-disk1") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " virtio-disk1") iface_xml_obj = create_iface_xml() iface_xml_obj.xmltreefile.write() virsh.detach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-removed' for %s:" " net0") time.sleep(2) virsh.attach_device(dom.name, iface_xml_obj.xml, **virsh_dargs) expected_events_list.append("'device-added' for %s:" " net0") elif event == "block-threshold": add_disk(dom.name, new_disk, 'vdb', '', format=disk_format) logging.debug(process.run('qemu-img info %s -U' % new_disk)) virsh.domblkthreshold(vm_name, 'vdb', '100M') session = dom.wait_for_login() session.cmd( "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && " "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync" ) time.sleep(5) session.close() expected_events_list.append( "'block-threshold' for %s:" " dev: vdb(%s) 104857600 29368320") virsh.detach_disk(dom.name, 'vdb', **virsh_dargs) elif event == "change-media": target_device = "hdc" 
device_target_bus = params.get("device_target_bus", "ide") disk_blk = vm_xml.VMXML.get_disk_blk(dom.name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: logging.info("Adding cdrom to guest") if dom.is_alive(): dom.destroy() add_disk( dom.name, "''", target_device, ("--type cdrom --sourcetype file --driver qemu " + "--config --targetbus %s" % device_target_bus)) dom.start() all_options = new_disk + " --insert" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " closed") all_options = new_disk + " --eject" virsh.change_media(dom.name, target_device, all_options, **virsh_dargs) expected_events_list.append("'tray-change' for %s disk" + " .*%s.*:" % device_target_bus + " opened") elif event == "hwclock": session = dom.wait_for_login() try: session.cmd("hwclock --systohc", timeout=60) except (ShellTimeoutError, ShellProcessTerminatedError) as details: logging.info(details) session.close() expected_events_list.append("'rtc-change' for %s:") elif event == "metadata_set": metadata_uri = params.get("metadata_uri") metadata_key = params.get("metadata_key") metadata_value = params.get("metadata_value") virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, new_metadata=metadata_value, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_edit": metadata_uri = "http://herp.derp/" metadata_key = "herp" metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>" virsh_cmd = r"virsh metadata %s --uri %s --key %s %s" virsh_cmd = virsh_cmd % (dom.name, metadata_uri, metadata_key, "--edit") session = aexpect.ShellSession("sudo -s") logging.info("Running command: %s", virsh_cmd) try: session.sendline(virsh_cmd) session.sendline(r":insert") session.sendline(metadata_value) session.sendline(".") session.send('ZZ') remote.handle_prompts(session, None, None, r"[\#\$]\s*$", debug=True, timeout=60) except Exception as e: test.error("Error occured: %s" % e) session.close() # Check metadata after edit virsh.metadata(dom.name, metadata_uri, options="", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) expected_events_list.append("'metadata-change' for %s: " "element http://app.org/") else: test.error("Unsupported event: %s" % event) # Event may not received immediately time.sleep(3) finally: if os.path.exists(save_path): os.unlink(save_path) if os.path.exists(new_disk): os.unlink(new_disk) return [(dom.name, event) for event in expected_events_list] def check_output(output, expected_events_list): """ Check received domain event in output. :param output: The virsh shell output, such as: Welcome to virsh, the virtualization interactive terminal. 
Type: 'help' for help with commands 'quit' to quit virsh # event 'lifecycle' for domain avocado-vt-vm1: Started Booted events received: 1 virsh # :param expected_events_list: A list of expected events """ logging.debug("Actual events: %s", output) event_idx = 0 for dom_name, event in expected_events_list: if event in expected_events_list[0]: event_idx = 0 if re.search("block-threshold", event): event_str = "block-threshold" else: event_str = "event " + event % ("domain %s" % dom_name) logging.info("Expected event: %s", event_str) match = re.search(event_str, output[event_idx:]) if match: event_idx = event_idx + match.start(0) + len(match.group(0)) continue else: test.fail("Not find expected event:%s. Is your " "guest too slow to get started in %ss?" % (event_str, event_timeout)) try: # Set vcpu placement to static to avoid emulatorpin fail vmxml.placement = 'static' # Using a large memeoy(>1048576) to avoid setmem fail vmxml.max_mem = 2097152 vmxml.current_mem = 2097152 vmxml.sync() if event_all_option and not qemu_monitor_test: event_option += " --all" if event_list_option: event_option += " --list" if event_loop: event_option += " --loop" if not status_error and not event_list_option: event_cmd += " %s" % event_option if event_name and not qemu_monitor_test: event_cmd += " --event %s" % event_name if event_timeout: event_cmd += " --timeout %s" % event_timeout # Run the command in a new virsh session, then waiting for # various events logging.info("Sending '%s' to virsh shell", event_cmd) virsh_session.sendline(event_cmd) elif qemu_monitor_test: result = virsh.qemu_monitor_event(event=event_name, event_timeout=event_timeout, options=event_option, **virsh_dargs) utlv.check_exit_status(result, status_error) else: result = virsh.event(event=event_name, event_timeout=event_timeout, options=event_option, **virsh_dargs) utlv.check_exit_status(result, status_error) if not status_error: if not event_list_option: expected_events_list = [] virsh_dargs['ignore_status'] = False for dom in vms: expected_events_list.extend( trigger_events(dom, events_list)) if event_timeout: # Make sure net-event will timeout on time time.sleep(int(event_timeout)) elif event_loop: virsh_session.send_ctrl("^C") time.sleep(5) ret_output = virsh_session.get_stripped_output() if qemu_monitor_test: # Not check for qemu-monitor-event output expected_events_list = [] check_output(ret_output, expected_events_list) finally: for dom in vms: if dom.is_alive(): dom.destroy() virsh_session.close() for xml in vmxml_backup: xml.sync() if os.path.exists(dump_path): shutil.rmtree(dump_path) os.mkdir(dump_path)
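# check_output() above scans the captured virsh output for each expected event
# string in order, moving a cursor forward after every hit.  A stripped-down
# illustration of that idea follows; the function name and sample strings are
# made up for this sketch, and unlike check_output() it matches the strings
# literally instead of treating them as regular expressions:
import re


def find_events_in_order(output, expected_strings):
    cursor = 0
    for expected in expected_strings:
        match = re.search(re.escape(expected), output[cursor:])
        if not match:
            return False
        cursor += match.end()
    return True

# Example:
# find_events_in_order(
#     "event 'lifecycle' for domain vm1: Started Booted\n"
#     "event 'lifecycle' for domain vm1: Stopped Destroyed\n",
#     ["Started Booted", "Stopped Destroyed"])      # -> True
# find_events_in_order("only Started Booted here",
#                      ["Stopped Destroyed"])       # -> False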
def run(test, params, env): """ Test domiftune tuning 1) Positive testing 1.1) get the current domiftune parameters for a running guest 1.2) set the current domiftune parameters for a running guest 2) Negative testing 2.1) get domiftune parameters 2.2) set domiftune parameters """ # Run test case vm_name = params.get("main_vm") vm = env.get_vm(vm_name) status_error = params.get("status_error", "no") start_vm = params.get("start_vm", "yes") change_parameters = params.get("change_parameters", "no") interface_ref = params.get("interface_ref", "name") interface = [] if vm and not vm.is_alive(): vm.start() if vm and vm.is_alive(): virt_xml_obj = vm_xml.VMXML(virsh_instance=virsh) interface = virt_xml_obj.get_iface_dev(vm_name) if_mac = interface[0] # Get interface name vmxml = virt_xml_obj.new_from_dumpxml(vm_name) if_node = vmxml.get_iface_all().get(if_mac) if_name = if_node.find('target').get('dev') if interface_ref == "name": interface = if_name if interface_ref == "mac": interface = if_mac logging.debug("the interface is %s", interface) test_dict = dict(params) test_dict['vm'] = vm if interface: test_dict['iface_dev'] = interface if start_vm == "no" and vm and vm.is_alive(): vm.destroy() # positive and negative testing ######### libvirtd = utils_libvirtd.Libvirtd() if change_parameters == "no": get_domiftune_parameter(test_dict, test, libvirtd) else: set_domiftune_parameter(test_dict, test, libvirtd) if change_parameters != "no": ret = virsh.domiftune(vm_name, interface, 'current', '0', '0', debug=True) libvirt.check_exit_status(ret)
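# get_domiftune_parameter() and set_domiftune_parameter() are defined
# elsewhere in this module.  For orientation, `virsh domiftune <dom> <iface>`
# prints "name : value" pairs, which such helpers typically fold into a dict
# roughly like the sketch below; the helper name and the sample text are
# illustrative only:
def parse_domiftune_output(text):
    values = {}
    for line in text.splitlines():
        if ':' not in line:
            continue
        key, value = line.split(':', 1)
        values[key.strip()] = value.strip()
    return values

# parse_domiftune_output("inbound.average: 1000\ninbound.peak   : 5000")
# -> {'inbound.average': '1000', 'inbound.peak': '5000'}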
def check_result(result, status_error): """ Check virt-v2v command result """ utlv.check_exit_status(result, status_error) output = result.stdout_text + result.stderr_text if not status_error: if output_mode == 'rhev': if not utils_v2v.import_vm_to_ovirt( params, address_cache, timeout=v2v_timeout): test.fail('Import VM failed') if output_mode == 'libvirt': try: virsh.start(vm_name, debug=True, ignore_status=False) except Exception as e: test.fail('Start vm failed: %s' % str(e)) # Check guest following the checkpoint document after convertion vmchecker = VMChecker(test, params, env) params['vmchecker'] = vmchecker if params.get('skip_vm_check') != 'yes': ret = vmchecker.run() if len(ret) == 0: logging.info("All common checkpoints passed") logging.debug(vmchecker.vmxml) if checkpoint == 'multi_kernel': check_boot_kernel(vmchecker.checker) check_vmlinuz_initramfs(output) if checkpoint == 'floppy': # Convert to rhv will remove all removeable devices(floppy, # cdrom) if output_mode in ['local', 'libvirt']: check_floppy_exist(vmchecker.checker) if checkpoint == 'multi_disks': check_disks(vmchecker.checker) if checkpoint == 'multi_netcards': check_multi_netcards(params['mac_address'], vmchecker.vmxml) if checkpoint.startswith(('spice', 'vnc')): if checkpoint == 'spice_encrypt': vmchecker.check_graphics(params[checkpoint]) else: graph_type = checkpoint.split('_')[0] vmchecker.check_graphics({'type': graph_type}) video_type = vmchecker.xmltree.find( './devices/video/model').get('type') if video_type.lower() != 'qxl': log_fail('Video expect QXL, actual %s' % video_type) if checkpoint.startswith('listen'): listen_type = vmchecker.xmltree.find( './devices/graphics/listen').get('type') logging.info('listen type is: %s', listen_type) if listen_type != checkpoint.split('_')[-1]: log_fail('listen type changed after conversion') if checkpoint.startswith('selinux'): status = vmchecker.checker.session.cmd( 'getenforce').strip().lower() logging.info('Selinux status after v2v:%s', status) if status != checkpoint[8:]: log_fail('Selinux status not match') if checkpoint == 'check_selinuxtype': expect_output = vmchecker.checker.session.cmd( 'cat /etc/selinux/config') expect_selinuxtype = re.search(r'^SELINUXTYPE=\s*(\S+)$', expect_output, re.MULTILINE).group(1) actual_output = vmchecker.checker.session.cmd('sestatus') actual_selinuxtype = re.search( r'^Loaded policy name:\s*(\S+)$', actual_output, re.MULTILINE).group(1) if actual_selinuxtype != expect_selinuxtype: log_fail('Seliunx type not match') if checkpoint == 'guest_firewalld_status': check_firewalld_status(vmchecker.checker, params[checkpoint]) if checkpoint in ['ntpd_on', 'sync_ntp']: check_time_keep(vmchecker.checker) # Merge 2 error lists error_list.extend(vmchecker.errors) log_check = utils_v2v.check_log(params, output) if log_check: log_fail(log_check) if len(error_list): test.fail('%d checkpoints failed: %s' % (len(error_list), error_list))
def run(test, params, env): """ Test for vhba hostdev passthrough. 1. create a vhba 2. prepare hostdev xml for lun device of the newly created vhba 3.1 If hot attach, attach-device the hostdev xml to vm 3.2 If cold attach, add the hostdev to vm and start it 4. login the vm and check the attached disk 5. detach-device the hostdev xml 6. login the vm to check the partitions """ def check_in_vm(vm, target, old_parts): """ Check mount/read/write disk in VM. :param vm: VM guest. :param target: Disk dev in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) try: cmd_status, cmd_output = session.cmd_status_output(cmd) except Exception as detail: test.error("Error occurred when run cmd: fdisk, %s" % detail) logging.info("Check disk operation in VM:\n%s", cmd_output) session.close() if cmd_status != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail: logging.error(str(detail)) return False try: status_error = "yes" == params.get("status_error", "no") vm_name = params.get("main_vm", "avocado-vt-vm1") device_target = params.get("hostdev_disk_target", "hdb") scsi_wwnn = params.get("scsi_wwnn", "ENTER.YOUR.WWNN") scsi_wwpn = params.get("scsi_wwpn", "ENTER.YOUR.WWPN") attach_method = params.get('attach_method', 'hot') vm = env.get_vm(vm_name) vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} new_vhbas = [] if scsi_wwnn.count("ENTER.YOUR.WWNN") or \ scsi_wwpn.count("ENTER.YOUR.WWPN"): test.cancel("You didn't provide proper wwpn/wwnn") if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) # find first online hba online_hbas = [] online_hbas = utils_npiv.find_hbas("hba") if not online_hbas: test.cancel("NO ONLINE HBAs!") first_online_hba = online_hbas[0] # create vhba based on the first online hba old_vhbas = utils_npiv.find_hbas("vhba") logging.debug("Original online vHBAs: %s", old_vhbas) new_vhba = utils_npiv.nodedev_create_from_xml( {"nodedev_parent": first_online_hba, "scsi_wwnn": scsi_wwnn, "scsi_wwpn": scsi_wwpn}) if not utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT): test.fail("vhba not successfully created") new_vhbas.append(new_vhba) # find first available lun of the newly created vhba lun_dicts = [] first_lun = {} if not utils_misc.wait_for(lambda: utils_npiv.find_scsi_luns(new_vhba), timeout=_TIMEOUT): test.fail("There is no available lun storage for " "wwpn: %s, please check your wwns or " "contact IT admins" % scsi_wwpn) lun_dicts = utils_npiv.find_scsi_luns(new_vhba) logging.debug("The luns discovered are: %s", lun_dicts) first_lun = lun_dicts[0] # prepare hostdev xml for the first lun kwargs = {'addr_bus': first_lun['bus'], 'addr_target': first_lun['target'], 'addr_unit': 
first_lun['unit']} new_hostdev_xml = utils_npiv.create_hostdev_xml( adapter_name="scsi_host"+first_lun['scsi'], **kwargs) logging.info("New hostdev xml as follows:") logging.info(new_hostdev_xml) new_hostdev_xml.xmltreefile.write() if attach_method == "hot": # attach-device the lun's hostdev xml to guest vm result = virsh.attach_device(vm_name, new_hostdev_xml.xml) libvirt.check_exit_status(result, status_error) elif attach_method == "cold": if vm.is_alive(): vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) devices = vmxml.devices devices.append(new_hostdev_xml) vmxml.devices = devices vmxml.sync() vm.start() session = vm.wait_for_login() logging.debug("The new vm's xml is: \n%s", vmxml) # login vm and check the disk check_result = check_in_vm(vm, device_target, old_parts) if not check_result: test.fail("check disk in vm failed") result = virsh.detach_device(vm_name, new_hostdev_xml.xml) libvirt.check_exit_status(result, status_error) # login vm and check disk actually removed parts_after_detach = libvirt.get_parts_list(session) old_parts.sort() parts_after_detach.sort() if parts_after_detach == old_parts: logging.info("hostdev successfully detached.") else: test.fail("Device not successfully detached. " "Still existing in vm's /proc/partitions") finally: utils_npiv.vhbas_cleanup(new_vhbas) # recover vm if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync() process.system('service multipathd restart', verbose=True)
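# utils_npiv.create_hostdev_xml() builds a SCSI <hostdev> element for the LUN
# found above.  Roughly, the result looks like the snippet below; the adapter
# name and the address numbers depend on the vHBA/LUN that was discovered, so
# the values here are placeholders only:
#
#   <hostdev mode='subsystem' type='scsi'>
#     <source>
#       <adapter name='scsi_host6'/>
#       <address bus='0' target='0' unit='0'/>
#     </source>
#   </hostdev>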
def run(test, params, env): """ Test command: virsh domstate. 1.Prepare test environment. 2.When the libvirtd == "off", stop the libvirtd service. 3.Perform virsh domstate operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(vm_name) libvirtd_state = params.get("libvirtd", "on") vm_ref = params.get("domstate_vm_ref") status_error = (params.get("status_error", "no") == "yes") extra = params.get("domstate_extra", "") vm_action = params.get("domstate_vm_action", "") vm_oncrash_action = params.get("domstate_vm_oncrash") reset_action = "yes" == params.get("reset_action", "no") dump_option = params.get("dump_option", "") start_action = params.get("start_action", "normal") kill_action = params.get("kill_action", "normal") check_libvirtd_log = params.get("check_libvirtd_log", "no") err_msg = params.get("err_msg", "") remote_uri = params.get("remote_uri") domid = vm.get_id() domuuid = vm.get_uuid() if vm_ref == "id": vm_ref = domid elif vm_ref == "hex_id": vm_ref = hex(int(domid)) elif vm_ref.find("invalid") != -1: vm_ref = params.get(vm_ref) elif vm_ref == "name": vm_ref = vm_name elif vm_ref == "uuid": vm_ref = domuuid # Back up xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Back up qemu.conf qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() # Config libvirtd log if check_libvirtd_log == "yes": libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_log_file = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log") libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor ' '3:remote 4:event"') libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() # Get image file image_source = vm.get_first_disk_devices()['source'] logging.debug("image source: %s" % image_source) new_image_source = image_source + '.rename' dump_path = os.path.join(data_dir.get_tmp_dir(), "dump/") logging.debug("dump_path: %s", dump_path) try: os.mkdir(dump_path) except OSError: # If the path already exists then pass pass dump_file = "" try: # Let's have guest memory less so that dumping core takes # time which doesn't timeout the testcase if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']: memory_value = int(params.get("memory_value", "2097152")) memory_unit = params.get("memory_unit", "KiB") vmxml.set_memory(memory_value) vmxml.set_memory_unit(memory_unit) logging.debug(vmxml) vmxml.sync() if vm_action == "crash": if vm.is_alive(): vm.destroy(gracefully=False) vmxml.on_crash = vm_oncrash_action if not vmxml.xmltreefile.find('devices').findall('panic'): # Add <panic> device to domain panic_dev = Panic() if "ppc" not in platform.machine(): panic_dev.addr_type = "isa" panic_dev.addr_iobase = "0x505" vmxml.add_device(panic_dev) vmxml.sync() # Config auto_dump_path in qemu.conf qemu_conf.auto_dump_path = dump_path libvirtd.restart() if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']: dump_file = dump_path + "*" + vm_name[:20] + "-*" # Start VM and check the panic device virsh.start(vm_name, ignore_status=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) # Skip this test if no panic device find if not vmxml_new.xmltreefile.find('devices').findall('panic'): test.cancel("No 'panic' device in the guest. 
Maybe your " "libvirt version doesn't support it.") try: if vm_action == "suspend": virsh.suspend(vm_name, ignore_status=False) elif vm_action == "resume": virsh.suspend(vm_name, ignore_status=False) virsh.resume(vm_name, ignore_status=False) elif vm_action == "destroy": virsh.destroy(vm_name, ignore_status=False) elif vm_action == "start": virsh.destroy(vm_name, ignore_status=False) if start_action == "rename": # rename the guest image file to make guest fail to start os.rename(image_source, new_image_source) virsh.start(vm_name, ignore_status=True) else: virsh.start(vm_name, ignore_status=False) if start_action == "restart_libvirtd": libvirtd.restart() elif vm_action == "kill": if kill_action == "stop_libvirtd": libvirtd.stop() utils_misc.kill_process_by_pattern(vm_name) libvirtd.restart() elif kill_action == "reboot_vm": virsh.reboot(vm_name, ignore_status=False) utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL) else: utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL) elif vm_action == "crash": session = vm.wait_for_login() session.cmd("service kdump stop", ignore_all_errors=True) # Enable sysRq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # Send key ALT-SysRq-c to crash VM, and command will not # return as vm crashed, so fail early for 'destroy' and # 'preserve' action. For 'restart', 'coredump-restart' # and 'coredump-destroy' actions, they all need more time # to dump core file or restart OS, so using the default # session command timeout(60s) try: if vm_oncrash_action in ['destroy', 'preserve']: timeout = 3 else: timeout = 60 session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout) except (ShellTimeoutError, ShellProcessTerminatedError): pass session.close() elif vm_action == "dump": dump_file = dump_path + "*" + vm_name + "-*" virsh.dump(vm_name, dump_file, dump_option, ignore_status=False) except process.CmdError as detail: test.error("Guest prepare action error: %s" % detail) if libvirtd_state == "off": libvirtd.stop() # Timing issue cause test to check domstate before prior action # kill gets completed if vm_action == "kill": time.sleep(2) if remote_uri: remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM") remote_pwd = params.get("remote_pwd", None) remote_user = params.get("remote_user", "root") if remote_ip.count("EXAMPLE.COM"): test.cancel("Test 'remote' parameters not setup") ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd) result = virsh.domstate(vm_ref, extra, ignore_status=True, debug=True, uri=remote_uri) status = result.exit_status output = result.stdout.strip() # check status_error if status_error: if not status: if libvirtd_state == "off" and libvirt_version.version_compare( 5, 6, 0): logging.info( "From libvirt version 5.6.0 libvirtd is restarted " "and command should succeed.") else: test.fail("Run successfully with wrong command!") else: if status or not output: test.fail("Run failed with right command") if extra.count("reason"): if vm_action == "suspend": # If not, will cost long time to destroy vm virsh.destroy(vm_name) if not output.count("user"): test.fail(err_msg % vm_action) elif vm_action == "resume": if not output.count("unpaused"): test.fail(err_msg % vm_action) elif vm_action == "destroy": if not output.count("destroyed"): test.fail(err_msg % vm_action) elif vm_action == "start": if start_action == "rename": if not output.count("shut off (failed)"): test.fail(err_msg % vm_action) else: if not output.count("booted"): test.fail(err_msg % vm_action) elif vm_action == "kill": if not output.count("crashed"): 
test.fail(err_msg % vm_action) elif vm_action == "crash": if not check_crash_state(output, vm_oncrash_action, vm_name, dump_file): test.fail(err_msg % vm_action) # VM will be in preserved state, perform virsh reset # and check VM reboots and domstate reflects running # state from crashed state as bug is observed here if vm_oncrash_action == "preserve" and reset_action: virsh_dargs = {'debug': True, 'ignore_status': True} ret = virsh.reset(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.domstate(vm_name, extra, **virsh_dargs).stdout.strip() if "paused (crashed)" not in ret: test.fail("vm fails to change state from crashed" " to paused after virsh reset") # it will be in paused (crashed) state after reset # and resume is required for the vm to reboot ret = virsh.resume(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) vm.wait_for_login() cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip() if "running" not in cmd_output: test.fail("guest state failed to get updated") if vm_oncrash_action in [ 'coredump-destroy', 'coredump-restart' ]: # dump_file contains shell wildcards, so expand it before checking import glob find_dump_file = glob.glob(dump_file) if not find_dump_file: test.fail("Core dump file is not created in dump " "path: %s" % dump_path) # To cover bug 1178652 if (vm_oncrash_action == "rename-restart" and check_libvirtd_log == "yes"): libvirtd.restart() if not os.path.exists(libvirtd_log_file): test.fail("Expected VM log file %s does not exist" % libvirtd_log_file) cmd = ("grep -nr '%s' %s" % (err_msg, libvirtd_log_file)) if not process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail( "Found error message %s in log file: %s." % (err_msg, libvirtd_log_file)) elif vm_action == "dump": if dump_option == "--live": if not output.count("running (unpaused)"): test.fail(err_msg % vm_action) elif dump_option == "--crash": if not output.count("shut off (crashed)"): test.fail(err_msg % vm_action) if vm_ref == "remote": if not (re.search("running", output) or re.search( "blocked", output) or re.search("idle", output)): test.fail("Run failed with right command") finally: qemu_conf.restore() if check_libvirtd_log == "yes": libvirtd_conf.restore() if os.path.exists(libvirtd_log_file): os.remove(libvirtd_log_file) libvirtd.restart() if vm_action == "start" and start_action == "rename": os.rename(new_image_source, image_source) if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if os.path.exists(dump_path): shutil.rmtree(dump_path)
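# The Panic() device injected for the crash scenarios above ends up in the
# domain XML roughly as shown below; this is the ISA pvpanic form the test
# requests on non-ppc machines (the address attributes are skipped for "ppc"
# machine types, which use a built-in model instead):
#
#   <panic model='isa'>
#     <address type='isa' iobase='0x505'/>
#   </panic>
#
# combined with an <on_crash> action such as coredump-restart, depending on
# the vm_oncrash_action the variant selects.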
def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err)
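# When create_by_xml is set, the pool XML assembled above describes an rbd
# pool of roughly the following shape; the host name, port and auth values
# come from the test parameters, so the ones below are placeholders only:
#
#   <pool type='rbd'>
#     <name>rbd_pool</name>
#     <source>
#       <name>ceph-pool-name</name>
#       <host name='ceph-mon.example.com' port='6789'/>
#       <auth type='ceph' username='admin'>
#         <secret usage='cephlibvirt'/>
#       </auth>
#     </source>
#   </pool>
#
# The else branch reaches the same state through
# libvirt_storage.StoragePool().define_rbd_pool() instead of raw XML.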
def run(test, params, env): """ Test command: virsh change-media. The command changes the media used by CD or floppy drives. Test steps: 1. Prepare test environment. 2. Perform virsh change-media operation. 3. Recover test environment. 4. Confirm the test result. """ def is_attached(vmxml_devices, disk_type, source_file, target_dev): """ Check attached device and disk exist or not. :param vmxml_devices: VMXMLDevices instance :param disk_type: disk's device type: cdrom or floppy :param source_file : disk's source file to check :param target_dev : target device name :return: True/False if backing file and device found """ disks = vmxml_devices.by_device_tag('disk') for disk in disks: if disk.device != disk_type: continue if disk.target['dev'] != target_dev: continue if disk.xmltreefile.find('source') is not None: if disk.source.attrs['file'] != source_file: continue else: continue # All three conditions met logging.debug("Find %s in given disk XML", source_file) return True logging.debug("Not find %s in gievn disk XML", source_file) return False def check_result(vm_name, disk_source, disk_type, disk_target, flags, vm_state, attach=True): """ Check the test result of attach/detach-device command. """ active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) active_attached = is_attached(active_vmxml.devices, disk_type, disk_source, disk_target) if vm_state != "transient": inactive_vmxml = vm_xml.VMXML.new_from_dumpxml( vm_name, options="--inactive") inactive_attached = is_attached(inactive_vmxml.devices, disk_type, disk_source, disk_target) if flags.count("config") and not flags.count("live"): if vm_state != "transient": if attach: if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --config options used for" " attachment") if vm_state != "shutoff": if active_attached: raise exceptions.TestFail( "Active domain XML updated" " when --config options used" " for attachment") else: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --config options used for" " detachment") if vm_state != "shutoff": if not active_attached: raise exceptions.TestFail( "Active domain XML updated" " when --config options used" " for detachment") elif flags.count("live") and not flags.count("config"): if attach: if vm_state in ["paused", "running", "transient"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live options used for" " attachment") if vm_state in ["paused", "running"]: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated" " when --live options used for" " attachment") else: if vm_state in ["paused", "running", "transient"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live options used for" " detachment") if vm_state in ["paused", "running"]: if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated" " when --live options used for" " detachment") elif flags.count("live") and flags.count("config"): if attach: if vm_state in ["paused", "running"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --live --config options" " used for attachment") if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --live --config options " "used for attachment") else: if vm_state in ["paused", "running"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated " "when --live --config options " "used for 
detachment") if inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated" " when --live --config options " "used for detachment") elif flags.count("current") or flags == "": if attach: if vm_state in ["paused", "running", "transient"]: if not active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --current options used " "for attachment") if vm_state in ["paused", "running"]: if inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated " "when --current options used " "for live attachment") if vm_state == "shutoff" and not inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated " "when --current options used for " "attachment") else: if vm_state in ["paused", "running", "transient"]: if active_attached: raise exceptions.TestFail( "Active domain XML not updated" " when --current options used " "for detachment") if vm_state in ["paused", "running"]: if not inactive_attached: raise exceptions.TestFail( "Inactive domain XML updated " "when --current options used " "for live detachment") if vm_state == "shutoff" and inactive_attached: raise exceptions.TestFail( "Inactive domain XML not updated " "when --current options used for " "detachment") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_ref = params.get("change_media_vm_ref") action = params.get("change_media_action") action_twice = params.get("change_media_action_twice", "") pre_vm_state = params.get("pre_vm_state") options = params.get("change_media_options") options_twice = params.get("change_media_options_twice", "") device_type = params.get("change_media_device_type", "cdrom") target_device = params.get("change_media_target_device", "hdc") init_iso_name = params.get("change_media_init_iso") old_iso_name = params.get("change_media_old_iso") new_iso_name = params.get("change_media_new_iso") virsh_dargs = {"debug": True, "ignore_status": True} if device_type not in ['cdrom', 'floppy']: raise exceptions.TestSkipError("Got a invalid device type:/n%s" % device_type) # Backup for recovery. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) old_iso = os.path.join(data_dir.get_tmp_dir(), old_iso_name) new_iso = os.path.join(data_dir.get_tmp_dir(), new_iso_name) if vm_ref == "name": vm_ref = vm_name if vm.is_alive(): vm.destroy(gracefully=False) try: if not init_iso_name: init_iso = "" else: init_iso = os.path.join(data_dir.get_tmp_dir(), init_iso_name) # Prepare test files. libvirt.create_local_disk("iso", old_iso) libvirt.create_local_disk("iso", new_iso) # Check domain's disk device disk_blk = vm_xml.VMXML.get_disk_blk(vm_name) logging.info("disk_blk %s", disk_blk) if target_device not in disk_blk: if vm.is_alive(): virsh.destroy(vm_name) logging.info("Adding device") libvirt.create_local_disk("iso", init_iso) disk_params = { "disk_type": "file", "device_type": device_type, "driver_name": "qemu", "driver_type": "raw", "target_bus": "ide", "readonly": "yes" } libvirt.attach_additional_device(vm_name, target_device, init_iso, disk_params) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Turn VM into certain state. if pre_vm_state == "running": logging.info("Starting %s..." % vm_name) if vm.is_dead(): vm.start() vm.wait_for_login().close() elif pre_vm_state == "shutoff": logging.info("Shuting down %s..." % vm_name) if vm.is_alive(): vm.destroy(gracefully=False) elif pre_vm_state == "paused": logging.info("Pausing %s..." 
% vm_name) if vm.is_dead(): vm.start() vm.wait_for_login().close() if not vm.pause(): raise exceptions.TestSkipError("Cann't pause the domain") time.sleep(5) elif pre_vm_state == "transient": logging.info("Creating %s..." % vm_name) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() raise exceptions.TestSkipError("Cann't create the domain") # Libvirt will ignore --source when action is eject attach = True device_source = old_iso if action == "--eject ": source = "" attach = False else: source = device_source all_options = action + options + " " + source ret = virsh.change_media(vm_ref, target_device, all_options, ignore_status=True, debug=True) status_error = False if pre_vm_state == "shutoff": if options.count("live"): status_error = True elif pre_vm_state == "transient": if options.count("config"): status_error = True if vm.is_paused(): vm.resume() vm.wait_for_login().close() # For paused vm, change_media for eject/update operation # should be executed again for it takes effect if ret.exit_status: if not action.count("insert") and not options.count("force"): ret = virsh.change_media(vm_ref, target_device, all_options, ignore_status=True, debug=True) if not status_error and ret.exit_status: raise exceptions.TestFail("Please check: Bug 1289069 - Ejecting " "locked cdrom tray using update-device" " fails but next try succeeds") libvirt.check_exit_status(ret, status_error) if not ret.exit_status: check_result(vm_name, device_source, device_type, target_device, options, pre_vm_state, attach) if action_twice: if pre_vm_state == "paused": if not vm.pause(): raise exceptions.TestFail("Cann't pause the domain") time.sleep(5) attach = True device_source = new_iso if action_twice == "--eject ": #options_twice += " --force " source = "" attach = False else: source = device_source all_options = action_twice + options_twice + " " + source time.sleep(5) ret = virsh.change_media(vm_ref, target_device, all_options, ignore_status=True, debug=True) status_error = False if pre_vm_state == "shutoff": if options_twice.count("live"): status_error = True elif pre_vm_state == "transient": if options_twice.count("config"): status_error = True if action_twice == "--insert ": if pre_vm_state in ["running", "paused"]: if options in ["--force", "--current", "", "--live"]: if options_twice.count("config"): status_error = True elif options == "--config": if options_twice in ["--force", "--current", ""]: status_error = True elif options_twice.count("live"): status_error = True elif pre_vm_state == "transient": if ret.exit_status: status_error = True elif pre_vm_state == "shutoff": if options.count("live"): status_error = True if vm.is_paused(): vm.resume() vm.wait_for_login().close() # For paused vm, change_media for eject/update operation # should be executed again for it takes effect if ret.exit_status and not action_twice.count("insert"): ret = virsh.change_media(vm_ref, target_device, all_options, ignore_status=True, debug=True) if not status_error and ret.exit_status: raise exceptions.TestFail( "Please check: Bug 1289069 - Ejecting " "locked cdrom tray using update-device" " fails but next try succeeds") libvirt.check_exit_status(ret, status_error) if not ret.exit_status: check_result(vm_name, device_source, device_type, target_device, options_twice, pre_vm_state, attach) # Try to start vm. if vm.is_dead(): vm.start() vm.wait_for_login().close() finally: if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. 
        vmxml_backup.sync()
        # Remove disks
        if os.path.exists(init_iso):
            os.remove(init_iso)
        if os.path.exists(old_iso):
            os.remove(old_iso)
        if os.path.exists(new_iso):
            os.remove(new_iso)
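# A minimal standalone sketch (names are illustrative, not part of the test
# above) of how the option string for `virsh change-media` is assembled in the
# scenarios above: the source path is only appended for insert/update actions,
# since libvirt ignores --source when ejecting.
def build_change_media_options(action, options, device_source):
    """Compose the option string passed to virsh change-media."""
    source = "" if action.strip() == "--eject" else device_source
    return "%s %s %s" % (action.strip(), options, source)

# Example: build_change_media_options("--eject ", "--live", "/tmp/old.iso")
# returns "--eject --live " (no source), mirroring `all_options` above.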
def check_vol(vol_params): """ Check volume information. """ pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err)
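# Hypothetical helper (not used by check_vol above) illustrating the string
# check applied to vol-key/vol-path output: both results are expected to
# contain "<pool source dir>/<volume name>".
def path_matches_volume(cmd_stdout, disk_src_pool, vol_name):
    """Return True if virsh vol-key/vol-path output points at the volume."""
    return "%s/%s" % (disk_src_pool, vol_name) in cmd_stdout.strip()

# Example: path_matches_volume("/var/lib/libvirt/images/vol1\n",
#                              "/var/lib/libvirt/images", "vol1") -> True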
def run(test, params, env): """ Test command: virsh set-user-password The command set the user password inside the domain 1. Prepare test environment, start vm with guest agent 2. Perform virsh set-user-password operation(encrypted/ non-encrypted) 3. Login the vm with new/old password 4. Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) encrypted = params.get("encrypted", "no") == "yes" option = params.get("option", "no") == "yes" add_user = params.get("add_user", "no") == "yes" set_user_name = params.get("set_user_name", "root") status_error = params.get("status_error", "no") err_domain = params.get("err_domain", "") err_msg = params.get("err_msg", "") start_ga = params.get("start_ga", "yes") == "yes" ori_passwd = vm.params.get("password") new_passwd = "a" + ori_passwd passwd = new_passwd # Back up domain XML vmxml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vmxml_bak.copy() if start_ga: # Start guest agent in vm vm.prepare_guest_agent(prepare_xml=False, channel=False, start=True) # Error test if status_error == "yes": if err_domain: vm_name = err_domain ret = virsh.set_user_password(vm_name, set_user_name, new_passwd, encrypted=encrypted, option=option, debug=True) libvirt.check_result(ret, err_msg) # Normal test else: # Get guest ip address session = vm.wait_for_login(timeout=30, username="******", password=ori_passwd) vm_mac = vm.get_virsh_mac_address() vm_ip = utils_net.get_guest_ip_addr(session, vm_mac) # Add user if add_user: cmd = " rm -f /etc/gshadow.lock & useradd %s" % set_user_name status, output = session.cmd_status_output(cmd) if status: test.error("Adding user '%s' got failed: '%s'" % (set_user_name, output)) session.close() # Set the user password in vm if encrypted: cmd = "openssl passwd -crypt %s" % new_passwd ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) en_passwd = str(ret.stdout.strip()) passwd = en_passwd ret = virsh.set_user_password(vm_name, set_user_name, passwd, encrypted=encrypted, option=option, debug=True) libvirt.check_exit_status(ret) # Login with new password try: session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, new_passwd, r"[\#\$]\s*$", timeout=30) session.close() except remote.LoginAuthenticationError, e: logging.debug(e) # Login with old password try: session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, ori_passwd, r"[\#\$]\s*$", timeout=10) session.close() except remote.LoginAuthenticationError: logging.debug("Login with old password failed as expected.") # Change the password back in VM ret = virsh.set_user_password(vm_name, set_user_name, ori_passwd, False, option=option, debug=True) libvirt.check_exit_status(ret) # Login with the original password try: session = remote.wait_for_login("ssh", vm_ip, "22", set_user_name, ori_passwd, r"[\#\$]\s*$", timeout=30) session.close() except remote.LoginAuthenticationError, e: logging.debug(e) if start_ga: # Stop guest agent in vm vm.prepare_guest_agent(prepare_xml=False, channel=False, start=False) # Del user if add_user: session = vm.wait_for_login(timeout=30, username="******", password=ori_passwd) cmd = "userdel -r %s" % set_user_name status, output = session.cmd_status_output(cmd) if status: test.error("Deleting user '%s' got failed: '%s'" % (set_user_name, output)) session.close()
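# A hedged sketch showing that the encrypted password handed to
# `virsh set-user-password --encrypted` could also be produced with the
# standard library instead of shelling out to `openssl passwd -crypt`.
# Assumes a Linux host; the crypt module is deprecated in newer Python 3
# releases, so the openssl call used above remains the portable choice.
import crypt
import random
import string

def des_crypt(password):
    """Return a classic crypt(3) hash, equivalent to `openssl passwd -crypt`."""
    salt = "".join(random.choice(string.ascii_letters + string.digits)
                   for _ in range(2))
    return crypt.crypt(password, salt)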
def run(test, params, env): """ Test startupPolicy for CD-ROM/floppy/Volume disks. Steps: 1. Prepare disk media image. 2. Setup startupPolicy for a disk. 3. Start the domain. 4. Save the domain. 5. Remove the disk source file and restore the domain. 6. Update startupPolicy for a disk. 7. Destroy the domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) startup_policy = params.get("policy") def create_iscsi_pool(): """ Setup iSCSI target,and create one iSCSI pool. """ libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size='1G', chap_user="", chap_passwd="", portal_ip=disk_src_host) # Define an iSCSI pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = pool_name poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iSCSI pool. virsh.pool_destroy(pool_name, **virsh_dargs) cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) def create_volume(pvt, created_vol_name=None): """ Create iSCSI volume. :param pvt: PoolVolumeTest object :param created_vol_name: Created volume name """ try: if pool_type == "iscsi": create_iscsi_pool() else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format, capacity=capacity, allocation=None, pool_name=pool_name) except Exception as pool_exception: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) test.error("Error occurred when prepare" + "pool xml with message %s:\n" % str(pool_exception)) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(pool_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout)) try: return vol_list[1] except IndexError: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: tmp_vol_name, tmp_vol_path = vol_info else: test.error("Failed to get volume info") process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'), shell=True) return vol_info def check_disk_source(vm_name, target_dev, expect_value): """ Check the disk source: file and startupPolicy. :param vm_name: Domain name :param target_dev: Disk's target device :param expect_value: Expect value of source file and source startupPolicy """ logging.debug("Expect source file is '%s'", expect_value[0]) logging.debug("Expect source startupPolicy is '%s'", expect_value[1]) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() source_value = [] try: disk_source = disks[target_dev].find('source') source_value.append(disk_source.get('file')) source_value.append(disk_source.get('startupPolicy')) except KeyError: test.error("No %s in domain %s" % (target_dev, vm_name)) logging.debug("Actual source file is '%s'", source_value[0]) logging.debug("Actual source startupPolicy is '%s'", source_value[1]) if source_value == expect_value: logging.debug("Domain disk XML check pass") else: test.error("Domain disk XML check fail") def create_disk_xml(): """ Create a disk xml file for attaching to a domain. 
""" if disk_type == "file": process.run("qemu-img create %s %s" % (media_file, image_size), shell=True) disk_params = {'device_type': device_type, 'type_name': disk_type, 'target_dev': target_dev, 'target_bus': target_bus} if disk_type == "file": disk_params_src = {'source_protocol': "file", 'source_file': media_file, 'source_startupPolicy': startup_policy} elif disk_type == "volume": disk_params_src = {'source_pool': pool_name, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_startupPolicy': startup_policy} if pool_type == "iscsi": disk_params_src.update({'source_mode': "host"}) disk_params.update(disk_params_src) disk_xml = libvirt.create_disk_xml(disk_params) shutil.copyfile(disk_xml, disk_xml_file) return disk_xml def check_in_vm(old_parts): """ Check mount/read/write disk in VM. :param old_parts: pre-operated partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) logging.debug("new parted:%s", new_parts) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False if 'sr' not in added_part and 'fd' not in added_part: cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", output) if status != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str): """ Check updated policy after executing virsh update-device. :param origin_policy: the inherit startup policy value. :param policy_list: updated policy list. :param xml_policy_file: xml file for startupPolicy. :param device_type: device type,cdrom or disk.,etc :param flag_str: it can be --config,--live and --persistent. 
""" for policy in policy_list: xmltreefile = XMLTreeFile(xml_policy_file) try: policy_item = xmltreefile.find('/source') policy_item.set('startupPolicy', policy) except AttributeError as elem_attr: test.error("Fail to find startupPolicy attribute.%s", str(elem_attr)) xmltreefile.write(xml_policy_file, encoding="UTF-8") ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True) if all([device_type == "disk", policy == "requisite"]): libvirt.check_exit_status(ret, True) return else: libvirt.check_exit_status(ret) def check_policy_value(active_policy, inactive_policy): """ Check policy value in dumpxml with active or inactive option :param active_policy: active policy attribute value :param inactive_policy: inactive policy attribute value """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not active_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in active state is not equal to expected:%s" % (active_policy, disk.source.attrs["startupPolicy"])) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not inactive_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in inactive state is not equal to expected: %s" % (inactive_policy, disk.source.attrs["startupPolicy"])) if flag_str == "--live": check_policy_value(policy, origin_policy) elif flag_str == "--config": check_policy_value(origin_policy, policy) elif flag_str == "--persistent": check_policy_value(policy, policy) def check_source_update(xml_policy_file): """ Update source and policy at the same time,then check those changes. :param xml_policy_file: VM xml policy file """ xmltreefile = XMLTreeFile(xml_policy_file) policy_item = xmltreefile.find('/source') def configure_startup_policy(update=False, policy='optional'): """ Configure startupPolicy attribute value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ if update: del policy_item.attrib["startupPolicy"] else: policy_item.set("startupPolicy", policy) flag_option = "--live" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False # Update source and startUpPolicy attribute value. def update_source_policy(update=True, policy='optional'): """ Update startupPolicy source value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ source_file = policy_item.get('file') if update: new_source_file = source_file+".empty" else: new_source_file = source_file+".new" shutil.copyfile(source_file, new_source_file) policy_item.set("file", new_source_file) policy_item.set("startupPolicy", policy) flag_option = "--persistent" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False function_list = [configure_startup_policy, update_source_policy, configure_startup_policy, update_source_policy] function_parameter = [False, False, True, True] # Loop all above scenarios to update device. 
for index in list(range(len(function_list))): try: func = function_list[index] para = function_parameter[index] flag_option, update_error = func(para) ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True) libvirt.check_exit_status(ret, expect_error=update_error) except AttributeError as elem_attr: test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr)) except Exception as update_device_exception: test.error("Fail to update device:%s" % str(update_device_exception)) finally: source_file = policy_item.get('file') new_source_file = source_file+".new" if os.path.exists(new_source_file): os.remove(new_source_file) def rename_file(source_file, target_file, revert=False): """ Rename a file or revert it. :param source_file: The source file name. :param target_file: The target file name. :param revert: It can be True or False. """ try: if not revert: os.rename(source_file, target_file) logging.debug("Rename %s to %s", source_file, target_file) else: os.rename(target_file, source_file) logging.debug("Rename %s to %s", target_file, source_file) except OSError as err: test.fail("Rename image failed: %s" % str(err)) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Get start,restore configuration parameters. start_error = "yes" == params.get("start_error", "no") restore_error = "yes" == params.get("restore_error", "no") virsh_dargs = {'debug': True, 'ignore_status': True} attach_option = params.get("attach_option") # Create disk xml and attach it. device_type = params.get("device_type") disk_type = params.get("disk_type", "network") disk_src_host = params.get("disk_source_host", "127.0.0.1") target_dev = params.get("target_dev") target_bus = params.get("disk_target_bus", "virtio") image_size = params.get("image_size", "1.44M") emulated_image = "emulated-iscsi" # Storage pool and volume related paramters. pool_name = params.get("pool_name", "iscsi_pool") pool_type = params.get("pool_type") pool_target = params.get("pool_target", "/dev/disk/by-path") pool_src_host = params.get("pool_source_host", "127.0.0.1") vol_name = params.get("volume_name") capacity = params.get("volume_size", "1048576") vol_format = params.get("volume_format") # Source file parameters. media_name = params.get("media_name") media_file = os.path.join(test.tmpdir, media_name) media_file_new = media_file + ".new" save_file = os.path.join(test.tmpdir, "vm.save") snapshot_name = "s1" # Policy related paramters. disk_xml_file = os.path.join(test.tmpdir, "attach_disk.xml") disk_xml_policy_file = os.path.join(test.tmpdir, "attach_policy_disk.xml") update_policy = "yes" == params.get("update_policy", "no") policy_only = "yes" == params.get("policy_only", "no") update_policy_list = params.get("update_policy_list").split() expect_value = [None, startup_policy] try: if disk_type == "volume": pvt = libvirt.PoolVolumeTest(test, params) vol_name, vol_path = create_volume(pvt, vol_name) vol_path_new = vol_path + ".new" # Create disk xml. create_disk_xml() if vm.is_alive(): vm.destroy() try: # Backup disk xml file for policy update if update_policy=True. 
if update_policy: shutil.copyfile(disk_xml_file, disk_xml_policy_file) result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file, flagstr="--config", **virsh_dargs) # For iSCSI pool volume,startupPolicy attribute is not valid for it. # Moreover,setting disk 'requisite' is allowed only for cdrom or floppy. if pool_type == "iscsi" or all([device_type == "disk", startup_policy == "requisite"]): libvirt.check_exit_status(result, expect_error=True) return else: libvirt.check_exit_status(result, expect_error=False) except Exception as attach_device_exception: logging.debug("Attach device throws exception:%s", str(attach_device_exception)) os.remove(media_file) test.error("Attach %s fail" % device_type) # Check update policy operations. if disk_type == "file" and update_policy: vm.start() if policy_only: check_policy_update(startup_policy, update_policy_list, disk_xml_policy_file, device_type, attach_option) else: check_source_update(disk_xml_policy_file) elif disk_type == "file": # Step 1. Start domain and destroy it normally vm.start() vm.destroy() # Step 2. Remove the source_file then start the domain rename_file(media_file, media_file_new) result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # For libvirt version >=2.0.0, feature is updated and startup policy attribute # can not exist alone without source protocol. if not start_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 3. Move back the source file and start the domain(if needed). rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): vm.start() # Step 4. Save the domain normally, then remove the source file # and restore it back vm.save_to_file(save_file) rename_file(media_file, media_file_new) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) if not restore_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 5. Move back the source file and restore the domain(if needed) rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=False) elif disk_type == "volume": # Step 1. Start domain and destroy it normally. vm.start() # Step 1 Start VM successfully. if not check_in_vm(old_parts): test.fail("Check disk partitions in VM failed") # Step 2 Move the volume to other place, refresh the pool, then reboot the guest. rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) vm.destroy() result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # Step 3 Move back the source file and start. rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if not vm.is_alive(): vm.start() # Step 4 Save the domain normally, then remove the source file,then restore domain. vm.save_to_file(save_file) rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) # Step 5, Create snapshot,move the source to other place,then revert snapshot. 
if device_type == "disk": rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if restore_error: result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result) ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs) libvirt.check_exit_status(ret) rename_file(vol_path, vol_path_new) ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs) # Clean up snapshot. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() if disk_type == "volume": pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) if os.path.exists(save_file): os.remove(save_file) if os.path.exists(disk_xml_file): os.remove(disk_xml_file) if os.path.exists(media_file): os.remove(media_file) if os.path.exists(disk_xml_policy_file): os.remove(disk_xml_policy_file)
def manipulate_domain(vm_name, vm_operation, recover=False):
    """
    Operate the domain to a given state or recover it.
    """
    tmpdir = os.path.join(data_dir.get_root_dir(), 'tmp')
    save_file = os.path.join(tmpdir, vm_name + ".save")
    if not recover:
        if vm_operation == "save":
            save_option = ""
            result = virsh.save(vm_name, save_file, save_option,
                                ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name, managedsave_option,
                                       ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            # Wait for domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")
    else:
        if vm_operation == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif vm_operation in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need to recover the domain")
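# Small pure-stdlib sketch of the polling pattern used after
# `virsh dompmsuspend --target disk` above (utils_misc.wait_for): retry a
# predicate until it turns true or a timeout expires.
import time

def wait_until(predicate, timeout=5, step=0.5):
    """Return True once predicate() is truthy, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(step)
    return False

# Example (vm_is_shut_off is a placeholder predicate):
# wait_until(lambda: vm_is_shut_off("demo-vm"), timeout=5)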
def run(test, params, env): """ Test command: virsh net-dhcp-leases 1. Create a new network and run virsh command to check dhcp leases info. 2. Attach an interface before or after start the domain, then check the dhcp leases info. 3. Clean the environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) net_name = params.get("net_name", "default") net_option = params.get("net_option", "") status_error = "yes" == params.get("status_error", "no") prepare_net = "yes" == params.get("prepare_net", "yes") hotplug_iface = "yes" == params.get("hotplug_interface", "no") filter_by_mac = "yes" == params.get("filter_by_mac", "no") invalid_mac = "yes" == params.get("invalid_mac", "no") # Generate a random string as the MAC address nic_mac = None if invalid_mac: nic_mac = utils_misc.generate_random_string(17) # Command won't fail on old libvirt if not libvirt_version.version_compare(1, 3, 1) and invalid_mac: logging.debug("Reset case to positive as BZ#1261432") status_error = False def create_network(): """ Create a network """ net_ip_addr = params.get("net_ip_addr", "192.168.200.1") net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0") net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2") net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254") netxml = network_xml.NetworkXML() netxml.name = net_name netxml.forward = {'mode': "nat"} ipxml = network_xml.IPXML() ipxml.address = net_ip_addr ipxml.netmask = net_ip_netmask ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end} netxml.set_ip(ipxml) netxml.create() def get_net_dhcp_leases(output): """ Return the dhcp lease info in a list """ leases = [] lines = output.splitlines() if not lines: return leases try: pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+" keys = re.findall(pat, lines[0]) for line in lines[2:]: values = re.findall(pat, line) leases.append(dict(list(zip(keys, values)))) return leases except Exception: test.error("Fail to parse output: %s" % output) def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120): """ Get interface IP address by given MAC addrss. If try_dhclint is True, then try to allocate IP addrss for the interface. """ session = vm.wait_for_login(login_nic_index, timeout=timeout, serial=True) def f(): return utils_net.get_guest_ip_addr(session, mac_addr) try: ip_addr = utils_misc.wait_for(f, 10) if ip_addr is None: iface_name = utils_net.get_linux_ifname(session, mac_addr) if try_dhclint: session.cmd("dhclient %s" % iface_name) ip_addr = utils_misc.wait_for(f, 10) else: # No IP for the interface, just print the interface name logging.warn( "Find '%s' with MAC address '%s', " "but which has no IP address", iface_name, mac_addr) finally: session.close() return ip_addr def check_net_lease(net_leases, expected_find=True): """ Check the dhcp lease info. 
""" if not net_leases: if expected_find: test.fail("Lease info is empty") else: logging.debug("No dhcp lease info find as expected") else: if not expected_find: test.fail("Find unexpected dhcp lease info: %s" % net_leases) find_mac = False for net_lease in net_leases: net_mac = net_lease['MAC address'] net_ip = net_lease['IP address'][:-3] if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac): find_mac = True logging.debug("Find '%s' in domain XML", net_mac) else: logging.debug("Not find '%s' in domain XML", net_mac) continue iface_ip = get_ip_by_mac(net_mac) if iface_ip and iface_ip != net_ip: test.fail("Address '%s' is not expected" % iface_ip) if expected_find and not find_mac: test.fail("No matched MAC address") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() if vm.is_alive(): vm.destroy(gracefully=False) login_nic_index = 0 new_nic_index = 0 # Cleanup dirty dnsmaq, firstly get all network,and destroy all networks except # default net_state = virsh.net_state_dict(only_names=True) logging.debug( "current networks: %s, destroy and undefine networks " "except default!", net_state) for net in net_state: if net != "default": virsh.net_destroy(net) virsh.net_undefine(net) cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'" pid_list = results_stdout_52lts(process.run( cmd, shell=True)).strip().split('\n') for pid in pid_list: utils_misc.safe_kill(pid, signal.SIGKILL) # Create new network if prepare_net: create_network() nets = virsh.net_state_dict() if net_name not in list(nets.keys()) and not status_error: test.error("Not find network '%s'" % net_name) expected_find = False try: result = virsh.net_dhcp_leases(net_name, mac=nic_mac, options=net_option, debug=True, ignore_status=True) utlv.check_exit_status(result, status_error) lease = get_net_dhcp_leases(result.stdout.strip()) check_net_lease(lease, expected_find) if not status_error: iface_mac = utils_net.generate_mac_address_simple() if filter_by_mac: nic_mac = iface_mac op = "--type network --source %s --mac %s" % (net_name, iface_mac) nic_params = { 'mac': iface_mac, 'nettype': 'bridge', 'ip_version': 'ipv4' } login_timeout = 120 if not hotplug_iface: op += " --config" virsh.attach_interface(vm_name, option=op, debug=True, ignore_status=False) vm.add_nic(**nic_params) vm.start() new_nic_index = vm.get_nic_index_by_mac(iface_mac) if new_nic_index > 0: login_nic_index = new_nic_index else: vm.start() # wait for VM start before hotplug interface vm.wait_for_serial_login() virsh.attach_interface(vm_name, option=op, debug=True, ignore_status=False) vm.add_nic(**nic_params) # As VM already started, so the login timeout could be shortened login_timeout = 10 new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True, timeout=login_timeout) # Allocate IP address for the new interface may fail, so only # check the result if get new IP address if new_interface_ip: expected_find = True result = virsh.net_dhcp_leases(net_name, mac=nic_mac, debug=False, ignore_status=True) utlv.check_exit_status(result, status_error) lease = get_net_dhcp_leases(result.stdout.strip()) check_net_lease(lease, expected_find) finally: # Delete the new attached interface if new_nic_index > 0: vm.del_nic(new_nic_index) if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() if prepare_net: virsh.net_destroy(net_name)
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Global variable to store max/current memory, # it may change after attach/detach new_max_mem = None new_cur_mem = None def get_vm_memtotal(session): """ Get guest total memory """ proc_meminfo = session.cmd_output("cat /proc/meminfo") # verify format and units are expected return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B', proc_meminfo).group(1)) def consume_vm_mem(size=1000, timeout=360): """ To consume guest memory, default size is 1000M """ session = vm.wait_for_login() # Mount tmpfs on /mnt and write to a file on it, # it is the memory operation sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs " "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M" " count={0}".format(size)) session.cmd(sh_cmd, timeout=timeout) session.close() def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if max_mem_rt: cmd += (" | grep 'slots=%s,maxmem=%sk'" % (max_mem_slots, max_mem_rt)) if tg_size: size = int(tg_size) * 1024 cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s" % size) if pg_size: cmd += ",host-nodes=%s" % node_mask if numa_memnode: for node in numa_memnode: if ('nodeset' in node and node['nodeset'] in node_mask): cmd += ",policy=%s" % node['mode'] cmd += ".*pc-dimm,node=%s" % tg_node if mem_addr: cmd += (".*slot=%s,addr=%s" % (mem_addr['slot'], int(mem_addr['base'], 16))) cmd += "'" # Run the command utils.run(cmd) def check_guest_meminfo(old_mem): """ Check meminfo on guest. """ assert old_mem is not None session = vm.wait_for_login() # Hot-plugged memory should be online by udev rules udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules" udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",' ' ATTR{state}=="offline", ATTR{state}="online"') cmd = ("grep memory %s || echo '%s' >> %s" % (udev_file, udev_rules, udev_file)) session.cmd(cmd) # Wait a while for new memory to be detected. utils_misc.wait_for( lambda: get_vm_memtotal(session) != int(old_mem), 5) new_mem = get_vm_memtotal(session) session.close() logging.debug("Memtotal on guest: %s", new_mem) if new_mem != int(old_mem) + int(tg_size): raise error.TestFail("Total memory on guest couldn't" " changed after attach memory " "device") def check_dom_xml(at_mem=False, dt_mem=False): """ Check domain xml options. 
""" # Global variable to store max/current memory global new_max_mem global new_cur_mem if attach_option.count("config"): dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) else: dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) try: xml_max_mem_rt = int(dom_xml.max_mem_rt) xml_max_mem = int(dom_xml.max_mem) xml_cur_mem = int(dom_xml.current_mem) assert int(max_mem_rt) == xml_max_mem_rt # Check attached/detached memory if at_mem: assert int(max_mem) + int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory assert int(cur_mem) + int(tg_size) == xml_cur_mem new_max_mem = xml_max_mem new_cur_mem = xml_cur_mem mem_dev = dom_xml.get_devices("memory") if len(mem_dev) != 1: raise error.TestFail("Found wrong number of" " memory device") assert int(tg_size) == int(mem_dev[0].target.size) assert int(tg_node) == int(mem_dev[0].target.node) elif dt_mem: assert int(new_max_mem) - int(tg_size) == xml_max_mem # Bug 1220702, skip the check for current memory assert int(new_cur_mem) - int(tg_size) == xml_cur_mem except AssertionError: utils.log_last_traceback() raise error.TestFail("Found unmatched memory setting" " from domain xml") def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(test.tmpdir, "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def create_mem_xml(): """ Create memory device xml. """ mem_xml = memory.Memory() mem_model = params.get("mem_model", "dimm") mem_xml.mem_model = mem_model if tg_size: tg_xml = memory.Memory.Target() tg_xml.size = int(tg_size) tg_xml.size_unit = tg_sizeunit tg_xml.node = int(tg_node) mem_xml.target = tg_xml if pg_size: src_xml = memory.Memory.Source() src_xml.pagesize = int(pg_size) src_xml.pagesize_unit = pg_unit src_xml.nodemask = node_mask mem_xml.source = src_xml if mem_addr: mem_xml.address = mem_xml.new_mem_address( **{"attrs": mem_addr}) logging.debug("Memory device xml: %s", mem_xml) return mem_xml.copy() def add_device(dev_xml, at_error=False): """ Add memory device by attachment or modify domain xml. """ if attach_device: ret = virsh.attach_device(vm_name, dev_xml.xml, flagstr=attach_option) libvirt.check_exit_status(ret, at_error) else: vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if numa_cells: del vmxml.max_mem del vmxml.current_mem vmxml.add_device(dev_xml) vmxml.sync() def modify_domain_xml(): """ Modify domain xml and define it. 
""" vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) mem_unit = params.get("mem_unit", "KiB") vcpu = params.get("vcpu", "4") if max_mem_rt: vmxml.max_mem_rt = int(max_mem_rt) vmxml.max_mem_rt_slots = max_mem_slots vmxml.max_mem_rt_unit = mem_unit if vcpu: vmxml.vcpu = int(vcpu) vcpu_placement = params.get("vcpu_placement", "static") vmxml.placement = vcpu_placement if numa_memnode: vmxml.numa_memory = {} vmxml.numa_memnode = numa_memnode else: try: del vmxml.numa_memory del vmxml.numa_memnode except: # Not exists pass if numa_cells: cells = [ast.literal_eval(x) for x in numa_cells] cpu_xml = vm_xml.VMCPUXML() cpu_xml.xml = "<cpu><numa/></cpu>" cpu_mode = params.get("cpu_mode") model_fallback = params.get("model_fallback") if cpu_mode: cpu_xml.mode = cpu_mode if model_fallback: cpu_xml.fallback = model_fallback cpu_xml.numa_cell = cells vmxml.cpu = cpu_xml # Delete memory and currentMemory tag, # libvirt will fill it automatically del vmxml.max_mem del vmxml.current_mem # hugepages setting if huge_pages: membacking = vm_xml.VMMemBackingXML() hugepages = vm_xml.VMHugepagesXML() pagexml_list = [] for i in range(len(huge_pages)): pagexml = hugepages.PageXML() pagexml.update(huge_pages[i]) pagexml_list.append(pagexml) hugepages.pages = pagexml_list membacking.hugepages = hugepages vmxml.mb = membacking logging.debug("vm xml: %s", vmxml) vmxml.sync() pre_vm_state = params.get("pre_vm_state", "running") attach_device = "yes" == params.get("attach_device", "no") detach_device = "yes" == params.get("detach_device", "no") attach_error = "yes" == params.get("attach_error", "no") start_error = "yes" == params.get("start_error", "no") detach_error = "yes" == params.get("detach_error", "no") maxmem_error = "yes" == params.get("maxmem_error", "no") attach_option = params.get("attach_option", "") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_managedsave = "yes" == params.get("test_managedsave", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_mem_binding = "yes" == params.get("test_mem_binding", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") add_mem_device = "yes" == params.get("add_mem_device", "no") test_dom_xml = "yes" == params.get("test_dom_xml", "no") max_mem = params.get("max_mem") max_mem_rt = params.get("max_mem_rt") max_mem_slots = params.get("max_mem_slots", "16") cur_mem = params.get("current_mem") numa_cells = params.get("numa_cells", "").split() set_max_mem = params.get("set_max_mem") # params for attached device tg_size = params.get("tg_size") tg_sizeunit = params.get("tg_sizeunit", 'KiB') tg_node = params.get("tg_node", 0) pg_size = params.get("page_size") pg_unit = params.get("page_unit", "KiB") node_mask = params.get("node_mask", "0") mem_addr = ast.literal_eval(params.get("memory_addr", "{}")) huge_pages = [ast.literal_eval(x) for x in params.get("huge_pages", "").split()] numa_memnode = [ast.literal_eval(x) for x in params.get("numa_memnode", "").split()] # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Drop caches first for host has enough memory drop_caches() # Destroy domain first if vm.is_alive(): vm.destroy(gracefully=False) modify_domain_xml() # Start the domain any way if attach memory device old_mem_total = None if attach_device: vm.start() session = vm.wait_for_login() old_mem_total = get_vm_memtotal(session) logging.debug("Memtotal on guest: %s", old_mem_total) session.close() dev_xml = None # To attach the memory device. 
if add_mem_device: at_times = int(params.get("attach_times", 1)) dev_xml = create_mem_xml() for x in xrange(at_times): # If any error excepted, command error status should be # checked in the last time if x == at_times - 1: add_device(dev_xml, attach_error) else: add_device(dev_xml) # Check domain xml after attach device. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Set domain state if pre_vm_state == "transient": logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() raise error.TestFail("Cann't create the domain") elif vm.is_dead(): try: vm.start() vm.wait_for_login().close() except virt_vm.VMStartError: if start_error: pass else: raise error.TestFail("VM Failed to start" " for some reason!") # Set memory operation if set_max_mem: max_mem_option = params.get("max_mem_option", "") ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option) libvirt.check_exit_status(ret, maxmem_error) # Check domain xml after start the domain. if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check guest meminfo after attachment if (attach_device and not attach_option.count("config") and not any([attach_error, start_error])): check_guest_meminfo(old_mem_total) # Consuming memory on guest, # to verify memory changes by numastat if test_mem_binding: pid = vm.get_pid() old_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", old_numastat) consume_vm_mem() new_numastat = read_from_numastat(pid, "Total") logging.debug("Numastat: %s", new_numastat) # Only check total memory which is the last element if float(new_numastat[-1]) - float(old_numastat[-1]) < 0: raise error.TestFail("Numa memory can't be consumed" " on guest") # Run managedsave command to check domain xml. if test_managedsave: ret = virsh.managedsave(vm_name, **virsh_dargs) libvirt.check_exit_status(ret) vm.start() vm.wait_for_login().close() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Run save and restore command to check domain xml if test_save_restore: check_save_restore() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Check domain xml after restarting libvirtd if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if test_dom_xml: check_dom_xml(at_mem=attach_device) # Detach the memory device if detach_device: if not dev_xml: dev_xml = create_mem_xml() ret = virsh.detach_device(vm_name, dev_xml.xml, flagstr=attach_option) libvirt.check_exit_status(ret, detach_error) if test_dom_xml: check_dom_xml(dt_mem=detach_device) finally: # Delete snapshots. snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Restoring vm...") vmxml_backup.sync()
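# Standalone sketch of the /proc/meminfo parsing used by get_vm_memtotal()
# above, runnable against a plain string instead of a guest session.
import re

def parse_memtotal(meminfo_text):
    """Return MemTotal in KiB, or None if the line is missing."""
    match = re.search(r'MemTotal:\s+(\d+)\s+[kK]B', meminfo_text)
    return int(match.group(1)) if match else None

# parse_memtotal("MemTotal:       16284956 kB\n") -> 16284956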
def run(test, params, env): """ Test virtiofs filesystem device: 1.Start guest with 1/2 virtiofs filesystem devices. 2.Start 2 guest with the same virtiofs filesystem device. 3.Coldplug/Coldunplug virtiofs filesystem device 4.Share data between guests and host. 5.Lifecycle for guest with virtiofs filesystem device. """ def generate_expected_process_option(expected_results): """ Generate expected virtiofsd process option """ if cache_mode != "auto": expected_results = "cache=%s" % cache_mode if xattr == "on": expected_results += ",xattr" elif xattr == "off": expected_results += ",no_xattr" if flock == "on": expected_results += ",flock" else: expected_results += ",no_flock" if lock_posix == "on": expected_results += ",posix_lock" else: expected_results += ",no_posix_lock" logging.debug(expected_results) return expected_results def shared_data(vm_names, fs_devs): """ Shared data between guests and host: 1.Mount dir in guest; 2.Write a file in guest; 3.Check the md5sum value are the same in guests and host; """ md5s = [] for vm in vms: session = vm.wait_for_login() for fs_dev in fs_devs: logging.debug(fs_dev) mount_dir = '/var/tmp/' + fs_dev.target['dir'] session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False) session.cmd('mkdir -p %s' % mount_dir) logging.debug("mount virtiofs dir in guest") cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'], mount_dir) status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("mount virtiofs dir failed: %s" % output) if vm == vms[0]: filename_guest = mount_dir + '/' + vm.name cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("Write data failed: %s" % output) md5_value = session.cmd_status_output( "md5sum %s" % filename_guest)[1].strip().split()[0] md5s.append(md5_value) logging.debug(md5_value) md5_value = process.run( "md5sum %s" % filename_guest).stdout_text.strip().split()[0] logging.debug(md5_value) md5s.append(md5_value) session.close() if len(set(md5s)) != len(fs_devs): test.fail("The md5sum value are not the same in guests and host") def launch_externally_virtiofs(source_dir, source_socket): """ Launch externally virtiofs :param source_dir: the dir shared on host :param source_socket: the socket file listened on """ process.run('chcon -t virtd_exec_t %s' % path, ignore_status=False, shell=True) cmd = "systemd-run %s --socket-path=%s -o source=%s" % ( path, source_socket, source_dir) try: process.run(cmd, ignore_status=False, shell=True) # Make sure the socket is created utils_misc.wait_for(lambda: os.path.isdir(source_socket), timeout=3) process.run("chown qemu:qemu %s" % source_socket, ignore_status=False) process.run('chcon -t svirt_image_t %s' % source_socket, ignore_status=False, shell=True) except Exception as err: cmd = "pkill virtiofsd" process.run(cmd, shell=True) test.fail("{}".format(err)) def prepare_stress_script(script_path, script_content): """ Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files. 
:param source_path: The path of script :param content: The content of stress script """ logging.debug("stress script path: %s content: %s" % (script_path, script_content)) script_lines = script_content.split(';') try: with open(script_path, 'w') as fd: fd.write('\n'.join(script_lines)) os.chmod(script_path, 0o777) except Exception as e: test.error("Prepare the guest stress script failed %s" % e) def run_stress_script(session, script_path): """ Run stress script in the guest :param session: guest session :param script_path: The path of script in the guest """ # Set ULIMIT_NOFILE to increase the number of unlinked files session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path, timeout=120) def umount_fs(vm): """ Unmount the filesystem in guest :param vm: filesystem in this vm that should be unmounted """ if vm.is_alive(): session = vm.wait_for_login() for fs_dev in fs_devs: mount_dir = '/var/tmp/' + fs_dev.target['dir'] session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True) session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True) session.close() def check_detached_xml(vm): """ Check whether there is xml about the filesystem device in the vm xml :param vm: the vm to be checked """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) filesystems = vmxml.devices.by_device_tag('filesystem') if filesystems: test.fail("There should be no filesystem devices in guest " "xml after hotunplug") def check_filesystem_in_guest(vm, fs_dev): """ Check whether there is virtiofs in vm :param vm: the vm to be checked :param fs_dev: the virtiofs device to be checked """ session = vm.wait_for_login() mount_dir = '/var/tmp/' + fs_dev.target['dir'] cmd = "mkdir %s; mount -t virtiofs %s %s" % ( mount_dir, fs_dev.target['dir'], mount_dir) status, output = session.cmd_status_output(cmd, timeout=300) session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True) if not status: test.fail( "Mount virtiofs should failed after hotunplug device. 
%s" % output) session.close() start_vm = params.get("start_vm", "no") vm_names = params.get("vms", "avocado-vt-vm1").split() cache_mode = params.get("cache_mode", "none") xattr = params.get("xattr", "on") lock_posix = params.get("lock_posix", "on") flock = params.get("flock", "on") xattr = params.get("xattr", "on") path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd") queue_size = int(params.get("queue_size", "512")) driver_type = params.get("driver_type", "virtiofs") guest_num = int(params.get("guest_num", "1")) fs_num = int(params.get("fs_num", "1")) vcpus_per_cell = int(params.get("vcpus_per_cell", 2)) dir_prefix = params.get("dir_prefix", "mount_tag") error_msg_start = params.get("error_msg_start", "") error_msg_save = params.get("error_msg_save", "") status_error = params.get("status_error", "no") == "yes" socket_file_checking = params.get("socket_file_checking", "no") == "yes" suspend_resume = params.get("suspend_resume", "no") == "yes" managedsave = params.get("managedsave", "no") == "yes" coldplug = params.get("coldplug", "no") == "yes" hotplug_unplug = params.get("hotplug_unplug", "no") == "yes" detach_device_alias = params.get("detach_device_alias", "no") == "yes" extra_hugepages = params.get_numeric("extra_hugepages") edit_start = params.get("edit_start", "no") == "yes" with_hugepages = params.get("with_hugepages", "yes") == "yes" with_numa = params.get("with_numa", "yes") == "yes" with_memfd = params.get("with_memfd", "no") == "yes" source_socket = params.get("source_socket", "/var/tmp/vm001.socket") launched_mode = params.get("launched_mode", "auto") destroy_start = params.get("destroy_start", "no") == "yes" bug_url = params.get("bug_url", "") script_content = params.get("stress_script", "") fs_devs = [] vms = [] vmxml_backups = [] expected_fails_msg = [] expected_results = "" host_hp_size = utils_memory.get_huge_page_size() backup_huge_pages_num = utils_memory.get_num_huge_pages() huge_pages_num = 0 if len(vm_names) != guest_num: test.cancel("This test needs exactly %d vms." 
% guest_num) if not libvirt_version.version_compare(7, 0, 0) and not with_numa: test.cancel("Not supported without NUMA before 7.0.0") if not libvirt_version.version_compare(7, 6, 0) and destroy_start: test.cancel("Bug %s is not fixed on current build" % bug_url) try: # Define filesystem device xml for index in range(fs_num): driver = {'type': driver_type, 'queue': queue_size} source_dir = os.path.join('/var/tmp/', str(dir_prefix) + str(index)) logging.debug(source_dir) not os.path.isdir(source_dir) and os.mkdir(source_dir) target_dir = dir_prefix + str(index) source = {'socket': source_socket} target = {'dir': target_dir} if launched_mode == "auto": binary_keys = [ 'path', 'cache_mode', 'xattr', 'lock_posix', 'flock' ] binary_values = [path, cache_mode, xattr, lock_posix, flock] binary_dict = dict(zip(binary_keys, binary_values)) source = {'dir': source_dir} accessmode = "passthrough" fsdev_keys = [ 'accessmode', 'driver', 'source', 'target', 'binary' ] fsdev_values = [ accessmode, driver, source, target, binary_dict ] else: fsdev_keys = ['driver', 'source', 'target'] fsdev_values = [driver, source, target] fsdev_dict = dict(zip(fsdev_keys, fsdev_values)) logging.debug(fsdev_dict) fs_dev = libvirt_device_utils.create_fs_xml( fsdev_dict, launched_mode) logging.debug(fs_dev) fs_devs.append(fs_dev) #Start guest with virtiofs filesystem device for index in range(guest_num): logging.debug("prepare vm %s", vm_names[index]) vm = env.get_vm(vm_names[index]) vms.append(vm) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) vmxml_backup = vmxml.copy() vmxml_backups.append(vmxml_backup) if vmxml.max_mem < 1024000: vmxml.max_mem = 1024000 if with_hugepages: huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages utils_memory.set_num_huge_pages(huge_pages_num) vmxml.remove_all_device_by_type('filesystem') vmxml.sync() numa_no = None if with_numa: numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1 vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu, numa_number=numa_no) vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name, access_mode="shared", hpgs=with_hugepages, memfd=with_memfd) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) logging.debug(vmxml) if launched_mode == "externally": launch_externally_virtiofs(source_dir, source_socket) if coldplug: ret = virsh.attach_device(vm_names[index], fs_devs[0].xml, flagstr='--config', debug=True) utils_test.libvirt.check_exit_status(ret, expect_error=False) else: if not hotplug_unplug: for fs in fs_devs: vmxml.add_device(fs) vmxml.sync() logging.debug(vmxml) libvirt_pcicontr.reset_pci_num(vm_names[index]) result = virsh.start(vm_names[index], debug=True) if hotplug_unplug: for fs_dev in fs_devs: ret = virsh.attach_device(vm_names[index], fs_dev.xml, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error: return if status_error and not managedsave: expected_error = error_msg_start utils_test.libvirt.check_exit_status(result, expected_error) return else: utils_test.libvirt.check_exit_status(result, expect_error=False) expected_results = generate_expected_process_option( expected_results) if launched_mode == "auto": cmd = 'ps aux | grep virtiofsd | head -n 1' utils_test.libvirt.check_cmd_output(cmd, content=expected_results) if managedsave: expected_error = error_msg_save result = virsh.managedsave(vm_names[0], ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) else: shared_data(vm_names, fs_devs) if suspend_resume: 
virsh.suspend(vm_names[0], debug=True, ignore_status=False) time.sleep(30) virsh.resume(vm_names[0], debug=True, ignore_statue=False) elif destroy_start: session = vm.wait_for_login(timeout=120) # Prepare the guest test script script_path = os.path.join(fs_devs[0].source["dir"], "test.py") script_content %= (fs_devs[0].source["dir"], fs_devs[0].source["dir"]) prepare_stress_script(script_path, script_content) # Run guest stress script stress_script_thread = threading.Thread( target=run_stress_script, args=(session, script_path)) stress_script_thread.setDaemon(True) stress_script_thread.start() # Create a lot of unlink files time.sleep(60) virsh.destroy(vm_names[0], debug=True, ignore_status=False) ret = virsh.start(vm_names[0], debug=True) libvirt.check_exit_status(ret) elif edit_start: vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml( vm_names[0]) if vm.is_alive(): virsh.destroy(vm_names[0]) cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[ 0] cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug(virsh.dumpxml(vm_names[0])) if cmd_result.exit_status: test.error("virt-xml edit guest failed: %s" % cmd_result) result = virsh.start(vm_names[0], ignore_status=True, debug=True) if error_msg_start: expected_fails_msg.append(error_msg_start) utils_test.libvirt.check_result( result, expected_fails=expected_fails_msg) if not libvirt_version.version_compare(6, 10, 0): # Because of bug #1897105, it was fixed in libvirt-6.10.0, # before this version, need to recover the env manually. cmd = "pkill virtiofsd" process.run(cmd, shell=True) if not vm.is_alive(): # Restoring vm and check if vm can start successfully vmxml_virtio_backup.sync() virsh.start(vm_names[0], ignore_status=False, shell=True) elif socket_file_checking: result = virsh.domid(vm_names[0]) domid = result.stdout.strip() domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[ 0] if result.exit_status: test.fail("Get domid failed.") for fs_dev in fs_devs: alias = fs_dev.alias['name'] expected_pid = domain_dir + alias + '-fs.pid' expected_sock = alias + '-fs.sock' status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status if not (status1 and status2): test.fail( "The socket and pid file is not as expected") elif hotplug_unplug: for vm in vms: umount_fs(vm) for fs_dev in fs_devs: if detach_device_alias: alias = fs_dev.alias['name'] ret = virsh.detach_device_alias( vm.name, alias, ignore_status=True, debug=True, wait_for_event=True) else: ret = virsh.detach_device(vm.name, fs_dev.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(ret, status_error) check_filesystem_in_guest(vm, fs_dev) check_detached_xml(vm) finally: for vm in vms: if vm.is_alive(): umount_fs(vm) vm.destroy(gracefully=False) for vmxml_backup in vmxml_backups: vmxml_backup.sync() for index in range(fs_num): process.run('rm -rf %s' % '/var/tmp/' + str(dir_prefix) + str(index), ignore_status=False) process.run('rm -rf %s' % source_socket, ignore_status=False, shell=True) if launched_mode == "externally": process.run('restorecon %s' % path, ignore_status=False, shell=True) utils_memory.set_num_huge_pages(backup_huge_pages_num)
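# Hedged sketch of the <filesystem> device described by fsdev_dict above,
# rendered with plain ElementTree instead of the avocado-vt helper; element
# and attribute names follow the libvirt domain XML format for virtiofs.
import xml.etree.ElementTree as ET

def build_virtiofs_xml(source_dir, target_tag, queue_size=1024):
    """Render a minimal virtiofs <filesystem> device element."""
    fs = ET.Element("filesystem", {"type": "mount", "accessmode": "passthrough"})
    ET.SubElement(fs, "driver", {"type": "virtiofs", "queue": str(queue_size)})
    ET.SubElement(fs, "source", {"dir": source_dir})
    ET.SubElement(fs, "target", {"dir": target_tag})
    return ET.tostring(fs, encoding="unicode")

# build_virtiofs_xml("/var/tmp/mount_tag0", "mount_tag0")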
def run(test, params, env):
    """
    Test pure checkpoint commands
    """
    def prepare_checkpoints(disk="vdb", num=1, cp_prefix="test_checkpoint_"):
        """
        Create checkpoints for a specific disk

        :param disk: The disk to create checkpoint for.
        :param num: How many checkpoints to be created
        :param cp_prefix: The prefix to name the checkpoint.
        """
        option_pattern = ("{0} --diskspec vda,checkpoint=no "
                          "--diskspec {1},checkpoint=bitmap,bitmap={0}")
        for i in range(num):
            # Create checkpoints
            checkpoint_name = cp_prefix + str(i)
            options = option_pattern.format(checkpoint_name, disk)
            virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
            current_checkpoints.append(checkpoint_name)

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    checkpoint_cmd = params.get("checkpoint_cmd")
    cmd_flag = params.get("flag")
    required_checkpoints = int(params.get("required_checkpoints", 0))
    test_disk_size = params.get("test_disk_size", "100M")
    test_disk_target = params.get("test_disk_target", "vdb")
    status_error = "yes" == params.get("status_error")
    tmp_dir = data_dir.get_tmp_dir()
    current_checkpoints = []
    virsh_dargs = {'debug': True, 'ignore_status': False}

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()

        # Enable vm incremental backup capability. This is only a workaround
        # to make sure incremental backup can work for the vm. Code needs to
        # be removed immediately when the function is enabled by default,
        # which is tracked by bz1799015
        tree = ET.parse(vmxml.xml)
        root = tree.getroot()
        for elem in root.iter('domain'):
            elem.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
            qemu_cap = ET.Element("qemu:capabilities")
            elem.insert(-1, qemu_cap)
            incbackup_cap = ET.Element("qemu:add")
            incbackup_cap.set('capability', 'incremental-backup')
            qemu_cap.insert(1, incbackup_cap)
        vmxml.undefine()
        tmp_vm_xml = os.path.join(tmp_dir, "tmp_vm.xml")
        tree.write(tmp_vm_xml)
        virsh.define(tmp_vm_xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Script inserts xml elements to make sure vm can support "
                      "incremental backup. This should be removed when "
                      "bz 1799015 is fixed.")

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Prepare the disk to be used.
        disk_params = {}
        disk_path = ""
        image_name = "{}_image.qcow2".format(test_disk_target)
        disk_path = os.path.join(tmp_dir, image_name)
        libvirt.create_local_disk("file", disk_path, test_disk_size, "qcow2")
        disk_params = {"device_type": "disk",
                       "type_name": "file",
                       "driver_type": "qcow2",
                       "target_dev": test_disk_target,
                       "source_file": disk_path}
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", **virsh_dargs)

        vm.start()
        vm.wait_for_login().close()
        if required_checkpoints > 0:
            prepare_checkpoints(test_disk_target, required_checkpoints)
        if checkpoint_cmd == "checkpoint-create":
            if not current_checkpoints:
                test.fail("No existing checkpoints prepared.")
            if "--redefine" in cmd_flag:
                no_domain = "yes" == params.get("no_domain")
                cp_dumpxml_options = ""
                if no_domain:
                    cp_dumpxml_options = "--no-domain"
                checkpoint_redef = current_checkpoints[0]
                cp_xml = checkpoint_xml.CheckpointXML.new_from_checkpoint_dumpxml(
                    vm_name, checkpoint_redef, cp_dumpxml_options)
                logging.debug("Checkpoint XML to be redefined is: %s", cp_xml)
                xml_file = cp_xml.xml
                virsh.checkpoint_delete(vm_name, checkpoint_redef,
                                        "--metadata", **virsh_dargs)
                cmd_options = xml_file + " " + cmd_flag
                result = virsh.checkpoint_create(vm_name, cmd_options, debug=True)
                libvirt.check_exit_status(result, status_error)
        elif checkpoint_cmd == "checkpoint-create-as":
            if "--print-xml" in cmd_flag:
                checkpoint_name = "test_checkpoint_0"
                options = ("{0} --diskspec vda,checkpoint=no --diskspec {1},"
                           "checkpoint=bitmap,bitmap={0} "
                           "--print-xml".format(checkpoint_name, test_disk_target))
                virsh.checkpoint_create_as(vm_name, options, **virsh_dargs)
                # The checkpoint should not be created, so we have the
                # following check
                cp_list_result = virsh.checkpoint_list(vm_name, checkpoint_name,
                                                       debug=True)
                libvirt.check_exit_status(cp_list_result, True)
        elif checkpoint_cmd == "checkpoint-info":
            if len(current_checkpoints) != 3:
                test.fail("We should prepare 3 checkpoints.")
            parent_checkpoint = current_checkpoints[0]
            test_checkpoint = current_checkpoints[1]
            stdout = virsh.checkpoint_info(vm_name, test_checkpoint,
                                           **virsh_dargs).stdout_text.strip()
            if (not re.search("domain.*%s" % vm_name, stdout, re.IGNORECASE)
                    or not re.search("parent.*%s" % parent_checkpoint,
                                     stdout, re.IGNORECASE)
                    or not re.search("children.*1", stdout, re.IGNORECASE)
                    or not re.search("descendants.*1", stdout, re.IGNORECASE)):
                test.fail("checkpoint-info returned inaccurate information: %s"
                          % stdout)
        elif checkpoint_cmd == "checkpoint-list":
            logic_error = False
            if not cmd_flag:
                stdout = virsh.checkpoint_list(vm_name,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--parent":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[-1]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 2:
                            logic_error = True
            elif cmd_flag == "--roots":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint == current_checkpoints[0]:
                        if stdout.count(checkpoint) != 1:
                            logic_error = True
                    else:
                        if stdout.count(checkpoint) != 0:
                            logic_error = True
            elif cmd_flag == "--tree":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                lines = stdout.splitlines()
                prev_indent_num = -1
                for line in lines:
                    for checkpoint in current_checkpoints:
                        if checkpoint in line:
                            cur_indent_num = line.rstrip().count(" ")
                            if cur_indent_num <= prev_indent_num:
                                logic_error = True
                                break
                            prev_indent_num = cur_indent_num
            elif cmd_flag == "--name":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                checkpoint_names = stdout.splitlines()
                if not operator.eq(checkpoint_names, current_checkpoints):
                    logic_error = True
            elif cmd_flag == "--topological":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                for checkpoint in current_checkpoints:
                    if checkpoint not in stdout:
                        logic_error = True
            elif cmd_flag == "--from":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[2] in stdout
                        or current_checkpoints[1] not in stdout):
                    logic_error = True
            elif cmd_flag == "--descendants":
                cmd_options = cmd_flag + " " + current_checkpoints[0]
                stdout = virsh.checkpoint_list(vm_name, cmd_options,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            elif cmd_flag == "--no-leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] not in stdout
                        or current_checkpoints[1] not in stdout
                        or current_checkpoints[2] in stdout):
                    logic_error = True
            elif cmd_flag == "--leaves":
                stdout = virsh.checkpoint_list(vm_name, cmd_flag,
                                               **virsh_dargs).stdout_text.strip()
                if (current_checkpoints[0] in stdout
                        or current_checkpoints[1] in stdout
                        or current_checkpoints[2] not in stdout):
                    logic_error = True
            if logic_error:
                test.fail("checkpoint-list with '%s' gives wrong output"
                          % cmd_flag)
        elif checkpoint_cmd == "checkpoint-dumpxml":
            if "--size" in cmd_flag:
                test.cancel("'--size' not supported yet(bz1814573)")
            elif "--security-info" in cmd_flag:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                password = "******"
                vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': password})
                vm.start()
                vm.wait_for_login().close()
                prepare_checkpoints()
                test_checkpoint = current_checkpoints[0]
                stdout = virsh.checkpoint_dumpxml(vm_name, test_checkpoint,
                                                  **virsh_dargs).stdout_text.strip()
                if password in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info displayed in dumpxml "
                              "without --security-info.")
                stdout = virsh.checkpoint_dumpxml(
                    vm_name, test_checkpoint + " --security-info",
                    **virsh_dargs).stdout_text.strip()
                if password not in stdout:
                    logging.debug("checkpoint xml is: %s", stdout)
                    test.fail("Security info not displayed in dumpxml "
                              "with --security-info.")
        elif checkpoint_cmd == "virsh_list":
            stdout = virsh.dom_list(cmd_flag, **virsh_dargs).stdout_text.strip()
            if ((vm_name in stdout and cmd_flag == "--without-checkpoint")
                    or (vm_name not in stdout and cmd_flag == "--with-checkpoint")):
                test.fail("virsh list with '%s' contains wrong data" % cmd_flag)
    finally:
        # Remove checkpoints
        if "current_checkpoints" in locals() and current_checkpoints:
            for checkpoint_name in current_checkpoints:
                virsh.checkpoint_delete(vm_name, checkpoint_name,
                                        ignore_status=True, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()

def run(test, params, env):
    """
    Test vhostuser disk device.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare test image and vhost-user socket backend.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes.
    image_path = params.get("virt_disk_device_source",
                            "/var/lib/libvirt/images/test.img")
    sock_path = params.get("source_file", "tmp/vhost.sock")
    device_target = params.get("target_dev", "vdb")

    if not libvirt_version.version_compare(7, 0, 0):
        test.cancel("Cannot support vhostuser disk feature in "
                    "this libvirt version.")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    vsock_service_id = None

    # Start VM and get all partitions in VM
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        create_backend_image_file(image_path)
        vsock_service_id = start_vhost_sock_service(image_path, sock_path)

        # Prepare the disk.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        mb_params = {'source_type': 'memfd', 'access_mode': 'shared'}
        vmxml.mb = libvirt_disk.create_mbxml(mb_params)
        vmxml.sync()
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("memory backing VM xml is:\n%s" % vmxml)

        disk_xml = create_vhostuser_disk(params)
        logging.debug("vhostuser disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()
        vm.start()
        vm.wait_for_login()
        if status_error:
            if hotplug:
                logging.debug("attaching disk, expecting error...")
                result = virsh.attach_device(vm_name, disk_xml.xml)
                libvirt.check_exit_status(result, status_error)
            else:
                test.fail("VM started unexpectedly.")
        else:
            if hotplug:
                virsh.attach_device(vm_name, disk_xml.xml,
                                    ignore_status=True, debug=True)
                if not libvirt_disk.check_in_vm(vm, device_target, old_parts):
                    test.fail("Check vhostuser disk in VM failed")
                virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True,
                                    debug=True, wait_for_event=True)
                if not libvirt_disk.check_in_vm(vm, device_target,
                                                old_parts, is_equal=True):
                    test.fail("Cannot detach device successfully")
            else:
                if not libvirt_disk.check_in_vm(vm, device_target, old_parts):
                    test.fail("Check vhostuser disk in VM failed")
    except virt_vm.VMStartError as e:
        if status_error:
            if hotplug:
                test.fail("In hotplug scenario, VM should start "
                          "successfully but did not. Error: %s" % str(e))
            else:
                logging.debug("VM failed to start as expected. "
                              "Error: %s", str(e))
        else:
            test.fail("VM failed to start. Error: %s" % str(e))
    except Exception as ex:
        test.fail("Unexpected exception happened: %s" % str(ex))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        # Kill all qemu-storage-daemon processes on host
        process.run("pidof qemu-storage-daemon && killall qemu-storage-daemon",
                    ignore_status=True, shell=True)
        if vsock_service_id:
            stop_vsock_service_cmd = "systemctl stop %s" % vsock_service_id
            process.run(stop_vsock_service_cmd, ignore_status=True, shell=True)
        # Clean up images
        for file_path in [image_path, sock_path]:
            if os.path.exists(file_path):
                os.remove(file_path)

def run(test, params, env):
    """
    Test virsh nwfilter-binding-list

    1) Prepare parameters
    2) Run nwfilter_binding_list command
    3) Check result
    4) Clean env
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    status_error = "yes" == params.get("status_error")
    new_filter_1 = params.get("newfilter_1")
    new_filter_2 = params.get("newfilter_2")
    time_wait = int(params.get("time_wait", 10))
    option = params.get("option")
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Prepare vm filterref parameters dict list
    filter_param_list_1 = []
    params_key_1 = []
    filter_param_list_2 = []
    params_key_2 = []
    for i in params.keys():
        if 'parameters_name_' in i:
            params_key_1.append(i)
    params_key_1.sort()
    for i in range(len(params_key_1)):
        params_dict = {}
        params_dict['name'] = params[params_key_1[i]]
        params_dict['value'] = params['parameters_value_%s' % i]
        filter_param_list_1.append(params_dict)
    filterref_dict_1 = {}
    filterref_dict_1['name'] = new_filter_1
    filterref_dict_1['parameters'] = filter_param_list_1
    for i in params.keys():
        if 'parameters_dhcp_' in i:
            params_key_2.append(i)
    params_key_2.sort()
    for i in range(len(params_key_2)):
        params_dict = {}
        params_dict['name'] = params[params_key_2[i]]
        params_dict['value'] = params['dhcp_value_%s' % i]
        filter_param_list_2.append(params_dict)
    filterref_dict_2 = {}
    filterref_dict_2['name'] = new_filter_2
    filterref_dict_2['parameters'] = filter_param_list_2

    utils_package.package_install('libvirt-daemon-config-nwfilter')

    def env_setting(filterref_dict_1, filterref_dict_2):
        ret = virsh.attach_interface(vm_name, option)
        utlv.check_exit_status(ret, status_error)
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        devices = vmxml.get_devices('interface')
        iface_xml = devices[0]
        logging.debug("iface_xml : %s" % iface_xml)
        iface_xml_2 = devices[1]
        vmxml.del_device(iface_xml)
        vmxml.del_device(iface_xml_2)
        new_iface_1 = interface.Interface('network')
        logging.debug("new_iface_1 : %s" % new_iface_1)
        new_iface_2 = interface.Interface('network')
        new_iface_1.xml = iface_xml.xml
        new_iface_1.type_name = "network"
        logging.debug("new_iface_1 : %s" % new_iface_1)
        new_iface_2.xml = iface_xml_2.xml
        new_iface_1.source = {'network': "default"}
        new_filterref = new_iface_1.new_filterref(**filterref_dict_1)
        new_iface_1.filterref = new_filterref
        new_filterref = new_iface_2.new_filterref(**filterref_dict_2)
        new_iface_2.filterref = new_filterref
        logging.debug("new interface xml is: %s \n %s"
                      % (new_iface_1, new_iface_2))
        vmxml.add_device(new_iface_1)
        vmxml.add_device(new_iface_2)
        vmxml.sync()
        return new_iface_1, new_iface_2

    def attach_new_device():
        newnet_iface = interface.Interface('network')
        newnet_iface.source = {'network': "default"}
        newnet_iface.model = 'virtio'
        filterref_dict = {}
        filterref_list = [{'name': "CTRL_IP_LEARNING", 'value': "dhcp"}]
        filterref_dict['name'] = "clean-traffic"
        filterref_dict['parameters'] = filterref_list
        newnet_iface.filterref = newnet_iface.new_filterref(**filterref_dict)
        ret = virsh.attach_device(domainarg=vm_name, filearg=newnet_iface.xml,
                                  debug=True)
        utlv.check_exit_status(ret, status_error)

    try:
        # Set env and start vm
        new_iface_1, new_iface_2 = env_setting(filterref_dict_1,
                                               filterref_dict_2)
        # Start vm
        virsh.start(vm_name, debug=True)
        # List binding port dev
        logging.debug("check nwfilter binding for 2 interfaces")
        ret = virsh.nwfilter_binding_list(debug=True)
        utlv.check_result(ret, expected_match=[r"vnet\d+\s+clean-traffic"])
        utlv.check_result(ret, expected_match=[r"vnet\d+\s+allow-dhcp-server"])
        # Detach an interface; before detaching, make sure the guest booted up
        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm.wait_for_serial_login().close()
        option = "--type network" + " --mac " + new_iface_1.mac_address
        ret = virsh.detach_interface(vm_name, option, debug=True)
        time.sleep(time_wait)
        utlv.check_exit_status(ret, status_error)
        logging.debug("check nwfilter binding after detach one interface:")
        time.sleep(3)
        ret = virsh.nwfilter_binding_list(debug=True)
        if re.search(r'vnet\d+\s+clean-traffic.*', ret.stdout):
            test.fail("vnet binding clean-traffic still exists "
                      "after detaching the interface!")
        utlv.check_result(ret, expected_match=[r"vnet\d+\s+allow-dhcp-server"])
        # update_device to delete the filter
        iface_dict = {'del_filter': True}
        new_xml = utlv.modify_vm_iface(vm_name, 'get_xml', iface_dict)
        virsh.update_device(domainarg=vm_name, filearg=new_xml, debug=True)
        logging.debug("check nwfilter-binding after deleting the filter "
                      "of the only interface")
        ret = virsh.nwfilter_binding_list(debug=True)
        if re.search(r'vnet\d+\s+allow-dhcp-server.*', ret.stdout):
            test.fail("vnet binding allow-dhcp-server still exists "
                      "after deleting the filter!")
        utlv.check_exit_status(ret, status_error)
        # Attach new interface
        attach_new_device()
        ret = virsh.nwfilter_binding_list(debug=True)
        logging.debug("Check nwfilter-binding exists after attach device")
        utlv.check_result(ret, expected_match=[r"vnet\d+\s+clean-traffic"])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()

def run(test, params, env):
    """
    Test libvirt support features in qemu cmdline.

    1) Config test feature in VM XML;
    2) Try to start VM;
    3) Check corresponding feature flags in qemu cmdline;
    4) Login VM to test feature if necessary.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    expect_fail = "yes" == params.get("expect_start_vm_fail", "no")
    expect_define_vm_fail = 'yes' == params.get('expect_define_vm_fail', 'no')
    test_feature = params.get("test_feature")
    # All test case functions start with the 'config_feature_' prefix
    testcase = globals()['config_feature_%s' % test_feature]
    test_feature_attr = params.get("test_feature_attr", '').split(",")
    test_feature_valu = params.get("test_feature_valu", '').split(",")
    # Parameters for test case
    if len(test_feature_attr) != len(test_feature_valu):
        test.error("Attribute number does not match value number")
    test_dargs = dict(list(zip(test_feature_attr, test_feature_valu)))
    if expect_define_vm_fail:
        test_dargs.update({
            'expect_define_vm_fail': expect_define_vm_fail,
            'expected_msg': params.get('expected_msg', '')
        })
    if vm.is_alive():
        vm.destroy()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if 'ppc64le' in platform.machine().lower() and test_feature == 'pv_eoi':
        if not libvirt_version.version_compare(6, 0, 0):
            test.cancel('Feature %s is only supported since version 6.0.0'
                        % test_feature)

    try:
        # Run test case
        qemu_flags = testcase(test, vmxml, **test_dargs)
        if not qemu_flags and expect_define_vm_fail:
            return
        result = virsh.start(vm_name, **virsh_dargs)
        libvirt.check_exit_status(result, expect_fail)

        # Check qemu flag
        vm_pid = vm.get_pid()
        with open("/proc/%s/cmdline" % vm_pid) as cmdline_f:
            cmdline_content = cmdline_f.read()
        logging.debug("VM cmdline:\n%s",
                      cmdline_content.replace('\x00', ' '))
        msg = "Find '%s' in qemu cmdline? %s"
        found_flags = []
        index = 0
        for flag in qemu_flags:
            # Here, flag could be a list, so uniform it to a list for the next
            # step. The check passes if any element in the list exists in the
            # cmdline.
            if not isinstance(flag, list):
                flag = [flag]
            found_f = []
            for f in flag:
                if f in cmdline_content:
                    found_f.append(True)
                    break
                else:
                    found_f.append(False)
            found_flags.append(any(found_f))
            logging.info(msg % (flag, found_flags[index]))
            index += 1
        if False in found_flags:
            test.fail("Did not find all expected flags in qemu cmdline")
    finally:
        vmxml_backup.sync()