def detach_reattach_nodedev(device_address, params, options=""): """ Do the detach and reattach. Step1.Do detach. Step2.Check the result of detach. Step3.Do reattach. Step4.Check the result of reattach """ # Libvirt acl polkit related params uri = params.get("virsh_uri") # Nodedev-detach/reattach are special, the connect driver is still qemu # with split daemon, and the connect_driver in polkit rule # should be 'QEMU' for detach, 'nodedev' for read. update the polkit # rule to include both QEMU and nodedev in such situation. set_polkit = 'yes' == params.get('setup_libvirt_polkit', 'no') if utils_split_daemons.is_modular_daemon() and set_polkit: rule_path = '/etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules' cmd = '''sed -i "s/'nodedev'/'nodedev'||'QEMU'/g" %s''' % rule_path process.run(cmd) process.run('cat /etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules') unprivileged_user = params.get('unprivileged_user') readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Do the detach logging.debug('Node device name is %s.', device_address) CmdResult = virsh.nodedev_detach(device_address, options, unprivileged_user=unprivileged_user, uri=uri, readonly=readonly, debug=True) # Check the exit_status. libvirt.check_exit_status(CmdResult) # Check the driver. driver = get_driver_readlink(device_address) logging.debug('Driver after detach is %s.', driver) if libvirt_version.version_compare(1, 1, 1): device_driver_name = 'vfio-pci' else: device_driver_name = 'pci-stub' if (driver is None) or (not driver.endswith(device_driver_name)): test.fail("Driver for %s is not %s " "after nodedev-detach" % (device_address, device_driver_name)) # Do the reattach. CmdResult = virsh.nodedev_reattach(device_address, options) # Check the exit_status. libvirt.check_exit_status(CmdResult) # Check the driver. driver = get_driver_readlink(device_address) if libvirt_version.version_compare(1, 1, 1): device_driver_name = 'vfio-pci' else: device_driver_name = 'pci-stub' if driver and driver.endswith(device_driver_name): test.fail("Driver for %s is not %s " "after nodedev-detach" % (device_address, device_driver_name))
def check_server_name(server_name="virtproxyd"): """ Determine the server name under different daemon mode. :param server_name: name of the managed server :return: name of the managed server """ if not utils_split_daemons.is_modular_daemon(): server_name = "libvirtd" return server_name
def check_libvirtd_process_id(ori_pid_libvirtd, test):
    """
    Check that the libvirtd process id has not changed.

    :param ori_pid_libvirtd: original libvirtd process id
    :param test: test assert object
    """
    if not utils_split_daemons.is_modular_daemon():
        aft_pid_libvirtd = process.getoutput("pidof libvirtd")
        if not utils_libvirtd.libvirtd_is_running() \
                or ori_pid_libvirtd != aft_pid_libvirtd:
            test.fail("Libvirtd crashed after attaching ccw addr devices")
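# Usage sketch for check_libvirtd_process_id: snapshot the daemon pid
# before the risky operation, then verify libvirtd survived it unchanged.
# attach_ccw_devices() is a hypothetical stand-in for the attach step.
ori_pid_libvirtd = process.getoutput("pidof libvirtd")
attach_ccw_devices(vm)  # hypothetical helper performing the device attach
check_libvirtd_process_id(ori_pid_libvirtd, test)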
def detach_reattach_nodedev(device_address, params, options=""): """ Do the detach and reattach. Step1.Do detach. Step2.Check the result of detach. Step3.Do reattach. Step4.Check the result of reattach """ # Libvirt acl polkit related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unprivileged_user = params.get('unprivileged_user') readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Do the detach logging.debug('Node device name is %s.', device_address) CmdResult = virsh.nodedev_detach(device_address, options, unprivileged_user=unprivileged_user, uri=uri, readonly=readonly) # Check the exit_status. libvirt.check_exit_status(CmdResult) # Check the driver. driver = get_driver_readlink(device_address) logging.debug('Driver after detach is %s.', driver) if libvirt_version.version_compare(1, 1, 1): device_driver_name = 'vfio-pci' else: device_driver_name = 'pci-stub' if (driver is None) or (not driver.endswith(device_driver_name)): test.fail("Driver for %s is not %s " "after nodedev-detach" % (device_address, device_driver_name)) # Do the reattach. CmdResult = virsh.nodedev_reattach(device_address, options) # Check the exit_status. libvirt.check_exit_status(CmdResult) # Check the driver. driver = get_driver_readlink(device_address) if libvirt_version.version_compare(1, 1, 1): device_driver_name = 'vfio-pci' else: device_driver_name = 'pci-stub' if driver and driver.endswith(device_driver_name): test.fail("Driver for %s is not %s " "after nodedev-detach" % (device_address, device_driver_name))
def __init__(self, service_name=None, session=None, all_daemons=False):
    """
    Initialize a service object for libvirtd.

    :param service_name: Service name such as virtqemud or libvirtd.
        If service_name is None or 'libvirtd' and all_daemons is True,
        all sub daemons will be operated when the modular daemon
        environment is enabled. Otherwise, if service_name is a single
        string, only the given daemon/service will be operated.
    :param session: A session to a guest or remote host.
    :param all_daemons: Whether to operate all daemons when modular
        daemons are enabled. It only works if service_name is None or
        'libvirtd'.
    """
    self.session = session
    if self.session:
        self.remote_runner = remote_old.RemoteRunner(session=self.session)
        runner = self.remote_runner.run
    else:
        runner = process.run
    self.all_daemons = all_daemons
    self.daemons = []
    self.service_list = []

    if LIBVIRTD is None:
        LOG.warning("Libvirtd service is not available in host, "
                    "utils_libvirtd module will not function normally")
    self.service_name = "libvirtd" if not service_name else service_name
    if libvirt_version.version_compare(5, 6, 0, self.session):
        if utils_split_daemons.is_modular_daemon(session=self.session):
            if self.service_name in ["libvirtd", "libvirtd.service"]:
                self.service_name = "virtqemud"
                if self.all_daemons:
                    self.service_list = ['virtqemud', 'virtproxyd',
                                         'virtnetworkd', 'virtinterfaced',
                                         'virtnodedevd', 'virtsecretd',
                                         'virtstoraged', 'virtnwfilterd']
            elif self.service_name == "libvirtd.socket":
                self.service_name = "virtqemud.socket"
            elif self.service_name in ["libvirtd-tcp.socket",
                                       "libvirtd-tls.socket"]:
                self.service_name = re.sub("libvirtd", "virtproxyd",
                                           self.service_name)
        else:
            self.service_name = re.sub("^virt.*d", "libvirtd",
                                       self.service_name)
    else:
        self.service_name = "libvirtd"
    if not self.service_list:
        self.service_list = [self.service_name]
    for serv in self.service_list:
        self.daemons.append(service.Factory.create_service(serv, run=runner))
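# Usage sketch for the constructor above (it belongs to avocado-vt's
# utils_libvirtd.Libvirtd). The same call works on both daemon layouts:
# on a modular-daemon host "libvirtd" is transparently mapped to
# "virtqemud", and all_daemons=True fans the operation out to every
# virt*d sub daemon; on a monolithic host everything stays plain libvirtd.
libvirtd = Libvirtd(all_daemons=True)
libvirtd.restart()
networkd = Libvirtd("virtnetworkd")  # falls back to libvirtd when monolithic
networkd.restart()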
def managed_daemon_config(conf_type="virtproxyd"): """ Determine different daemon config under different daemon mode. :param conf_type: The configuration type to get For example, "libvirtd" or "virtqemud" :return: utils_config.LibvirtConfigCommon object """ if not utils_split_daemons.is_modular_daemon(): conf_type = "libvirtd" config = utils_config.get_conf_obj(conf_type) return config
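# Usage sketch for managed_daemon_config: fetch whichever config object
# applies, tweak a known option, restart, and restore afterwards.
# log_level is assumed to be one of the options utils_config exposes on
# the returned object.
config = managed_daemon_config()
config.log_level = 1
utils_libvirtd.Libvirtd().restart()
# ... run checks ...
config.restore()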
def remove_key_for_modular_daemon(params, remote_dargs=None):
    """
    Remove some configuration keys if the modular daemon is enabled.
    If you set "do_search" or/and "no_search" in params, it first checks
    the values and then removes the keys from the config file.

    :param params: The params to use
    :param remote_dargs: The params for remote access
    :return: remote.RemoteFile object for a remote file or
        utils_config.LibvirtConfigCommon object for a local
        configuration file
    """
    conf_obj = None
    session = None
    if remote_dargs:
        server_ip = remote_dargs.get("server_ip",
                                     remote_dargs.get("remote_ip"))
        server_pwd = remote_dargs.get("server_pwd",
                                      remote_dargs.get("remote_pwd"))
        server_user = remote_dargs.get("server_user",
                                       remote_dargs.get("remote_user"))
        if not all([server_ip, server_pwd, server_user]):
            raise exceptions.TestError("server_[ip|user|pwd] are necessary!")
        session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                        server_pwd, r"[\#\$]\s*$")

    if utils_split_daemons.is_modular_daemon(session):
        remove_key = eval(params.get("remove_key", "['remote_mode']"))
        conf_type = params.get("conf_type", "libvirt")
        search_cond = eval(params.get("do_search", '{}'))
        no_search_cond = eval(params.get("no_search", '{}'))
        for k, v in search_cond.items():
            if not re.search(v, k, re.IGNORECASE):
                LOG.debug("The key '%s' does not contain '%s', "
                          "no need to remove %s in %s conf file.",
                          k, v, remove_key, conf_type)
                return
        for k, v in no_search_cond.items():
            if re.search(v, k, re.IGNORECASE):
                LOG.debug("The key '%s' contains '%s', "
                          "no need to remove %s in %s conf file.",
                          k, v, remove_key, conf_type)
                return
        conf_obj = remove_key_in_conf(remove_key, conf_type=conf_type,
                                      remote_params=remote_dargs)
    if session:
        session.close()
    return conf_obj
def get_daemon_configs():
    """
    Get the daemon config file paths.

    :returns: the daemon config and socket config file paths
    """
    if utils_split_daemons.is_modular_daemon():
        daemon_conf = "/etc/libvirt/virtproxyd.conf"
        daemon_socket_conf = "/usr/lib/systemd/system/virtproxyd-tls.socket"
    else:
        daemon_conf = "/etc/libvirt/libvirtd.conf"
        daemon_socket_conf = "/usr/lib/systemd/system/libvirtd-tls.socket"
    return daemon_conf, daemon_socket_conf
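# Usage sketch for get_daemon_configs: the tuple unpacks into the .conf
# file to edit and the TLS socket unit to override, whichever daemon mode
# the host runs.
daemon_conf, daemon_socket_conf = get_daemon_configs()
logging.debug("daemon conf: %s, socket unit: %s",
              daemon_conf, daemon_socket_conf)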
def destroy_nodedev(test, params):
    """
    Destroy (stop) a device on the node.

    :params: the parameter dictionary
    """
    dev_name = params.get("nodedev_dev_name")
    if dev_name == "nodedev_NIC_name":
        dev_name = params.get("nodedev_NIC_name")
    else:
        # Check nodedev value
        # if not check_nodedev(dev_name):
        #     logging.info(result.stdout)
        dev_name = params.get("nodedev_new_dev")

    options = params.get("nodedev_options")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    result = virsh.nodedev_destroy(dev_name, options, uri=uri, debug=True,
                                   unprivileged_user=unprivileged_user)
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            test.fail("%d is not an expected command return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            # Check nodedev value
            if not check_nodedev(dev_name):
                logging.info(result.stdout.strip())
            else:
                test.fail("The relevant directory still exists "
                          "or mismatches the result")
def prepare_hook_file(hook_op):
    """
    Create hook file.
    """
    logging.info("hook script: %s", hook_op)
    hook_lines = hook_op.split(';')
    hook_dir = os.path.dirname(hook_file)
    if not os.path.exists(hook_dir):
        os.mkdir(hook_dir)
    with open(hook_file, 'w') as hf:
        hf.write('\n'.join(hook_lines))
    os.chmod(hook_file, 0o755)

    # Restart libvirtd
    libvirtd.restart()
    if utils_split_daemons.is_modular_daemon() and test_network:
        utils_libvirtd.Libvirtd("virtnetworkd").restart()
def __init__(self, logging_handler=None, logging_params=(),
             logging_pattern=r'.*'):
    """
    :param logging_handler: Callback function to handle logging
    :param logging_params: Where the log is stored
    :param logging_pattern: Regex for filtering specific log lines
    """
    if not utils_split_daemons.is_modular_daemon():
        raise exceptions.TestFail(
            "Embedded qemu driver needs modular daemon mode.")
    self.tail = None
    self.running = False
    self.service_exec = "virt-qemu-run"
    # Count the qemu processes already running on the host.
    cmd = "pgrep qemu | wc -l"
    self.qemu_pro_num = int(
        process.run(cmd, shell=True).stdout_text.strip())
    self.logging_handler = logging_handler
    self.logging_params = logging_params
    self.logging_pattern = logging_pattern
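# Usage sketch for the constructor above (assumed to belong to an
# EmbeddedQemuSession-style helper wrapping virt-qemu-run): record the
# qemu process count up front so the test can later tell whether its
# embedded guest added a process.
session = EmbeddedQemuSession(logging_handler=logging.debug)
logging.debug("qemu processes before start: %d", session.qemu_pro_num)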
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """
    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support", "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")

    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # Update the polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        if pool_type is not None:
            options = " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # Write data to file
            write_file(file_path)

            # Set length for calculating offset + length in the following
            # func get_pre_post_digest() and digest()
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get pre region and post region digests when offset and
                length are given.

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)
                return (digest_pre, digest_post)

            # Get pre and post digests before the operation for comparison
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digests after the operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post parts before and after
                if ori_pre_digest == aft_pre_digest and \
                        ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not match"
                              " in %s" % operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before the operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True,
                        shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after the operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s"
                          % (operation, result.stderr))
            # Compare the changed part on volume and file
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed",
                             operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)

            # Create one disk xml and attach it to the VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw',
                                          'file', 'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()

            # Write 100M data into the disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Download the volume to local with the sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                              download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path,
                                        options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)

            # Check download image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
                    download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))

            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Do volume upload, uploading the sparse file downloaded above.
            result = virsh.vol_upload(upload_sparse_file, download_file_path,
                                      options, unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
                    upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the new clone volume.
    5. Delete the volume and pool.
    """
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Using algorithms other than zero needs scrub installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann",
                            "schneier", "pfitzner7", "pfitzner33", "random"]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exists" % pool_name)

    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and \
                        not libvirt_version.version_compare(6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name),
                    encryption_password, test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in the pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)
        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and \
                clone_option.count("prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # Update the polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name,
                                       clone_option, debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s"
                          % clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri, debug=True)
                unsupported_err = ["Unsupported algorithm",
                                   "no such pattern sequence"]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s"
                                  % wipe_result.stderr.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expect wiped volume format is raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expect wipe volume fail, but it ran"
                              " successfully.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expect clone volume fail, but it ran successfully.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name, pool_name, alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri, debug=True)
            unsupported_err = ["Unsupported algorithm",
                               "no such pattern sequence"]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s"
                              % wipe_result.stderr.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s",
                                      key, virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s",
                                      key, qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expect wiped volume format is raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expect wipe volume fail, but it ran"
                          " successfully.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo can be used, because qemu-guest-agent
    needs to be installed in the guest

    The command creates a snapshot (disk and RAM) from arguments,
    covering the following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """
    if not virsh.has_help_command('snapshot-create-as'):
        test.cancel("This version of libvirt does not support "
                    "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    machine_type = params.get("machine_type", "")
    disk_device = params.get("disk_device", "")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    transport = params.get("transport", "")

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if disk_device == 'lun' and machine_type == 's390-ccw-virtio':
        params['disk_target_bus'] = 'scsi'
        logging.debug("Setting target bus scsi because machine type has "
                      "virtio 1.0. See "
                      "https://bugzilla.redhat.com/show_bug.cgi?id=1365823")

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk is not supported in"
                        " current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not supported in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    if libvirt_version.version_compare(5, 5, 0):
        # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and
        # --print-xml to be used together.
        if "--no-metadata" in options and "--print-xml" in options:
            logging.info("--no-metadata and --print-xml can be used together "
                         "in this libvirt version. Not expecting a failure.")
            status_error = "no"

    # This is brought by the new feature: block-dev
    if libvirt_version.version_compare(6, 0, 0) and transport == "rdma":
        test.cancel("transport protocol 'rdma' is not yet supported")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the parameters have the disk without "file=", we only need
        # to add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of the original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate an empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk)
        with open(bad_disk, 'w') as bad_file:
            pass

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         params.get(external_disk))
                process.run("qemu-img create -f qcow2 %s 1G" % disk_path,
                            shell=True)
        # Only chmod the last external disk for the negative case
        if dac_denial:
            process.run("chmod 500 %s" % disk_path, shell=True)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_conf_dict = {}
    libvirtd_log_path = None
    conf_type = "libvirtd"
    if utils_split_daemons.is_modular_daemon():
        conf_type = "virtqemud"
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf_dict["log_level"] = '1'
            libvirtd_conf_dict["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                             "libvirtd.log")
            libvirtd_conf_dict["log_outputs"] = ('"1:file:%s"'
                                                 % libvirtd_log_path)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)
            logging.debug("the libvirtd config file content is:\n %s"
                          % libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # Set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if we have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if qemu-ga got killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As it is managed by systemd and set as autostart,
                    # qemu-ga could be restarted, so use systemctl to
                    # stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        test.cancel("Fail to stop agent in guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove the channel if it exists
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot if not print xml and
        # multi disks are specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                process.run("qemu-img create -f qcow2 %s 200M" % disk_path,
                            shell=True)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % list(string.ascii_lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # For multi snapshots without a specific snapshot name,
                # the snapshot name is a time string with 1 second
                # increments; to avoid a snapshot failure with the same
                # name, sleep 1 second here.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # Check status_error
            if status_error == "yes":
                if status == 0:
                    test.fail("Run successfully with wrong command!")
                else:
                    # Check that the memspec file is removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            test.fail("Run failed but file %s exists"
                                      % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file has already been removed")
                    # Check domain xml is not updated if reuse external fails
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            test.fail("Domain xml should not be "
                                      "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    test.fail("Run failed with right command: %s" % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(test, vm_name, options, option_dict,
                                    output, snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    test.fail("'%s' was found: %s"
                                              % (pattern, line))
    finally:
        if vm.is_alive():
            vm.destroy()
        # Recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            test.fail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # Remove bad disks
        if bad_disk is not None:
            os.remove(bad_disk)

        # Remove attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(data_dir.get_tmp_dir(),
                                         'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(data_dir.get_tmp_dir(),
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # Restore config
        if config_format and qemu_conf:
            qemu_conf.restore()
        if libvirtd_conf:
            libvirtd_conf.restore()
        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Test command virsh nodedev-dumpxml.

    step1. get param from params.
    step2. do nodedev dumpxml.
    step3. clean up.
    """
    def dump_nodedev_xml(dev_name, dev_opt="", **dargs):
        """
        Do dumpxml and check the result.

        step1. execute nodedev-dumpxml command.
        step2. compare info in xml with info in sysfs.

        :param dev_name: name of device.
        :param dev_opt: command extra options
        :param dargs: extra dict args
        """
        result = virsh.nodedev_dumpxml(dev_name, options=dev_opt, **dargs)
        libvirt.check_exit_status(result)
        logging.debug('Executing "virsh nodedev-dumpxml %s" finished.',
                      dev_name)
        # Compare info in xml with info in sysfs.
        nodedevice_xml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
        if not nodedevice_xml.validates:
            test.error("nodedev xml of %s is not validated." % (dev_name))
        # Get the dict of key to value in xml.
        # nodedev_dict_xml contains all the keys and values in xml that
        # need checking.
        nodedev_dict_xml = nodedevice_xml.get_key2value_dict()
        # Get the dict of key to path in sysfs.
        # nodedev_syspath_dict contains all the keys and the path of the
        # file which contains the information for each key.
        nodedev_syspath_dict = nodedevice_xml.get_key2syspath_dict()
        # Get the values contained in files.
        # nodedev_dict_sys contains all the keys and values in sysfs.
        nodedev_dict_sys = {}
        for key, filepath in list(nodedev_syspath_dict.items()):
            with open(filepath, 'r') as f:
                value = f.readline().rstrip('\n')
            nodedev_dict_sys[key] = value
        # Compare the values in xml and in syspath.
        for key in nodedev_dict_xml:
            xml_value = nodedev_dict_xml.get(key)
            sys_value = nodedev_dict_sys.get(key)
            if not xml_value == sys_value:
                if (key == 'numa_node'
                        and not libvirt_version.version_compare(1, 2, 5)):
                    logging.warning("key: %s in xml is not supported yet"
                                    % key)
                else:
                    test.error("key: %s in xml is %s, "
                               "but in sysfs is %s."
                               % (key, xml_value, sys_value))
            else:
                continue
        logging.debug("Comparing info in xml and info in sysfs finished "
                      "for device %s.", dev_name)

    def pci_devices_name(device_type):
        """
        Get the address of a pci device.

        :param device_type: type of device, such as pci, net, storage
        """
        devices_list = virsh.nodedev_list(tree='', cap=device_type)
        devices_name_list = devices_list.stdout.strip().splitlines()
        device_name = devices_name_list[0]
        return device_name

    # Init variables.
    status_error = ('yes' == params.get('status_error', 'no'))
    device_type = params.get('device_type', "")
    device_name = params.get('nodedev_device_name', 'ENTER.YOUR.PCI.DEVICE')
    if device_name.find('ENTER.YOUR.PCI.DEVICE') != -1:
        replace_name = pci_devices_name(device_type).strip()
        device_name = device_name.replace('ENTER.YOUR.PCI.DEVICE',
                                          replace_name).strip()
    device_opt = params.get('nodedev_device_opt', "")

    # acl polkit params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    # Change the polkit rule
    polkit_file = "/etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules"
    if os.path.exists(polkit_file):
        replace_cmd = ("sed -i 's/ENTER.YOUR.PCI.DEVICE/%s/g' %s"
                       % (device_name, polkit_file))
        cat_cmd = "cat %s" % polkit_file
        replace_output = process.run(replace_cmd, shell=True).stdout_text
        cat_output = process.run(cat_cmd, shell=True).stdout_text

    # Do nodedev dumpxml.
    try:
        time.sleep(10)
        dump_nodedev_xml(dev_name=device_name, dev_opt=device_opt,
                         **virsh_dargs)
        if status_error:
            test.fail('Nodedev dumpxml succeeded in negative test.')
    except Exception as e:
        if not status_error:
            test.fail('Nodedev dumpxml failed in positive test. '
                      'Error: %s' % e)
def run(test, params, env):
    """
    Test host_uuid parameter in libvirtd.conf.

    1) Change host_uuid in libvirtd.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check current host UUID by `virsh capabilities`;
    """
    def get_dmi_uuid():
        """
        Retrieve the UUID of DMI, which is usually used as the libvirt
        daemon host UUID.

        :return: DMI UUID if it can be located or None if it can't.
        """
        uuid_paths = [
            '/sys/devices/virtual/dmi/id/product_uuid',
            '/sys/class/dmi/id/product_uuid',
        ]
        for path in uuid_paths:
            if os.path.isfile(path):
                with open(path) as dmi_fp:
                    uuid = dmi_fp.readline().strip().lower()
                return uuid

    uuid_type = params.get("uuid_type", "lowercase")
    expected_result = params.get("expected_result", "success")
    new_uuid = params.get("new_uuid", "")
    # We expect to get a standard UUID format on success.
    if expected_result == 'success':
        expected_uuid = str(uuid.UUID(new_uuid))

    config = utils_config.LibvirtdConfig()
    if utils_split_daemons.is_modular_daemon():
        config = utils_config.VirtQemudConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        orig_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Original host UUID is %s' % orig_uuid)

        if uuid_type == 'not_set':
            # Remove `host_uuid` in libvirtd.conf.
            del config.host_uuid
        elif uuid_type == 'unterminated':
            # Change `host_uuid` in libvirtd.conf.
            config.set_raw('host_uuid', '"%s' % new_uuid)
        elif uuid_type == 'unquoted':
            config.set_raw('host_uuid', new_uuid)
        elif uuid_type == 'single_quoted':
            config.set_raw('host_uuid', "'%s'" % new_uuid)
        else:
            config.host_uuid = new_uuid

        # Restart libvirtd to make the change take effect. May raise
        # ConfigError if it does not succeed.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                test.fail('Libvirtd is expected to be started '
                          'with host_uuid = %s' % config['host_uuid'])
            return
        if expected_result == 'unbootable':
            test.fail('Libvirtd is not expected to be started '
                      'with host_uuid = %s' % config['host_uuid'])

        cur_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Current host UUID is %s' % cur_uuid)

        if expected_result == 'success':
            if cur_uuid != expected_uuid:
                test.fail("Host UUID didn't change as expected"
                          " from %s to %s, but to %s"
                          % (orig_uuid, expected_uuid, cur_uuid))
        # libvirtd should use the system DMI UUID for all_digit_same or
        # not_set host_uuid.
        elif expected_result == 'dmi_uuid':
            dmi_uuid = get_dmi_uuid()
            logging.debug("DMI UUID is %s." % dmi_uuid)
            if dmi_uuid is not None and cur_uuid != dmi_uuid:
                test.fail("Host UUID didn't change from "
                          "%s to DMI UUID %s as expected, but to %s"
                          % (orig_uuid, dmi_uuid, cur_uuid))
    finally:
        config.restore()
        if not libvirtd.is_running():
            libvirtd.start()
def run(test, params, env):
    """
    Test command: virsh secret-define <file>
                  virsh secret-undefine <secret>

    The testcase is to define or modify a secret from an XML file,
    then undefine it.
    """
    # MAIN TEST CODE ###
    # Process cartesian parameters
    secret_ref = params.get("secret_ref")
    ephemeral = params.get("ephemeral_value", "no")
    private = params.get("private_value", "no")
    modify_volume = ("yes" == params.get("secret_modify_volume", "no"))
    remove_uuid = ("yes" == params.get("secret_remove_uuid", "no"))

    if secret_ref == "secret_valid_uuid":
        # Generate valid uuid
        cmd = "uuidgen"
        status, uuid = process.getstatusoutput(cmd)
        if status:
            test.cancel("Failed to generate valid uuid")
    elif secret_ref == "secret_invalid_uuid":
        uuid = params.get(secret_ref)

    # libvirt acl related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    get_value_acl = "yes" == params.get("get_value_acl", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    get_value_error = "yes" == params.get("get_value_error", "no")
    define_readonly = "yes" == params.get("secret_define_readonly", "no")
    undefine_readonly = "yes" == params.get("secret_undefine_readonly", "no")
    expect_msg = params.get("secret_err_msg", "")

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    # Get a full path of tmpfile; the tmpfile need not exist
    tmp_dir = data_dir.get_tmp_dir()
    volume_path = os.path.join(tmp_dir, "secret_volume")

    secret_xml_obj = SecretXML(ephemeral, private)
    secret_xml_obj.uuid = uuid
    secret_xml_obj.volume = volume_path
    secret_xml_obj.usage = "volume"

    secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml")

    # Run the test
    try:
        if define_acl:
            process.run("chmod 666 %s" % secret_xml_obj.xml, shell=True)
            cmd_result = virsh.secret_define(secret_xml_obj.xml, **acl_dargs)
        else:
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True,
                                             readonly=define_readonly)
        libvirt.check_exit_status(cmd_result, define_error)
        if cmd_result.exit_status:
            if define_readonly:
                if not re.search(expect_msg, cmd_result.stderr.strip()):
                    test.fail("Fail to get expected err msg: %s"
                              % expect_msg)
                else:
                    logging.info("Get expected err msg: %s", expect_msg)
            return

        # Check ephemeral attribute
        exist = os.path.exists(secret_obj_xmlfile)
        if (ephemeral == "yes" and exist) or \
                (ephemeral == "no" and not exist):
            test.fail("The ephemeral attribute did not work as expected")

        # Check private attribute
        virsh.secret_set_value(uuid, SECRET_BASE64, debug=True)
        if get_value_acl:
            cmd_result = virsh.secret_get_value(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_get_value(uuid, debug=True)
        libvirt.check_exit_status(cmd_result, get_value_error)
        status = cmd_result.exit_status
        err_msg = "The private attribute did not work as expected"
        if private == "yes" and not status:
            test.fail(err_msg)
        if private == "no" and status:
            if not get_value_error:
                test.fail(err_msg)

        if modify_volume:
            volume_path = os.path.join(tmp_dir, "secret_volume_modify")
            secret_xml_obj.volume = volume_path
            cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True)
            if cmd_result.exit_status == 0:
                test.fail("Expect fail on redefine after modifying "
                          "volume, but it succeeded")
        if remove_uuid:
            secret_xml_obj2 = SecretXML(ephemeral, private)
            secret_xml_obj2.volume = volume_path
            secret_xml_obj2.usage = "volume"
            cmd_result = virsh.secret_define(secret_xml_obj2.xml, debug=True)
            if cmd_result.exit_status == 0:
                test.fail("Expect fail on redefine after removing "
                          "uuid, but it succeeded")

        if undefine_acl:
            cmd_result = virsh.secret_undefine(uuid, **acl_dargs)
        else:
            cmd_result = virsh.secret_undefine(uuid, debug=True,
                                               readonly=undefine_readonly)
        libvirt.check_exit_status(cmd_result, undefine_error)
        if undefine_readonly:
            if not re.search(expect_msg, cmd_result.stderr.strip()):
                test.fail("Fail to get expected err msg: %s" % expect_msg)
            else:
                logging.info("Get expected err msg: %s", expect_msg)
    finally:
        # cleanup
        virsh.secret_undefine(uuid, ignore_status=True)
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        if os.path.exists(secret_obj_xmlfile):
            os.unlink(secret_obj_xmlfile)
def run(test, params, env):
    """
    Test command: virsh net-start.
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default",
                                           "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params
    route_test = "yes" == params.get("route_test", "no")
    firewalld_operate = params.get("firewalld_operate", None)

    # Make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instances
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for the following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")
    try:
        # Make sure the default network is active
        if not default_netxml.active:
            default_netxml.active = True

        # Inactivate the default network if the test needs it
        if inactive_default:
            logging.info("Stopped default network")
            default_netxml.active = False

        # State before running the command
        origin_state = virsh_instance.net_state_dict()
        logging.debug("Origin network(s) state: %s", origin_state)

        if route_test:
            # Define a network "def" whose route address "192.168.122.x"
            # conflicts with the default network.
            # 1. Start "def" first, then start "default".
            current_state = virsh_instance.net_state_dict()
            if 'def' in current_state:
                virsh.net_destroy("def", ignore_status=True)
                virsh.net_undefine("def", ignore_status=True)
            expect_fail = "yes" == params.get("expect_start_fail", "no")
            test_xml = network_xml.NetworkXML(network_name="def")
            test_xml.forward = {'mode': 'nat'}
            test_xml.routes = [{'address': '192.168.122.0',
                                'prefix': '24',
                                'gateway': '192.168.100.1'}]
            ipxml = IPXML(address='192.168.100.1', netmask='255.255.255.0')
            range_4 = network_xml.RangeXML()
            range_4.attrs = {'start': '192.168.100.2',
                             'end': '192.168.100.254'}
            ipxml.dhcp_ranges = range_4
            test_xml.ip = ipxml
            test_xml.define()
            virsh.net_start("def")
            # Starting "default" now should fail
            result = virsh.net_start("default")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)

            # 2. Start "default" first, then start "def".
            virsh.net_destroy("def")
            virsh.net_start("default")
            current_state11 = virsh_instance.net_state_dict()
            logging.debug("Network(s) state before 2nd start: %s",
                          current_state11)
            # Starting "def" now should fail
            result = virsh.net_start("def")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)
            current_state12 = virsh_instance.net_state_dict()
            logging.debug("Network(s) state after 2nd start: %s",
                          current_state12)
            # Clean up the env
            virsh.net_undefine("def")
        else:
            if net_ref == "netname":
                net_ref = default_netxml.name
            elif net_ref == "netuuid":
                net_ref = default_netxml.uuid

            if params.get('setup_libvirt_polkit') == 'yes':
                virsh_dargs = {'uri': virsh_uri,
                               'unprivileged_user': unprivileged_user,
                               'debug': False,
                               'ignore_status': True}
            if params.get('net_start_readonly', 'no') == 'yes':
                virsh_dargs = {'uri': uri,
                               'debug': True,
                               'readonly': True,
                               'ignore_status': True}

        # Run test case
        if 'unprivileged_user' in virsh_dargs and status_error:
            test_virsh = virsh.VirshPersistent(
                unprivileged_user=virsh_dargs['unprivileged_user'])
            virsh_dargs.pop('unprivileged_user')
            result = test_virsh.net_start(net_ref, extra, **virsh_dargs)
            test_virsh.close_session()
        elif not route_test:
            result = virsh.net_start(net_ref, extra, **virsh_dargs)
        logging.debug(result)
        status = result.exit_status

        # Get current net_state_dict
        current_state = virsh_instance.net_state_dict()
        logging.debug("Current network(s) state: %s", current_state)
        if 'default' not in current_state:
            test.fail('Network "default" cannot be found')

        if firewalld_operate:
            # The current network is active; make sure firewalld is active.
            # If not, restart firewalld, then restart libvirtd.
            firewalld_service = service.Factory.create_service("firewalld")
            libvirtd_obj = utils_libvirtd.Libvirtd()
            if not firewalld_service.status():
                firewalld_service.start()
                libvirtd_obj.restart()
                virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            if firewalld_operate == "restart":
                # After firewalld restart, destroy and start the network
                firewalld_service.restart()
                time.sleep(5)
                res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                # Need to add wait time: libvirt doesn't know that firewalld
                # has restarted until it gets the dbus message, but that
                # message won't arrive until some time after all of libvirt's
                # chains/rules have already been removed by the firewalld
                # restart. Refer to bug 1942805.
                time.sleep(5)
                res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
            elif firewalld_operate == "stop_start":
                # Start a network which was destroyed before firewalld restart
                res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                firewalld_service.stop()
                firewalld_service.start()
                time.sleep(5)
                res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
            logging.debug("firewalld_operate is %s, result for start network "
                          "after firewalld restart: %s",
                          firewalld_operate, res2)
            status1 = res1.exit_status | res2.exit_status
            if status1:
                test.fail("Start or destroy network after firewalld "
                          "restart failed!")

        # Check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            # Get current net_state_dict
            current_state = virsh_instance.net_state_dict()
            logging.debug("Current network(s) state: %s", current_state)
            is_default_active = current_state['default']['active']
            if not is_default_active:
                test.fail("Executed cmd successfully but default is actually "
                          "inactive.")
    finally:
        # Query the state before the persistent session is closed
        current_state = virsh_instance.net_state_dict()
        virsh_instance.close_session()
        if "def" in current_state:
            virsh.net_destroy("def", ignore_status=True)
            virsh.net_undefine("def", ignore_status=True)
        virsh.net_start('default', debug=True, ignore_status=True)
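# For reference, the conflicting-route network that the route_test branch
# above defines serializes to roughly the following XML (reconstructed from
# the NetworkXML calls, not captured output):
#     <network>
#       <name>def</name>
#       <forward mode='nat'/>
#       <route address='192.168.122.0' prefix='24' gateway='192.168.100.1'/>
#       <ip address='192.168.100.1' netmask='255.255.255.0'>
#         <dhcp>
#           <range start='192.168.100.2' end='192.168.100.254'/>
#         </dhcp>
#       </ip>
#     </network>
# The route overlaps the 'default' network's 192.168.122.0/24 subnet, which
# is why starting both networks at once is expected to fail.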
def run(test, params, env):
    """
    Test command: virsh nwfilter-define.

    1) Prepare parameters.
    2) Set options of virsh define.
    3) Run define command.
    4) Check result.
    5) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    filter_uuid = params.get("filter_uuid",
                             "11111111-b071-6127-b4ec-111111111111")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    filter_xml = params.get("filter_create_xml_file")
    options_ref = params.get("options_ref", "")
    status_error = params.get("status_error", "no")
    boundary_test_skip = "yes" == params.get("boundary_test_skip")
    new_uuid = "yes" == params.get("new_uuid", 'no')
    bug_url = params.get("bug_url")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if exist_filter == filter_name and new_uuid:
        # Since commit 46a811d, updating a filter with a new uuid will fail.
        if libvirt_version.version_compare(1, 2, 7):
            status_error = 'yes'
        else:
            status_error = 'no'

    try:
        if filter_xml == "invalid-filter-xml":
            tmp_xml = xml_utils.TempXMLFile()
            tmp_xml.write('"<filter><<<BAD>>><\'XML</name\>'
                          '!@#$%^&*)>(}>}{CORRUPTE|>!</filter>')
            tmp_xml.flush()
            filter_xml = tmp_xml.name
            logging.info("Test invalid xml is: %s" % filter_xml)
        elif filter_xml:
            # Create filter xml
            new_filter = libvirt_xml.NwfilterXML()
            filterxml_backup = new_filter.new_from_filter_dumpxml(exist_filter)
            # Back up the uuid if we are only updating the existing filter
            if exist_filter == filter_name and not new_uuid:
                filter_uuid = filterxml_backup.uuid
                params['filter_uuid'] = filter_uuid
            filterxml = utlv.create_nwfilter_xml(params)
            filterxml.xmltreefile.write(filter_xml)

        # Run command
        cmd_result = virsh.nwfilter_define(filter_xml, options=options_ref,
                                           unprivileged_user=unprivileged_user,
                                           uri=uri,
                                           ignore_status=True, debug=True)
        status = cmd_result.exit_status

        # Check result
        chk_result = check_list(filter_uuid, filter_name)
        xml_path = "%s/%s.xml" % (NWFILTER_ETC_DIR, filter_name)
        if status_error == "yes":
            if status == 0:
                if boundary_test_skip:
                    test.cancel("Boundary check commit 4f20943 not"
                                " in this libvirt build yet.")
                else:
                    err_msg = "Run successfully with wrong command."
                    if bug_url:
                        err_msg += " Check more info in %s" % bug_url
                    test.fail(err_msg)
        elif status_error == "no":
            if status:
                err_msg = "Run failed with right command."
                if bug_url:
                    err_msg += " Check more info in %s" % bug_url
                test.fail(err_msg)
            if not chk_result:
                test.fail("Can't find filter in nwfilter-list output")
            if not os.path.exists(xml_path):
                test.fail("Can't find filter xml under %s" % NWFILTER_ETC_DIR)
            logging.info("Dump the xml after define:")
            virsh.nwfilter_dumpxml(filter_name, ignore_status=True,
                                   debug=True)
    finally:
        # Clean env
        if exist_filter == filter_name:
            logging.info("Restore the existing filter: %s" % exist_filter)
            virsh.nwfilter_undefine(filter_name, ignore_status=True)
            virsh.nwfilter_define(filterxml_backup.xml, ignore_status=True)
        else:
            if chk_result:
                virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if os.path.exists(filter_xml):
            os.remove(filter_xml)
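# The test above relies on a module-level helper, check_list(), that is not
# shown in this excerpt. A minimal sketch of what it plausibly does, namely
# scanning `virsh nwfilter-list` output for the uuid/name pair, is given
# below; the body is an assumption, the real implementation may differ.
def check_list_sketch(filter_uuid, filter_name):
    """
    Hypothetical helper: return True if the given uuid and name appear
    together in `virsh nwfilter-list` output.
    """
    cmd_result = virsh.nwfilter_list(ignore_status=True, debug=True)
    if cmd_result.exit_status:
        return False
    # Skip the two table-header lines (assumed layout: "UUID  Name")
    for line in cmd_result.stdout.strip().splitlines()[2:]:
        fields = line.split()
        if (len(fields) >= 2 and fields[0] == filter_uuid
                and fields[1] == filter_name):
            return True
    return False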
def run(test, params, env):
    """
    Test migration with specified max bandwidth

    1) Set both precopy and postcopy bandwidth by virsh migrate parameter
    2) Set bandwidth before migration starts by migrate parameter --postcopy-bandwidth
    3) Set bandwidth when migration is in post-copy phase
    4) Set bandwidth when migration is in pre-copy phase
    5) Set bandwidth when guest is running and before migration starts
    6) Set bandwidth before migration starts by migrate parameter --bandwidth
    7) Set bandwidth when guest is running and before migration starts
    8) Set bandwidth when guest is shutoff
    9) Do live migration with default max bandwidth

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_speed(exp_migrate_speed):
        """
        Get migration speed and compare with the value in exp_migrate_speed

        :param exp_migrate_speed: the dict of expected migration speed
        :raise: test.fail if speed does not match
        """
        if exp_migrate_speed.get("precopy_speed"):
            output = virsh.migrate_getspeed(vm_name,
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['precopy_speed'] != output:
                virsh.migrate_getspeed(vm_name, extra="--postcopy",
                                       **virsh_args)
                test.fail("Migration speed is expected to be '%s MiB/s', but "
                          "'%s MiB/s' found!"
                          % (exp_migrate_speed['precopy_speed'], output))
        if exp_migrate_speed.get("postcopy_speed"):
            output = virsh.migrate_getspeed(vm_name, extra="--postcopy",
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['postcopy_speed'] != output:
                test.fail("Postcopy migration speed is expected to be '%s "
                          "MiB/s', but '%s MiB/s' found!"
                          % (exp_migrate_speed['postcopy_speed'], output))

    def check_bandwidth(params):
        """
        Check migration bandwidth

        :param params: the parameters used
        :raise: test.fail if migration bandwidth does not match expected values
        """
        exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
        migrate_postcopy_cmd = "yes" == params.get("migrate_postcopy_cmd",
                                                   "yes")
        if extra.count("bandwidth"):
            get_speed(exp_migrate_speed)
        if params.get("set_postcopy_in_precopy_phase"):
            virsh.migrate_setspeed(vm_name,
                                   params.get("set_postcopy_in_precopy_phase"),
                                   "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)
        params.update({'compare_to_value':
                       exp_migrate_speed.get("precopy_speed",
                                             "8796093022207")})
        if exp_migrate_speed.get("precopy_speed", "0") == "8796093022207":
            params.update({'domjob_ignore_status': True})
        libvirt_domjobinfo.check_domjobinfo(vm, params)

        if migrate_postcopy_cmd:
            if not utils_misc.wait_for(
                    lambda: not virsh.migrate_postcopy(
                        vm_name, **virsh_args).exit_status, 5):
                test.fail("Failed to set migration postcopy.")
            if params.get("set_postcopy_in_postcopy_phase"):
                virsh.migrate_setspeed(
                    vm_name, params.get("set_postcopy_in_postcopy_phase"),
                    "--postcopy", **virsh_args)
                get_speed(exp_migrate_speed)
            time.sleep(5)
            if exp_migrate_speed.get("postcopy_speed"):
                params.update({'compare_to_value':
                               exp_migrate_speed["postcopy_speed"]})
                params.update({'domjob_ignore_status': False})
                libvirt_domjobinfo.check_domjobinfo(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage: %s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --verbose")
    jobinfo_item = params.get("jobinfo_item", 'Memory bandwidth:')
    set_postcopy_speed_before_mig = params.get("set_postcopy_speed_before_mig")
    set_precopy_speed_before_mig = params.get("set_precopy_speed_before_mig")
    set_precopy_speed_before_vm_start = params.get(
        "set_precopy_speed_before_vm_start")
    stress_package = params.get("stress_package")
    exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirt_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))
    action_during_mig = check_bandwidth
    params.update({"action_during_mig_params_exists": "yes"})
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    libvirtd_conf = None
    mig_result = None
    remove_dict = {}
    src_libvirt_file = None

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "postcopy migration bandwidth function.")

    # Params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # For safety reasons, we'd better back up the xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = "libvirtd"
            if utils_split_daemons.is_modular_daemon():
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)

        if set_precopy_speed_before_vm_start:
            if vm.is_alive():
                vm.destroy()
            virsh.migrate_setspeed(vm_name, set_precopy_speed_before_vm_start,
                                   **virsh_args)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        if any([set_precopy_speed_before_mig, set_postcopy_speed_before_mig]):
            if set_precopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name, set_precopy_speed_before_mig,
                                       **virsh_args)
            if set_postcopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name, set_postcopy_speed_before_mig,
                                       "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                    thread_timeout=900, ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig, extra_opts=extra,
                                    **extra_args)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configuration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
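# For reference, the 'exp_migrate_speed' cfg parameter that the checks above
# eval() is a dict literal; a plausible example value (illustrative only, not
# taken from a real cfg file) would be:
#     exp_migrate_speed = "{'precopy_speed': '100', 'postcopy_speed': '200'}"
# i.e. expected precopy/postcopy bandwidths in MiB/s, matching the units that
# `virsh migrate-getspeed` reports. The magic value 8796093022207 used in
# check_bandwidth() appears to be the hypervisor's "unlimited" default.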
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1. Make sure the network exists.
    2. Prepare network status.
    3. Perform virsh net-destroy operation.
    4. Check if the network has been destroyed.
    5. Recover network environment.
    6. Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # Prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    # Back up the current network xml
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug("destroy network as we need to test an inactive "
                          "network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # Make sure there is an interface with the default network as source
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # Check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # Check that the libvirtd pid did not change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug("libvirtd does not crash after destroying "
                                  "the network!")
                    status = 0
                if check_libvirtd:
                    # Destroy vm, check that the libvirtd pid did not change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug("libvirtd does not crash after "
                                      "destroying the vm!")
                        status = 0
                elif check_vm:
                    # Restart libvirtd and check that the vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail("vm shut down when the transient network "
                                  "was destroyed and libvirtd restarted")
                    else:
                        status = 0
        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()
    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri, readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status

        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug("transient network should not exist after "
                              "destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
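# Note: virsh.net_state_dict(), used heavily above, maps each network name to
# its state flags, e.g. (illustrative shape, not captured output):
#     {'default': {'active': True, 'persistent': True, 'autostart': True}}
# which is why the prepare and recovery code indexes
# [network_name]['active'], ['persistent'] and ['autostart'].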
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources

    1. Prepare env to provide source storage if use localhost:
       1) For 'netfs' source type, setup nfs server
       2) For 'iscsi' source type, setup iscsi server
       3) For 'logical' type pool, setup iscsi storage to create vg
       4) Prepare srcSpec xml file if not given
    2. Find the pool sources by running virsh cmd
    """
    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "127.0.0.1")
    source_initiator = params.get("source_initiator", "")
    srcSpec = params.get("source_Spec", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    ro_flag = "yes" == params.get("readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise exceptions.TestSkipError("API acl test not supported in "
                                           "current libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if source_type == "iscsi-direct":
            test.cancel("iscsi-direct pool is not supported in current "
                        "libvirt version")

    if not source_type:
        raise exceptions.TestFail("Command requires <type> value")

    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    # Prepare source storage
    if source_host == "127.0.0.1":
        if source_type == "netfs":
            # Set up nfs
            res = utils_test.libvirt.setup_or_cleanup_nfs(True)
            selinux_bak = res["selinux_status_bak"]
            cleanup_nfs = True
        if source_type in ["iscsi", "logical", "iscsi-direct"]:
            # Set up iscsi
            iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
            # If we got nothing, force failure
            if not iscsi_device:
                raise exceptions.TestFail("Did not setup an iscsi device")
            cleanup_iscsi = True
            if source_type == "logical":
                # Create vg by using iscsi device
                try:
                    lv_utils.vg_create(vg_name, iscsi_device)
                except Exception as detail:
                    utils_test.libvirt.setup_or_cleanup_iscsi(False)
                    raise exceptions.TestFail("vg_create failed: %s" % detail)
                cleanup_logical = True

    # Prepare srcSpec xml
    if srcSpec:
        if srcSpec == "INVALID.XML":
            src_xml = "<invalid><host name='#@!'/><?source>"
        elif srcSpec == "VALID.XML":
            if source_type == "iscsi-direct":
                src_xml = ("<source><host name='%s'/><initiator>"
                           "<iqn name='%s'/></initiator></source>"
                           % (source_host, source_initiator))
            else:
                src_xml = "<source><host name='%s'/></source>" % source_host
        srcSpec = xml_utils.TempXMLFile().name
        with open(srcSpec, "w+") as srcSpec_file:
            srcSpec_file.write(src_xml)
            # Rewind before reading back, otherwise read() returns nothing
            srcSpec_file.seek(0)
            logging.debug("srcSpec file content:\n%s", srcSpec_file.read())

    if params.get('setup_libvirt_polkit') == 'yes' and srcSpec:
        cmd = "chmod 666 %s" % srcSpec
        process.run(cmd)

    if ro_flag:
        logging.debug("Readonly mode test")

    # Run virsh cmd
    try:
        cmd_result = virsh.find_storage_pool_sources(
            source_type, srcSpec, ignore_status=True, debug=True,
            unprivileged_user=unprivileged_user, uri=uri, readonly=ro_flag)
        utils_test.libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # Clean up
        if cleanup_logical:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.run(cmd, shell=True).stdout_text
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(
                False, restore_selinux=selinux_bak)
def run(test, params, env):
    """
    Test the virsh pool commands with acl, initiate a pool then do
    following operations.

    (1) Undefine a given type pool
    (2) Define the pool from xml
    (3) Build given type pool
    (4) Start pool
    (5) Destroy pool
    (6) Refresh pool after start it
    (7) Run vol-list with the pool
    (8) Delete pool

    For negative cases, redo the failed step to let the case continue.
    Run cleanup at the end to restore the env.
    """
    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    # The file for dumped pool xml
    pool_xml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    vg_name = pool_name
    vol_path = os.path.join(pool_target, vol_name)
    define_acl = "yes" == params.get("define_acl", "no")
    undefine_acl = "yes" == params.get("undefine_acl", "no")
    start_acl = "yes" == params.get("start_acl", "no")
    destroy_acl = "yes" == params.get("destroy_acl", "no")
    build_acl = "yes" == params.get("build_acl", "no")
    delete_acl = "yes" == params.get("delete_acl", "no")
    refresh_acl = "yes" == params.get("refresh_acl", "no")
    vol_list_acl = "yes" == params.get("vol_list_acl", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    src_pool_error = "yes" == params.get("src_pool_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    undefine_error = "yes" == params.get("undefine_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    destroy_error = "yes" == params.get("destroy_error", "no")
    build_error = "yes" == params.get("build_error", "no")
    delete_error = "yes" == params.get("delete_error", "no")
    refresh_error = "yes" == params.get("refresh_error", "no")
    vol_list_error = "yes" == params.get("vol_list_error", "no")
    # Clean up flags:
    # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm
    # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster
    cleanup_env = [False, False, False, "", False]
    # libvirt acl related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user,
                 'debug': True}

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        if list_dumpxml_acl:
            result = virsh.pool_list(option, **acl_dargs)
        else:
            result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Did not find pool '%s' in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            test.fail("Expected pool '%s' to exist, but it was not found."
                      % pool_name)

    # Run Testcase
    kwargs = {'source_format': params.get('pool_source_format', 'ext4')}
    try:
        _pool = libvirt_storage.StoragePool()
        # Init a pool for test
        result = utlv.define_pool(pool_name, pool_type, pool_target,
                                  cleanup_env, **kwargs)
        utlv.check_exit_status(result, src_pool_error)
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        if list_dumpxml_acl:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs)
        else:
            xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (1)
        # Undefine pool
        if undefine_acl:
            result = virsh.pool_undefine(pool_name, **acl_dargs)
        else:
            result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, undefine_error)
        if undefine_error:
            check_pool_list(pool_name, "--all", False)
            # Redo under negative case to let the case continue
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
        else:
            check_pool_list(pool_name, "--all", True)

        # Step (2)
        # Define pool from XML file
        if define_acl:
            result = virsh.pool_define(pool_xml, **acl_dargs)
        else:
            result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, define_error)
        if define_error:
            # Redo under negative case to let the case continue
            result = virsh.pool_define(pool_xml)
            utlv.check_exit_status(result)

        # Step (3)
        # '--overwrite/--no-overwrite' just for fs/disk/logical type pool
        # disk/fs pool: as the prepare step already makes a label and creates
        # a filesystem for the disk, using '--overwrite' is necessary
        # logical pool: build will fail if the VG already exists, BZ#1373711
        if pool_type != "logical":
            option = ''
            if pool_type in ['disk', 'fs']:
                option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)
            if build_acl:
                result = virsh.pool_build(pool_name, option, **acl_dargs)
            else:
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
            utlv.check_exit_status(result, build_error)
            if build_error:
                # Redo under negative case to let the case continue
                result = virsh.pool_build(pool_name, option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

        # For an iSCSI pool, we need to discover targets before starting it
        if pool_type == 'iscsi':
            cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1'
            process.run(cmd, shell=True)

        # Step (4)
        # Pool start
        if start_acl:
            result = virsh.pool_start(pool_name, **acl_dargs)
        else:
            result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result, start_error)
        if start_error:
            # Redo under negative case to let the case continue
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (5)
        # Pool destroy
        if destroy_acl:
            result = virsh.pool_destroy(pool_name, **acl_dargs)
        else:
            result = virsh.pool_destroy(pool_name)
        if result:
            if destroy_error:
                test.fail("Expect fail, but run successfully.")
        else:
            if not destroy_error:
                test.fail("Pool %s destroy failed, not expected." % pool_name)
            else:
                # Redo under negative case to let the case continue
                if virsh.pool_destroy(pool_name):
                    logging.debug("Pool %s destroyed.", pool_name)
                else:
                    test.fail("Destroy pool %s failed." % pool_name)

        # Step (6)
        # Pool refresh for 'dir' type pool
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        if pool_type == "dir":
            os.mknod(vol_path)
            if refresh_acl:
                result = virsh.pool_refresh(pool_name, **acl_dargs)
            else:
                result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result, refresh_error)

        # Step (7)
        # Pool vol-list
        if vol_list_acl:
            result = virsh.vol_list(pool_name, **acl_dargs)
        else:
            result = virsh.vol_list(pool_name)
        utlv.check_exit_status(result, vol_list_error)

        # Step (8)
        # Pool delete for 'dir' type pool
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            test.fail("Destroy pool %s failed." % pool_name)
        if pool_type == "dir":
            if os.path.exists(vol_path):
                os.remove(vol_path)
            if delete_acl:
                result = virsh.pool_delete(pool_name, **acl_dargs)
            else:
                result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result, delete_error)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if not delete_error:
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists."
                              % pool_target)

        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
        if not _pool.delete_pool(pool_name):
            logging.error("Can't delete pool: %s", pool_name)
        if cleanup_env[2]:
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = process.run(cmd, shell=True).stdout_text
            lv_utils.vg_remove(vg_name)
            process.run("pvremove %s" % pv_name, shell=True)
        if cleanup_env[1]:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_env[0]:
            utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
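# The regex in check_pool_list() above parses `virsh pool-list` table rows,
# whose output looks roughly like (illustrative, not captured output):
#      Name          State    Autostart
#     -----------------------------------
#      temp_pool_1   active   no
# Each match yields a (name, state, autostart) tuple, hence item[0] holds the
# pool name that is compared against pool_name.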
def run(test, params, env):
    """
    Test command: virsh nwfilter-undefine.

    1) Prepare parameters.
    2) Run nwfilter-undefine command.
    3) Check result.
    4) Clean env
    """
    # Prepare parameters
    filter_ref = params.get("undefine_filter_ref", "")
    options_ref = params.get("undefine_options_ref", "")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    try:
        if unprivileged_user:
            if unprivileged_user.count('EXAMPLE'):
                unprivileged_user = '******'

        if not libvirt_version.version_compare(1, 1, 1):
            if params.get('setup_libvirt_polkit') == 'yes':
                test.cancel("API acl test not supported in current"
                            " libvirt version.")

        # Backup filter xml
        if filter_ref:
            new_filter = libvirt_xml.NwfilterXML()
            filterxml = new_filter.new_from_filter_dumpxml(filter_ref)
            logging.debug("the filter xml is: %s" % filterxml.xmltreefile)
            filter_xml = filterxml.xmltreefile.name

        # Run command
        cmd_result = virsh.nwfilter_undefine(filter_ref, options=options_ref,
                                             unprivileged_user=unprivileged_user,
                                             uri=uri,
                                             ignore_status=True, debug=True)
        status = cmd_result.exit_status

        # Check result
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command.")
        elif status_error == "no":
            if status:
                test.fail("Run failed with right command.")
            chk_result = check_list(filter_ref)
            if chk_result:
                test.fail("filter %s shows up in filter list." % filter_ref)
    finally:
        # Clean env
        if status == 0:
            virsh.nwfilter_define(filter_xml, options="",
                                  ignore_status=True, debug=True)
def run(test, params, env):
    """
    Test attach device with ccw address option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare test xml for different devices.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes.
    image_path = params.get("virt_disk_device_source",
                            "/var/lib/libvirt/images/test.img")
    backend_device = params.get("backend_device", "disk")
    logging.debug("backend device: %s", backend_device)

    hotplug = "yes" == params.get("virt_device_hotplug")
    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    expected_fails_msg = []
    error_msg = params.get("error_msg",
                           "cannot use CCW address type for device")
    expected_fails_msg.append(error_msg)

    device_obj = None
    ori_pid_libvirtd = None

    # Back up xml file.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if backend_device == "disk":
            device_obj = create_ccw_addr_disk(params)
        elif backend_device == "rng":
            device_obj = create_ccw_addr_rng(params)
        elif backend_device == "controller":
            device_obj = create_ccw_addr_controller(params)
        elif backend_device == "same_pci_slot":
            device_obj = create_same_pci_slot_disk(params)
        # libvirtd should not crash during the process
        if not utils_split_daemons.is_modular_daemon():
            ori_pid_libvirtd = process.getoutput("pidof libvirtd")
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(device_obj)
            vmxml.sync()
        vm.start()
        vm.wait_for_login().close()
        if status_error:
            if hotplug:
                logging.info("attaching devices, expecting error...")
                result = virsh.attach_device(vm_name, device_obj.xml,
                                             debug=True)
                libvirt.check_result(result,
                                     expected_fails=expected_fails_msg)
            else:
                test.fail("VM started unexpectedly.")
    except virt_vm.VMStartError as e:
        if status_error:
            if hotplug:
                test.fail("In hotplug scenario, VM should start successfully "
                          "but did not. Error: %s" % str(e))
            else:
                logging.debug("VM failed to start as expected. Error: %s",
                              str(e))
        else:
            test.fail("VM failed to start. Error: %s" % str(e))
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    except xcepts.LibvirtXMLError as xml_error:
        if not define_error:
            test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            logging.info("As expected, failed to define VM")
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    except Exception as ex:
        test.fail("unexpected exception happened: %s" % str(ex))
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    else:
        if backend_device == "same_pci_slot":
            check_multifunction_is_on(vm_name, test)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        # Clean up images
        for file_path in [image_path]:
            if os.path.exists(file_path):
                os.remove(file_path)
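# create_ccw_addr_disk() and its siblings above are module-level helpers not
# shown in this excerpt. A minimal sketch of what such a builder might look
# like, a virtio disk whose <address> is forced to type='ccw', is below; the
# address attribute values and the exact builder calls are illustrative
# assumptions, not the upstream implementation.
def create_ccw_addr_disk_sketch(params):
    """
    Hypothetical builder: return a Disk device object carrying a CCW address.
    """
    from virttest.libvirt_xml.devices.disk import Disk

    image_path = params.get("virt_disk_device_source",
                            "/var/lib/libvirt/images/test.img")
    disk = Disk(type_name="file")
    disk.device = "disk"
    disk.source = disk.new_disk_source(**{"attrs": {"file": image_path}})
    disk.target = {"dev": "vdb", "bus": "virtio"}
    # The CCW address triple (cssid/ssid/devno) is what the test exercises;
    # these example values are assumptions.
    disk.address = disk.new_disk_address(
        type_name="ccw",
        **{"attrs": {"cssid": "0xfe", "ssid": "0x0", "devno": "0x0001"}})
    return disk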
def run(test, params, env):
    """
    Test migration with memory related configuration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    local_ip = params.get("local_ip")
    local_pwd = params.get("local_pwd")
    ballooned_mem = params.get("ballooned_mem")
    check = params.get("check")
    remove_dict = {}
    src_libvirt_file = None

    remote_virsh_dargs = {'remote_ip': remote_ip, 'remote_user': remote_user,
                          'remote_pwd': remote_pwd,
                          'unprivileged_user': None,
                          'ssh_remote_auth': True}

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage: %s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirtd_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # Params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For safety reasons, we'd better back up the xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if check == "mem_balloon":
            # Update memory balloon device to the correct model
            membal_dict = {'membal_model': 'virtio',
                           'membal_stats_period': '10'}
            libvirt.update_memballoon_xml(new_xml, membal_dict)

        if check == "mem_device":
            libvirt_cpu.add_cpu_settings(new_xml, params)
            dimm_params = {k.replace('memdev_', ''): v
                           for k, v in params.items()
                           if k.startswith('memdev_')}
            dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)
            libvirt.add_vm_device(new_xml, dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = params.get("conf_type", "libvirtd")
            if (conf_type == "libvirtd"
                    and utils_split_daemons.is_modular_daemon()):
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)

        try:
            if not vm.is_alive():
                vm.start()
        except virt_vm.VMStartError as e:
            logging.info("Failed to start VM")
            test.fail("Failed to start VM: %s" % vm_name)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                    thread_timeout=900, ignore_status=True,
                                    virsh_opt=virsh_options, func=func_name,
                                    **extra_args)
        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

        if check == "mem_balloon":
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            remote_virsh_session.setmem(vm_name, ballooned_mem, None, None,
                                        False, "", **virsh_args)

            def check_mem_balloon():
                """Check if memory balloon worked"""
                memstat_output = remote_virsh_session.dommemstat(
                    vm_name, "", **virsh_args)
                memstat_after = memstat_output.stdout_text
                mem_after = memstat_after.splitlines()[0].split()[1]
                if mem_after != ballooned_mem:
                    logging.debug("Current memory size is: %s" % mem_after)
                    return False
                return True

            check_ret = utils_misc.wait_for(check_mem_balloon, timeout=20)
            if not check_ret:
                test.fail("Memory is not ballooned to the expected size: %s"
                          % ballooned_mem)
            remote_virsh_session.close_session()

        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=remote_ip,
                                               username=remote_user,
                                               password=remote_pwd)
        if check == "mem_device":
            qemu_checks = params.get('qemu_checks', '').split('`')
            logging.debug("qemu_checks: %s" % qemu_checks[0])
            for qemu_check in qemu_checks:
                libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                            runner_on_target)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=remote_ip,
                                                      server_pwd=remote_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            src_full_uri = libvirt_vm.complete_uri(
                params.get("migrate_source_host"))
            migration_test.migrate_pre_setup(src_full_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_full_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recover VM XML configuration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()
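# For reference, 'libvirtd_conf_dict' is eval()ed from the cfg file into a
# dict of daemon-config keys. A plausible example value (illustrative only,
# not taken from a real cfg file) would be:
#     libvirtd_conf_dict = "{'log_level': '1', 'log_outputs':
#         '\"1:file:/var/log/libvirt/libvirtd_daemons.log\"'}"
# customize_libvirt_config() writes these keys into libvirtd.conf (or
# virtqemud.conf under the modular daemons) and returns an object that the
# finally block uses to restore the original file.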
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # Define multiple ip/dhcp sections in the network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid settings
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    # Get params about creating a bridge
    bridge = params.get('bridge', None)
    create_bridge = "yes" == params.get('create_bridge', 'no')
    ovs_bridge = "yes" == params.get('ovs_bridge', 'no')
    iface_name = utils_net.get_net_if(state="UP")[0]

    # Get params about creating a network
    create_netxml = "yes" == params.get("create_netxml", "no")
    domain = params.get('domain', None)
    forward = params.get("forward", None)
    net_dns_txt = params.get("net_dns_txt", None)
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", None)
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", None)
    mac = params.get("mac")

    # Edit the created network xml to get the xml to be tested
    del_mac = "yes" == params.get('del_mac', 'no')
    del_ip = "yes" == params.get('del_ip', 'no')
    add_dev = "yes" == params.get('add_dev', 'no')
    virtualport = 'yes' == params.get("virtualport", "no")
    virtualport_type = params.get("virtualport_type")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    logging.debug("Get network xml as testnet_xml: %s" % testnet_xml)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # about both of them for the result check.
    # When something is wrong with the network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            range_4 = network_xml.RangeXML()
            range_4.attrs = {"start": dhcp_ranges_start,
                             "end": dhcp_ranges_end}
            ipxml_v4.dhcp_ranges = range_4
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
        if test_port:
            nat_port = {"start": nat_port_start, "end": nat_port_end}
            testnet_xml.nat_port = nat_port
        testnet_xml.debug_xml()

        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set
            # to 2 is likely to cause route loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # Add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # Add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        if create_netxml:
            net_dict = {'del_nat_attrs': True, 'del_ip': del_ip,
                        'dns_txt': net_dns_txt, 'domain': domain,
                        'bridge': bridge, 'forward': forward,
                        'interface_dev': iface_name,
                        'virtualport': virtualport,
                        'virtualport_type': virtualport_type,
                        'mac': mac,
                        'net_bandwidth_inbound': net_bandwidth_inbound,
                        'net_bandwidth_outbound': net_bandwidth_outbound}
            logging.debug("net_dict is %s" % net_dict)
            testnet_xml = libvirt_network.modify_network_xml(net_dict,
                                                             testnet_xml)
        testnet_xml.debug_xml()
        if create_bridge:
            if ovs_bridge:
                utils_net.create_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.create_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)
        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options, define_extra,
                                                 **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined network: %s"
                                           % str(net_state))

                if define_status == 1 and status_error and expect_msg:
                    logging.debug("check result is %s, expect_msg is %s"
                                  % (define_result, expect_msg))
                    libvirt.check_result(define_result, expect_msg.split(';'))

                # If defining the network succeeded, try to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name, extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status
                    if trans_ref == "trans":
                        if define_status:
                            fail_flag = 1
                            result_info.append("Define network with right "
                                               "command failed.")
                        else:
                            if start_status:
                                fail_flag = 1
                                result_info.append("Start defined network "
                                                   "failed: %s"
                                                   % str(start_result))

                    # Check network states after start
                    if check_states and not status_error:
                        net_state = virsh_instance.net_state_dict()
                        if (not net_state[net_name]['active']
                                or net_state[net_name]['autostart']
                                or not net_state[net_name]['persistent']):
                            fail_flag = 1
                            result_info.append("Found wrong network states "
                                               "for started network: %s"
                                               % str(net_state))
                        # Try to set autostart
                        virsh.net_autostart(net_name, **virsh_dargs)
                        net_state = virsh_instance.net_state_dict()
                        if not net_state[net_name]['autostart']:
                            fail_flag = 1
                            result_info.append("Failed to set autostart for "
                                               "network %s" % net_name)
                        # Restart libvirtd and check the state.
                        # Close down the persistent virsh session before the
                        # libvirtd restart
                        if hasattr(virsh_instance, 'close_session'):
                            virsh_instance.close_session()
                        libvirtd = utils_libvirtd.Libvirtd()
                        libvirtd.restart()
                        # Need to redefine virsh_instance after the libvirtd
                        # restart
                        virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                        net_state = virsh_instance.net_state_dict()
                        if (not net_state[net_name]['active']
                                or not net_state[net_name]['autostart']):
                            fail_flag = 1
                            result_info.append("Found wrong network state "
                                               "after restarting libvirtd: %s"
                                               % str(net_state))
                        logging.debug("undefine network:")
                        # Prepare the network status
                        if not net_persistent:
                            virsh.net_undefine(net_name, ignore_status=False)
                        if not net_active:
                            virsh.net_destroy(net_name, ignore_status=False)
                        undefine_status = virsh.net_undefine(
                            undefine_options, undefine_extra,
                            **virsh_dargs).exit_status
                        net_state = virsh_instance.net_state_dict()
                        if net_persistent:
                            if undefine_status:
                                fail_flag = 1
                                result_info.append("undefine should succeed "
                                                   "but failed")
                            if net_active:
                                if (not net_state[net_name]['active']
                                        or net_state[net_name]['autostart']
                                        or net_state[net_name]['persistent']):
                                    fail_flag = 1
                                    result_info.append(
                                        "Found wrong network states for "
                                        "undefined network: %s"
                                        % str(net_state))
                            else:
                                if net_name in net_state:
                                    fail_flag = 1
                                    result_info.append(
                                        "Transient network should not exist "
                                        "after undefine: %s" % str(net_state))
                        else:
                            if not undefine_status:
                                fail_flag = 1
                                result_info.append(
                                    "undefine of a transient network should "
                                    "fail but succeeded: %s" % str(net_state))

                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name, extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(undefine_options,
                                                         undefine_extra,
                                                         **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug("The define and undefine operation in loop %s "
                              "failed.", loop)
            finally:
                loop = loop - 1
    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml
        # copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
        if create_bridge:
            if ovs_bridge:
                utils_net.delete_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.delete_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)

    # Check status_error
    # If fail_flag is set, it must be a transaction test.
    if fail_flag:
        test.fail("Define network for transaction test failed: %s"
                  % result_info)

    # The logic to check the result:
    # status_error & (only undefine): negative undefine test only
    # status_error & (no undefine): negative define test only
    # (not status_error) & (only undefine): positive transaction test
    # (not status_error) & (no undefine): positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Defined an unexpected network, "
                              "and started it successfully.")
                else:
                    test.fail("Defined an unexpected network, "
                              "but starting it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Defined network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but starting it failed.")
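# set_ip_section() and get_network_xml_instance() above are module-level
# helpers not shown in this excerpt. A minimal sketch of the former,
# appending an <ip> element with an optional dhcp range and modeled on the
# IPXML/RangeXML usage seen elsewhere in these tests, is below; the keyword
# names follow the call sites above, but the body is an assumption.
def set_ip_section_sketch(testnet_xml, address, ipv6=False, netmask=None,
                          prefix_v6=None, dhcp_ranges_start=None,
                          dhcp_ranges_end=None):
    """
    Hypothetical helper: add one ipv4/ipv6 <ip> section to a NetworkXML.
    """
    ipxml = network_xml.IPXML()
    ipxml.address = address
    if ipv6:
        ipxml.family = "ipv6"
        ipxml.prefix = prefix_v6
    else:
        ipxml.netmask = netmask
    if dhcp_ranges_start and dhcp_ranges_end:
        dhcp_range = network_xml.RangeXML()
        dhcp_range.attrs = {"start": dhcp_ranges_start,
                            "end": dhcp_ranges_end}
        ipxml.dhcp_ranges = dhcp_range
    # set_ip() appends a new <ip> element (the edit_xml branch above calls
    # del_ip() first precisely because of this append behavior)
    testnet_xml.set_ip(ipxml)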
def run(test, params, env): """ Test command: virsh vol-resize Resize the capacity of the given volume (default bytes). 1. Define and start a given type pool. 2. Create a volume in the pool. 3. Do vol-resize. 4. Check the volume info. 5. Delete the volume and pool. TODO: Add volume shrink test after libvirt upstream support it. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") vol_format = params.get("vol_format") vol_capacity = params.get("vol_capacity") vol_new_capacity = params.get("vol_new_capacity") resize_option = params.get("resize_option", "") check_vol_size = "yes" == params.get("check_vol_size", "yes") status_error = "yes" == params.get("status_error", "no") b_luks_encrypt = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] with_clusterSize = "yes" == params.get("with_clusterSize") libvirt_version.is_libvirt_feature_supported(params) if not libvirt_version.version_compare(1, 0, 0): if "--allocate" in resize_option: test.cancel("'--allocate' flag is not supported in" " current libvirt version.") # libvirt acl polkit related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") libv_pvt = libvirt.PoolVolumeTest(test, params) try: libv_pool = libvirt_storage.StoragePool() # Raise error if given name pool already exist if libv_pool.pool_exists(pool_name): test.error("Pool '%s' already exist", pool_name) else: # Create a new pool libv_pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size=emulated_image_size) pool_info = libv_pool.pool_info(pool_name) for key in pool_info: logging.debug("Pool info: %s = %s", key, pool_info[key]) # Deal with vol_new_capacity, '--capacity' only accept integer if vol_new_capacity == "pool_available": pool_avai = pool_info["Available"].split() vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1] if vol_new_capacity == "pool_capacity": pool_capa = pool_info["Capacity"].split() vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1] # Create a volume if b_luks_encrypt: luks_sec_uuid = create_luks_secret( os.path.join(pool_target, vol_name), test) secret_uuids.append(luks_sec_uuid) set_secret_value(encryption_password, luks_sec_uuid) create_luks_vol(vol_name, luks_sec_uuid, params, test) else: libv_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format, capacity=vol_capacity, allocation=None, pool_name=pool_name) libv_vol = libvirt_storage.PoolVolume(pool_name) check_vol_info(libv_vol, vol_name, test) # The volume size may not accurate as we expect after resize, such as: # 1) vol_new_capacity = 1b with --delta option, the volume size will not # change; run # 2) vol_new_capacity = 1KB with --delta option, the volume size will # increase 1024 not 1000 # So we can disable volume size check after resize if check_vol_size: vol_path = libv_vol.list_volumes()[vol_name] expect_info = get_expect_info(vol_new_capacity, vol_path, test, resize_option) logging.debug("Expect volume info: %s", expect_info) else: expect_info = {} # 
        # Run vol-resize
        result = virsh.vol_resize(vol_name, vol_new_capacity, pool_name,
                                  resize_option, uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)

        if not status_error:
            if result.exit_status != 0:
                test.fail(result.stdout.strip() + result.stderr.strip())
            else:
                if check_vol_info(libv_vol, vol_name, test, expect_info):
                    logging.debug("Volume %s resize check passed.", vol_name)
                else:
                    test.fail("Volume %s resize check failed." % vol_name)
        elif result.exit_status == 0:
            test.fail("Expected resize to fail, but it succeeded.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
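# A minimal illustrative sketch (not the get_expect_info() helper used
# above, which is defined elsewhere): how the expected capacity after
# 'vol-resize --delta' could be computed, assuming the 1024-based suffix
# behavior noted in the comment above (1KB adds 1024, not 1000). The
# helper name and suffix table are assumptions for illustration only.
SUFFIX_MULTIPLIERS = {'b': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3}


def expected_delta_capacity(current_bytes, new_capacity):
    """Return the capacity expected after a --delta resize.

    :param current_bytes: current volume capacity in bytes
    :param new_capacity: value passed to vol-resize, e.g. '1KB'
    """
    # Match longer suffixes first so 'KB' is not mistaken for plain 'b'
    for suffix, multiplier in sorted(SUFFIX_MULTIPLIERS.items(),
                                     key=lambda kv: -len(kv[0])):
        if new_capacity.endswith(suffix):
            return current_bytes + int(new_capacity[:-len(suffix)]) * multiplier
    # No suffix: the delta is given in plain bytes
    return current_bytes + int(new_capacity)


# e.g. expected_delta_capacity(1048576, '1KB') == 1048576 + 1024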
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run the nodedev-list command and check its return code.
    2) If `cap_option == one`, the results are also compared with the
       device list retrieved from sysfs.
    """
    def _check_result(cap, ref_list, result, mode):
        """
        Check the test result against a device list retrieved from sysfs.

        :param cap: Capability being checked; the currently available caps
                    are defined in the variable `caps`.
        :param ref_list: Reference device list retrieved from sysfs.
        :param result: Stdout returned from the virsh nodedev-list command.
        :param mode: How to compare the sysfs info with the command output:
                     "exact" or "similar".
        """
        check_list = result.strip().splitlines()
        are_not_equivalent = True
        if mode == "similar":
            listed = [x for x in ref_list if x in result]
            all_sysfs_info_listed = len(ref_list) == len(listed)
            same_number_of_devices = len(ref_list) == len(check_list)
            are_not_equivalent = (not all_sysfs_info_listed or
                                  not same_number_of_devices)
        elif mode == "exact":
            are_not_equivalent = set(ref_list) != set(check_list)
        else:
            logging.error("Unknown comparison mode in result check: %s", mode)
            return False

        uavail_caps = ['system', 'vports', 'fc_host']

        if are_not_equivalent and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices : %s', check_list)
            return False
        return True

    mode = params.get("comparison_mode", "exact")
    all_caps = ['system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
                'scsi_target', 'scsi', 'storage', 'fc_host', 'vports',
                'scsi_generic', 'ccw', 'css']
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    caps = get_avail_caps(all_caps)
    check_failed = False

    # acl polkit params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    tree = (tree_option == 'on')
    if cap_option == 'one':
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)

        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug(' ' + device)

        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout.strip(),
                                 mode):
                check_failed = True
                break
    else:
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option
        result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
        logging.debug(result)

    if expect_succeed == 'yes':
        if result.exit_status != 0:
            test.fail('Expected success, but failed with result:\n%s' % result)
    elif expect_succeed == 'no':
        if result.exit_status == 0:
            test.fail('Expected failure, but succeeded with result:\n%s'
                      % result)
    if check_failed:
        test.fail('Check failed. result:\n%s' % result)
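# Illustrative sketch only: one way a sysfs-based reference lookup such as
# the get_devices_by_cap() helper used above could work (the real helper is
# defined elsewhere in this module). The cap-to-sysfs mapping and helper
# name here are assumptions; note too that libvirt's nodedev names are not
# necessarily the raw sysfs entry names, so a real helper would also have
# to translate them.
import os


CAP_SYSFS_DIRS = {
    'net': '/sys/class/net',
    'scsi_host': '/sys/class/scsi_host',
}


def list_sysfs_devices(cap):
    """Return the device entries sysfs reports for one capability."""
    sysfs_dir = CAP_SYSFS_DIRS.get(cap)
    if not sysfs_dir or not os.path.isdir(sysfs_dir):
        return []
    return sorted(os.listdir(sysfs_dir))


# e.g. list_sysfs_devices('net') might return ['eth0', 'lo']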
def run(test, params, env):
    """
    Kill the libvirt daemon with different signals and check whether the
    daemon restarts properly and leaves no PID file behind if stopped.
    """
    def get_pid():
        """
        Get the PID of the libvirt daemon process.
        """
        with open(pid_file) as pid_f:
            return int(pid_f.read())

    def send_signal(pid, signal_name):
        """
        Send a signal to a process by PID.
        """
        signal_num = getattr(signal, signal_name)
        os.kill(pid, signal_num)

    def start_mark(src_file, dest_file):
        """
        Copy src_file to a tmp file to mark the time point.

        :param src_file: The file that should be checked.
        :param dest_file: The temp file marking the time point.
        """
        # Clean the dest file if it exists
        if os.path.exists(dest_file):
            os.remove(dest_file)
        cmdline = 'cp %s %s' % (src_file, dest_file)
        process.run(cmdline, shell=True)

    pid_file = '/var/run/libvirtd.pid'
    if utils_split_daemons.is_modular_daemon():
        pid_file = '/var/run/virtqemud.pid'
    message_src_file = '/var/log/messages'
    message_dest_file = '/tmp/messages_tmp'
    signal_name = params.get("signal", "SIGTERM")
    should_restart = params.get("expect_restart", "yes") == "yes"
    timeout = int(params.get("restart_timeout", 1))
    pid_should_change = params.get("expect_pid_change", "yes") == "yes"
    expect_coredump = params.get("expect_coredump", "no") == "yes"
    sysconfig = params.get("sysconfig", None)
    check_dmesg = params.get("check_dmesg", None)
    libvirtd = Libvirtd("virtqemud")

    try:
        libvirtd.start()

        if sysconfig:
            config = utils_config.LibvirtdSysConfig()
            setattr(config, sysconfig.split('=')[0], sysconfig.split('=')[1])
            libvirtd.restart()
        if check_dmesg:
            start_mark(message_src_file, message_dest_file)

        pid = get_pid()
        logging.debug("PID of libvirtd is %d", pid)
        logging.debug("Killing process %s with %s", pid, signal_name)
        send_signal(pid, signal_name)

        # Wait for libvirtd to restart or reload
        time.sleep(timeout)

        if libvirtd.is_running():
            if not should_restart:
                test.fail("libvirtd should stop running after signal %s"
                          % signal_name)
            new_pid = get_pid()
            logging.debug("New PID of libvirtd is %d", new_pid)
            if pid == new_pid and pid_should_change:
                test.fail("PID should have changed.")
            if pid != new_pid and not pid_should_change:
                test.fail("PID should not have changed.")
        else:
            if should_restart:
                test.fail("libvirtd should still be running after signal %s"
                          % signal_name)
        if check_dmesg:
            cmdline = 'diff %s %s' % (message_src_file, message_dest_file)
            res = process.run(cmdline, shell=True,
                              ignore_status=True).stdout_text
            if check_dmesg not in res:
                test.fail('%s should be in %s, but is not'
                          % (check_dmesg, message_src_file))
    finally:
        # Clear coredump info
        if expect_coredump:
            cmd = 'journalctl --flush;'
            cmd += ('journalctl --rotate; journalctl --vacuum-size=1K; '
                    'journalctl --vacuum-time=1s')
            process.run(cmd, ignore_status=True, shell=True)
        if not libvirtd.is_running():
            if os.path.exists(pid_file):
                os.remove(pid_file)
                libvirtd.start()
                test.fail("PID file should not remain after the daemon "
                          "stopped")
            libvirtd.start()
        if sysconfig:
            config.restore()
            libvirtd.restart()
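# A minimal alternative sketch to the fixed time.sleep(timeout) used above:
# poll the PID file until a fresh PID shows up or a deadline passes. This
# is a hypothetical helper, not part of the test; the deadline and polling
# step are assumptions, and restart behavior ultimately depends on the
# systemd unit's Restart= policy.
import time


def wait_for_new_pid(pid_file, old_pid, deadline=10.0, step=0.5):
    """Poll pid_file until it holds a PID different from old_pid."""
    end = time.time() + deadline
    while time.time() < end:
        try:
            with open(pid_file) as pid_f:
                new_pid = int(pid_f.read())
            if new_pid != old_pid:
                return new_pid
        except (IOError, ValueError):
            # PID file missing or half-written while the daemon restarts
            pass
        time.sleep(step)
    return None


# e.g. wait_for_new_pid('/var/run/virtqemud.pid', pid) returns the restarted
# daemon's PID, or None if no new PID appeared before the deadline.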