def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. check the full/incremental backup file data
    """
    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    local_hostname = params.get("loal_hostname", "localhost")
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the related functions
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {"qemu_tls": "yes",
                          "auto_recover": "yes",
                          "client_ip": tls_client_ip,
                          "server_ip": tls_server_ip,
                          "client_cn": tls_client_cn,
                          "server_cn": tls_server_cn,
                          "client_user": tls_client_user,
                          "server_user": tls_server_user,
                          "client_pwd": tls_client_pwd,
                          "server_pwd": tls_server_pwd}
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
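            # conn_setup() generates the CA/server certs (and client certs
            # when requested) under pki_path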
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid, luks_passphrase,
                                   encode=True, debug=True)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path,
                                      original_disk_size, "qcow2")
            disk_params = {"device_type": "disk",
                           "type_name": "file",
                           "driver_type": "qcow2",
                           "target_dev": original_disk_target,
                           "source_file": disk_path}
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=False,
                image_size=original_disk_size, portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s"
                         % (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s"
                        % (disk_path, original_disk_size),
                        shell=True, verbose=True)
            disk_params = {'device_type': 'disk',
                           'type_name': 'network',
                           "driver_type": "qcow2",
                           'target_dev': original_disk_target}
            disk_params_src = {'source_protocol': 'iscsi',
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': iscsi_host,
                               'source_host_port': '3260'}
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True, vol_name=gluster_vol_name,
                pool_name=gluster_pool_name, **params)
            disk_path = 'gluster://%s/%s/%s' % (gluster_host_ip,
                                                gluster_vol_name,
                                                gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s"
                        % (disk_path, original_disk_size),
                        shell=True, verbose=True)
            disk_params = {'device_type': 'disk',
                           'type_name': 'network',
                           "driver_type": "qcow2",
                           'target_dev': original_disk_target}
            disk_params_src = {'source_protocol': 'gluster',
                               'source_name': gluster_vol_name + "/%s" % gluster_img_name,
                               'source_host_name': gluster_host_ip,
                               'source_host_port': '24007'}
            disk_params.update(disk_params_src)
        else:
            test.error("The disk type '%s' is not supported in this script."
                       % original_disk_type)
        if hotplug_disk:
            vm.start()
            session = vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name, disk_xml,
                                flagstr="--config", debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)
            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target
            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type
                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = original_disk_target + "_custom_exp"
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = original_disk_target + "_custom_bitmap"
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name
                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = "scratch_file_%s" % backup_index
                        scratch_path = os.path.join(tmp_dir, scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True, image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail("We do not support backup scratch type: '%s'"
                                  % scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {"encryption": "luks",
                                           "secret": {"type": "passphrase",
                                                      "uuid": luks_secret_uuid}}
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(backup_params,
                                                        backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s",
                          backup_index, checkpoint_xml)
            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm,
                                          dd_bs, dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
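                # Reuse the pre-created scratch files instead of letting
                # libvirt create new ones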
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name, backup_options,
                                               ignore_status=True, debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {"nbd_protocol": nbd_protocol,
                          "nbd_export": nbd_export_name}
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(nbd_params,
                                                          backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s" % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if scratch file encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(cmd, shell=True,
                                     verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path,
                                                            original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for "
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and error_operation is not None:
            if "kill_qemu" in error_operation:
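                # The qemu process was killed, so only the checkpoint
                # metadata can be removed during cleanup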
                clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)
        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()
        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj
        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts( secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = 
params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name, "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool,
                                          create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vm_xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=vm_xml_file)

    # Get device path.
    device_source_path = ""
    if source_path:
        device_source_path = test.virtdir

    # Prepare test environment.
    qemu_config = LibvirtQemuConfig()
    if test_disks_format:
        qemu_config.allow_disk_format_probing = True
        utils_libvirtd.libvirtd_restart()

    # Create virtual device file.
    disks = []
    try:
        for i in range(len(device_source_names)):
            if test_disk_type_dir:
                # When testing the disk type dir option,
                # there is no need to create a disk image
                disks.append({"format": "dir",
                              "source": device_source_names[i]})
            else:
                path = "%s/%s.%s" % (device_source_path,
def run(test, params, env): """ Test virtiofs filesystem device: 1.Start guest with 1/2 virtiofs filesystem devices. 2.Start 2 guest with the same virtiofs filesystem device. 3.Coldplug/Coldunplug virtiofs filesystem device 4.Share data between guests and host. 5.Lifecycle for guest with virtiofs filesystem device. """ def generate_expected_process_option(expected_results): """ Generate expected virtiofsd process option """ if cache_mode != "auto": expected_results = "cache=%s" % cache_mode if xattr == "on": expected_results += ",xattr" elif xattr == "off": expected_results += ",no_xattr" if flock == "on": expected_results += ",flock" else: expected_results += ",no_flock" if lock_posix == "on": expected_results += ",posix_lock" else: expected_results += ",no_posix_lock" logging.debug(expected_results) return expected_results def shared_data(vm_names, fs_devs): """ Shared data between guests and host: 1.Mount dir in guest; 2.Write a file in guest; 3.Check the md5sum value are the same in guests and host; """ md5s = [] for vm in vms: session = vm.wait_for_login() for fs_dev in fs_devs: logging.debug(fs_dev) mount_dir = '/var/tmp/' + fs_dev.target['dir'] session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False) session.cmd('mkdir -p %s' % mount_dir) logging.debug("mount virtiofs dir in guest") cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'], mount_dir) status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("mount virtiofs dir failed: %s" % output) if vm == vms[0]: filename_guest = mount_dir + '/' + vm.name cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: session.close() test.fail("Write data failed: %s" % output) md5_value = session.cmd_status_output( "md5sum %s" % filename_guest)[1].strip().split()[0] md5s.append(md5_value) logging.debug(md5_value) md5_value = process.run( "md5sum %s" % filename_guest).stdout_text.strip().split()[0] logging.debug(md5_value) md5s.append(md5_value) session.close() if len(set(md5s)) != len(fs_devs): test.fail("The md5sum value are not the same in guests and host") def launch_externally_virtiofs(source_dir, source_socket): """ Launch externally virtiofs :param source_dir: the dir shared on host :param source_socket: the socket file listened on """ process.run('chcon -t virtd_exec_t %s' % path, ignore_status=False, shell=True) cmd = "systemd-run %s --socket-path=%s -o source=%s" % ( path, source_socket, source_dir) try: process.run(cmd, ignore_status=False, shell=True) # Make sure the socket is created utils_misc.wait_for(lambda: os.path.isdir(source_socket), timeout=3) process.run("chown qemu:qemu %s" % source_socket, ignore_status=False) process.run('chcon -t svirt_image_t %s' % source_socket, ignore_status=False, shell=True) except Exception as err: cmd = "pkill virtiofsd" process.run(cmd, shell=True) test.fail("{}".format(err)) def prepare_stress_script(script_path, script_content): """ Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files. 
        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s"
                      % (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed: %s" % e)

    def run_stress_script(session, script_path):
        """
        Run the stress script in the guest

        :param session: guest session
        :param script_path: The path of the script in the guest
        """
        # Set ULIMIT_NOFILE to increase the number of unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check whether there is xml about the filesystem device in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether there is virtiofs in the vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (mount_dir,
                                                     fs_dev.target['dir'],
                                                     mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail("Mounting virtiofs should fail after the device is "
                      "hotunplugged: %s" % output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")
    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms."
% guest_num) if not libvirt_version.version_compare(7, 0, 0) and not with_numa: test.cancel("Not supported without NUMA before 7.0.0") if not libvirt_version.version_compare(7, 6, 0) and destroy_start: test.cancel("Bug %s is not fixed on current build" % bug_url) try: # Define filesystem device xml for index in range(fs_num): driver = {'type': driver_type, 'queue': queue_size} source_dir = os.path.join('/var/tmp/', str(dir_prefix) + str(index)) logging.debug(source_dir) not os.path.isdir(source_dir) and os.mkdir(source_dir) target_dir = dir_prefix + str(index) source = {'socket': source_socket} target = {'dir': target_dir} if launched_mode == "auto": binary_keys = [ 'path', 'cache_mode', 'xattr', 'lock_posix', 'flock' ] binary_values = [path, cache_mode, xattr, lock_posix, flock] binary_dict = dict(zip(binary_keys, binary_values)) source = {'dir': source_dir} accessmode = "passthrough" fsdev_keys = [ 'accessmode', 'driver', 'source', 'target', 'binary' ] fsdev_values = [ accessmode, driver, source, target, binary_dict ] else: fsdev_keys = ['driver', 'source', 'target'] fsdev_values = [driver, source, target] fsdev_dict = dict(zip(fsdev_keys, fsdev_values)) logging.debug(fsdev_dict) fs_dev = libvirt_device_utils.create_fs_xml( fsdev_dict, launched_mode) logging.debug(fs_dev) fs_devs.append(fs_dev) #Start guest with virtiofs filesystem device for index in range(guest_num): logging.debug("prepare vm %s", vm_names[index]) vm = env.get_vm(vm_names[index]) vms.append(vm) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) vmxml_backup = vmxml.copy() vmxml_backups.append(vmxml_backup) if vmxml.max_mem < 1024000: vmxml.max_mem = 1024000 if with_hugepages: huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages utils_memory.set_num_huge_pages(huge_pages_num) vmxml.remove_all_device_by_type('filesystem') vmxml.sync() numa_no = None if with_numa: numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1 vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name, vmxml.vcpu, numa_number=numa_no) vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name, access_mode="shared", hpgs=with_hugepages, memfd=with_memfd) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index]) logging.debug(vmxml) if launched_mode == "externally": launch_externally_virtiofs(source_dir, source_socket) if coldplug: ret = virsh.attach_device(vm_names[index], fs_devs[0].xml, flagstr='--config', debug=True) utils_test.libvirt.check_exit_status(ret, expect_error=False) else: if not hotplug_unplug: for fs in fs_devs: vmxml.add_device(fs) vmxml.sync() logging.debug(vmxml) libvirt_pcicontr.reset_pci_num(vm_names[index]) result = virsh.start(vm_names[index], debug=True) if hotplug_unplug: if stdio_handler_file: qemu_config = LibvirtQemuConfig() qemu_config.stdio_handler = "file" utils_libvirtd.Libvirtd().restart() for fs_dev in fs_devs: ret = virsh.attach_device(vm_names[index], fs_dev.xml, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error: return if status_error and not managedsave: expected_error = error_msg_start utils_test.libvirt.check_exit_status(result, expected_error) return else: utils_test.libvirt.check_exit_status(result, expect_error=False) expected_results = generate_expected_process_option( expected_results) if launched_mode == "auto": cmd = 'ps aux | grep virtiofsd | head -n 1' utils_test.libvirt.check_cmd_output(cmd, content=expected_results) if managedsave: expected_error = error_msg_save result = virsh.managedsave(vm_names[0], ignore_status=True, debug=True) 
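            # A non-empty error_msg_save means managedsave is expected to
            # fail with that message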
utils_test.libvirt.check_exit_status(result, expected_error) else: shared_data(vm_names, fs_devs) if suspend_resume: virsh.suspend(vm_names[0], debug=True, ignore_status=False) time.sleep(30) virsh.resume(vm_names[0], debug=True, ignore_statue=False) elif destroy_start: session = vm.wait_for_login(timeout=120) # Prepare the guest test script script_path = os.path.join(fs_devs[0].source["dir"], "test.py") script_content %= (fs_devs[0].source["dir"], fs_devs[0].source["dir"]) prepare_stress_script(script_path, script_content) # Run guest stress script stress_script_thread = threading.Thread( target=run_stress_script, args=(session, script_path)) stress_script_thread.setDaemon(True) stress_script_thread.start() # Create a lot of unlink files time.sleep(60) virsh.destroy(vm_names[0], debug=True, ignore_status=False) ret = virsh.start(vm_names[0], debug=True) libvirt.check_exit_status(ret) elif edit_start: vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml( vm_names[0]) if vm.is_alive(): virsh.destroy(vm_names[0]) cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[ 0] cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug(virsh.dumpxml(vm_names[0])) if cmd_result.exit_status: test.error("virt-xml edit guest failed: %s" % cmd_result) result = virsh.start(vm_names[0], ignore_status=True, debug=True) if error_msg_start: expected_fails_msg.append(error_msg_start) utils_test.libvirt.check_result( result, expected_fails=expected_fails_msg) if not libvirt_version.version_compare(6, 10, 0): # Because of bug #1897105, it was fixed in libvirt-6.10.0, # before this version, need to recover the env manually. cmd = "pkill virtiofsd" process.run(cmd, shell=True) if not vm.is_alive(): # Restoring vm and check if vm can start successfully vmxml_virtio_backup.sync() virsh.start(vm_names[0], ignore_status=False, shell=True) elif socket_file_checking: result = virsh.domid(vm_names[0]) domid = result.stdout.strip() domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[ 0] if result.exit_status: test.fail("Get domid failed.") for fs_dev in fs_devs: alias = fs_dev.alias['name'] expected_pid = domain_dir + alias + '-fs.pid' expected_sock = alias + '-fs.sock' status1 = process.run('ls -l %s' % expected_pid, shell=True).exit_status status2 = process.run('ls -l %s' % expected_sock, shell=True).exit_status if not (status1 and status2): test.fail( "The socket and pid file is not as expected") elif hotplug_unplug: for vm in vms: umount_fs(vm) for fs_dev in fs_devs: if detach_device_alias: alias = fs_dev.alias['name'] cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % ( vm.name, alias) output = process.run(cmd).stdout_text.splitlines() for item in output[1:]: if stdio_handler_file: if item.split()[0] != "virtiofsd": test.fail( "When setting stdio_handler as file, the command" "to write log should be virtiofsd!" 
) else: if item.split()[0] != "virtlogd": test.fail( "When setting stdio_handler as logd, the command" "to write log should be virtlogd!") ret = virsh.detach_device_alias( vm.name, alias, ignore_status=True, debug=True, wait_for_event=True) else: ret = virsh.detach_device(vm.name, fs_dev.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(ret, status_error) check_filesystem_in_guest(vm, fs_dev) check_detached_xml(vm) finally: for vm in vms: if vm.is_alive(): umount_fs(vm) vm.destroy(gracefully=False) for vmxml_backup in vmxml_backups: vmxml_backup.sync() for index in range(fs_num): process.run('rm -rf %s' % '/var/tmp/' + str(dir_prefix) + str(index), ignore_status=False) process.run('rm -rf %s' % source_socket, ignore_status=False, shell=True) if launched_mode == "externally": process.run('restorecon %s' % path, ignore_status=False, shell=True) utils_memory.set_num_huge_pages(backup_huge_pages_num) if stdio_handler_file: qemu_config.restore() utils_libvirtd.Libvirtd().restart()
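    # The block below prepares the disk-format-probing variant: destroy the
    # guest, back up its XML, enable allow_disk_format_probing in qemu.conf,
    # and build the per-disk source definitions for the "dir" disk type case.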
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vm_xml_file = os.path.join(test.tmpdir, "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=vm_xml_file)

    # Get device path.
    device_source_path = ""
    if source_path:
        device_source_path = test.virtdir

    # Prepare test environment.
    qemu_config = LibvirtQemuConfig()
    if test_disks_format:
        qemu_config.allow_disk_format_probing = True
        utils_libvirtd.libvirtd_restart()

    # Create virtual device file.
    disks = []
    try:
        for i in range(len(device_source_names)):
            if test_disk_type_dir:
                # When testing the disk type "dir" option, there is no
                # need to create a disk image.
                disks.append({"format": "dir",
                              "source": device_source_names[i]})
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip( disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool(pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev] xml_dom_exp = ["source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)" .format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)} for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts(secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no") # Create /etc/ceph/ceph.conf file to suppress false 
    # warning error message.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)

    # Construct a list of "unsupported" error messages; tests that hit one of
    # these are skipped instead of failed.
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up dirty secrets left in the test environment, if any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)

        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
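            # sanlock locking needs both the wdmd watchdog multiplexer and the
            # sanlock daemon running; libvirtd is then restarted so it picks up
            # the qemu.conf / qemu-sanlock.conf changes made above.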
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict["chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'} san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
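        # With these parameters, libvirt.create_disk_xml() should yield a
        # network disk roughly like the following (values are illustrative):
        #   <disk type='network' device='disk'>
        #     <source protocol='rbd' name='<pool>/<image>'>
        #       <host name='<mon_host>' port='6789'/>
        #     </source>
        #     <auth username='<auth_user>'>
        #       <secret type='ceph' uuid='<secret_uuid>'/>
        #     </auth>
        #     <target dev='vdb' bus='virtio'/>
        #   </disk>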
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port}) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = {"name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format} create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
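                # Both domains attach the same rbd-backed disk, so the
                # generated XML can simply be copied for the cloned guest.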
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm(vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove /etc/ceph/ceph.conf file if exists. if os.path.exists('/etc/ceph/ceph.conf'): os.remove('/etc/ceph/ceph.conf') # Delete snapshots. 
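        # Leftover snapshot metadata can make the vmxml_backup.sync()
        # (undefine + redefine) below fail, so purge snapshots first.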
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name, "--remove-all-storage",
                                ignore_status=True)

        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool,
                                          create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)

        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True)
        if snapshot_path is not None:
            for snapshot in snapshot_path:
                if os.path.exists(snapshot):
                    os.remove(snapshot)