def run(test, params, env):
    """
    Test virsh blockcopy --xml option.

    1.Prepare backend storage (file/block/iscsi/ceph/nbd)
    2.Start VM
    3.Prepare target xml
    4.Execute virsh blockcopy --xml command
    5.Check VM xml after operation accomplished
    6.Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    ignore_check = False

    def check_blockcopy_xml(vm_name, source_image, ignore_check=False):
        """
        Check that the expected source image shows up in the VM's disk list.

        :param vm_name: VM name
        :param source_image: expected source image name.
        :param ignore_check: skip the check entirely if True (default False).
        """
        if ignore_check:
            return
        source_image_list = []
        blklist = virsh.domblklist(vm_name).stdout_text.splitlines()
        for line in blklist:
            if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')):
                source_image_list.append(line.split()[-1])
        logging.debug('domblklist %s:\n%s', vm_name, source_image_list)
        if not any(source_image in s for s in source_image_list):
            test.fail("Cannot find expected source image: %s" % source_image)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    blockcopy_option = params.get("blockcopy_option")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    disk_auth_dict = {}
    size = "1"

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up dirty secrets left in the test environment, if there are any.
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) if blockcopy_option in ['reuse_external']: device_source = libvirt.create_local_disk( backend_storage_type, disk_path, storage_size, device_format) else: device_source = disk_path disks_img.append({ "format": device_format, "source": disk_path, "path": disk_path }) disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } checkout_device_source = image_filename elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} checkout_device_source = device_source elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } checkout_device_source = 'emulated-iscsi' elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") size = "0.15" key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm whether it needs delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, ignore_status=False, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) cmd = 
("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) if blockcopy_option in ['reuse_external']: # Create an local image and make FS on it. libvirt.create_local_disk("file", img_file, storage_size, device_format) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } checkout_device_source = ceph_disk_name elif backend_storage_type == "nbd": # Get server hostname. hostname = socket.gethostname().strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({ "hosts": [{ "name": nbd_server_host, "port": nbd_server_port }] }) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) checkout_device_source = image_path if blockcopy_option in ['pivot']: ignore_check = True logging.debug("device source is: %s", device_source) # Add disk xml. 
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        auth_in_source = True
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw")
        tmp_device_source = libvirt.create_local_disk("file",
                                                      path=device_source_path,
                                                      size=size,
                                                      disk_format="raw")
        s_attach = virsh.attach_disk(vm_name, tmp_device_source, device_target,
                                     "--config", debug=True)
        libvirt.check_exit_status(s_attach)
        try:
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        # Run blockcopy with the prepared disk xml passed via --xml.
        options = params.get("options",
                             "--pivot --transient-job --verbose --wait")
        result = virsh.blockcopy(vm_name, device_target,
                                 "--xml %s" % disk_xml.xml,
                                 options=options, debug=True)
        libvirt.check_exit_status(result)
        check_source_image = None
        if blockcopy_option in ['pivot']:
            check_source_image = checkout_device_source
        else:
            check_source_image = tmp_device_source
        check_blockcopy_xml(vm_name, check_source_image, ignore_check)
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the vm first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        vmxml_backup.sync("--snapshots-metadata")
        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])
        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result.stdout_text)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as ndbEx:
                    logging.error("Clean Up nbd failed: %s" % str(ndbEx))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
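
# A minimal standalone sketch (not invoked by the test above) of the
# "virsh blockcopy --xml" flow this case exercises.  The guest name, target
# device and destination path below are illustrative assumptions, not values
# used by the test.
def _blockcopy_xml_sketch():
    from virttest import virsh
    from virttest.libvirt_xml.devices.disk import Disk
    from virttest.utils_test import libvirt

    # Describe the copy destination as a standalone <disk> element.
    dest_disk = Disk(type_name="file")
    dest_disk.device = "disk"
    dest_disk.target = {"dev": "vdb", "bus": "virtio"}
    dest_disk.driver = {"name": "qemu", "type": "raw"}
    dest_disk.source = dest_disk.new_disk_source(
        **{"attrs": {"file": "/var/tmp/blockcopy_dest.raw",
                     "type_name": "file"}})
    # Hand the xml to blockcopy and pivot to the copy once it is ready.
    result = virsh.blockcopy("avocado-vt-vm1", "vdb",
                             "--xml %s" % dest_disk.xml,
                             options="--transient-job --pivot --wait --verbose",
                             debug=True)
    libvirt.check_exit_status(result)
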
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml pointing to the backend storage with a
      valid/invalid luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Dict from which to get the encryption password and size.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = ("qemu-img create -f luks "
               "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
               "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.

        Fail the test if the device's format does not match fmt.
        """
        cmd_result = process.run("qemu-img -h", ignore_status=True,
                                 shell=True, verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s"
                      % (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
                cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                       "mkdir -p test && mount /dev/{0} test && echo"
                       " teststring > test/testfile && umount test".format(
                           added_part))
                status, output = session.cmd_status_output(cmd)
                logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                              status, output)
                return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } elif use_auth_usage: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = 
libvirt.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % ( gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = { "attrs": { "protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name) }, "hosts": [{ "name": gluster_host_ip, "port": "24007" }] } elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) encrypt_dev(device_source, params) libvirt.check_exit_status(ret) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_sec_uuid } } disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) vmxml.sync() try: vm.start() vm.wait_for_login() except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": libvirt.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
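
# Standalone sketch (not invoked by the test above) of the luks
# encrypt-and-verify steps that encrypt_dev()/check_dev_format() perform.
# The image path, password and size are illustrative assumptions, and the
# verification assumes a qemu-img new enough to accept -U (the test above
# probes for that first).
def _luks_encrypt_sketch(image_path="/var/tmp/luks_test.img",
                         password="password", size="500M"):
    from avocado.utils import process

    # Create a luks-formatted image whose key comes from a base64 qemu secret.
    create_cmd = ("qemu-img create -f luks "
                  "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                  "-o key-secret=sec0 %s %s" % (password, image_path, size))
    process.run(create_cmd, shell=True)
    # Confirm the result really is luks formatted.
    info = process.run("qemu-img info -U %s" % image_path,
                       ignore_status=True, shell=True).stdout_text
    return "luks" in info
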
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def check_snapshot_tree(): """ Check whether predefined snapshot names are equals to snapshot names by virsh snapshot-list --tree """ predefined_snapshot_name_list = [] for count in range(1, snapshot_take): predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count)) snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name result_output = process.run(snapshot_list_cmd, ignore_status=True, shell=True).stdout_text virsh_snapshot_name_list = [] for line in result_output.rsplit("\n"): strip_line = line.strip() if strip_line and "|" not in strip_line: virsh_snapshot_name_list.append(strip_line) # Compare two lists in their order and values, all need to be same. compare_list = [out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list) if out_p not in out_v] if compare_list: test.fail("snapshot tree not correctly returned.") # If check_snapshot_tree is True, check snapshot tree output. if is_check_snapshot_tree: check_snapshot_tree() def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. 
backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no") # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the 
test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # 
backingStore element introuduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" if with_timeout: blockcommit_options += " --timeout 1" if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: snap_name = "%s.%s1" % (diskname, postfix_n) blk_source = os.path.join(tmp_dir, snap_name) blockcommit_options += " --base %s" % blk_source if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if vm_state == "shut off": vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: blockcommit_options = " --wait --verbose --top vda[1] --base vda[2] --keep-relative" block_commit_index = snapshot_take - 1 expect_backing_file = True # Do block commit with --wait if top_inactive for count in range(1, block_commit_index): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if not multiple_chain: if disk_src_file != snap_src_lst[2]: test.fail(err_msg) else: if disk_src_file != snap_src_lst[3]: test.fail(err_msg) else: if disk_src_file != blk_source: test.fail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': test.fail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. 
continue else: logging.debug("after active block commit job " "ready for pivot, the target disk" " xml is %s", disk_xml) break else: break else: break else: if disk_mirror is None: logging.debug(disk_xml) if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.pop(0) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") cmd_result = virsh.blockjob(vm_name, blk_target, '', ignore_status=True, debug=True) libvirt.check_exit_status(cmd_result) elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] base_index = chain_lst.index(blk_source) chain_lst = chain_lst[base_index:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") break else: # wait pivot after commit is synced continue else: logging.debug("after inactive commit the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.remove(top_image) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] top_index = chain_lst.index(top_image) base_index = chain_lst.index(blk_source) val_tmp = [] for i in range(top_index, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") break else: break # Check flag files if not vm_state == "shut off" and not multiple_chain: for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockcommit failed: %s" % output) if not pivot_opt and snap_in_mirror: # do snapshot during mirror phase snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path snapshot_external_disks.append(snap_path) cmd_result = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) libvirt.check_exit_status(cmd_result, snap_in_mirror_err) finally: # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') if cmd_session: cmd_session.close() for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
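
# Standalone sketch (not invoked by the test above) of the core
# snapshot -> active blockcommit -> pivot sequence this case is built around.
# The guest name, target device and snapshot path are illustrative assumptions.
def _active_blockcommit_sketch(vm_name="avocado-vt-vm1", target="vda",
                               snap_file="/var/tmp/snap1.img"):
    from virttest import virsh
    from virttest.utils_test import libvirt

    # Take one external, disk-only snapshot so there is a top image to commit.
    snap_opts = ("snap1 --disk-only --atomic --no-metadata "
                 "%s,snapshot=external,file=%s" % (target, snap_file))
    libvirt.check_exit_status(
        virsh.snapshot_create_as(vm_name, snap_opts, debug=True))
    # Commit the active top image back into its backing file and pivot,
    # so the guest ends up running on the original image again.
    libvirt.check_exit_status(
        virsh.blockcommit(vm_name, target,
                          "--active --shallow --pivot --wait --verbose",
                          debug=True))
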
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts( secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = 
params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format( mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files. if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secret secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
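
# Illustrative helper, not called by the test above: the ad-hoc rbd cleanup
# pipelines in its finally block all follow this one pattern.  It assumes the
# module-level `process` import and a reachable monitor host.
def _purge_and_remove_rbd_image(mon_host, key_opt, image):
    """Remove an rbd image if it exists, purging its snapshots first."""
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap purge {2} && "
           "rbd -m {0} {1} rm {2}".format(mon_host, key_opt, image))
    process.run(cmd, ignore_status=True, shell=True)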
def run(test, params, env): """ Test virsh domblkthreshold option. 1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd) 2.Start VM 3.Set domblkthreshold on target device in VM 4.Trigger one threshold event 5.Check threshold event is received as expected 6.Clean up test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} block_threshold_timeout = params.get("block_threshold_timeout", "120") event_type = params.get("event_type", "block-threshold") block_threshold_option = params.get("block_threshold_option", "--loop") def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs): """ Set VM block threshold on specific target device. :param vm_name: VM name. :param target_device: target device in VM :param threshold: threshold value with specific unit such as 100M :param dargs: mutable parameter dict """ ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs) libvirt.check_exit_status(ret) def trigger_block_threshold_event(vm_domain, target): """ Trigger block threshold event. :param vm_domain: VM name :param target: Disk dev in VM. """ try: session = vm_domain.wait_for_login() time.sleep(10) cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " " mount /dev/{0} /mnt && " " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101" .format(target)) status, output = session.cmd_status_output(cmd) if status: test.error("Failed to mount and fill data in VM: %s" % output) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs): """ Check threshold event. :param vm_name: VM name :param event_type: event type. :param event_timeout: event timeout value :param options: event option :dargs: dynamic parameters. """ ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs) logging.debug(ret.stdout_text) libvirt.check_exit_status(ret) def create_vol(p_name, vol_params): """ Create volume. :param p_name: Pool name. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs): """ Trigger blockcommit. :param vm_name: VM name :param target: Disk dev in VM. :param blockcommit_options: blockcommit option :param virsh_dargs: additional parameters """ result = virsh.blockcommit(vm_name, target, blockcommit_options, ignore_status=False, **virsh_dargs) def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs): """ Trigger blockcopy :param vm_name: string, VM name :param target: string, target disk :param dest_path: string, the path of copied disk :param blockcopy_options: string, some options applied :param virsh_dargs: additional options """ result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs) libvirt.check_exit_status(result) def trigger_mirror_threshold_event(vm_domain, target): """ Trigger mirror mode block threshold event. :param vm_domain: VM name :param target: Disk target in VM. 
""" try: session = vm_domain.wait_for_login() # Sleep 10 seconds to let wait for events thread start first in main thread time.sleep(10) cmd = ("dd if=/dev/urandom of=file bs=1G count=3") status, output = session.cmd_status_output(cmd) if status: test.error("Failed to fill data in VM target: %s with %s" % (target, output)) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise except Exception as ex: raise def get_mirror_source_index(vm_name, dev_index=0): """ Get mirror source index :param vm_name: VM name :param dev_index: Disk device index. :return mirror source index in integer """ disk_list = vm_xml.VMXML.get_disk_source(vm_name) disk_mirror = disk_list[dev_index].find('mirror') if disk_mirror is None: test.fail("Failed to get disk mirror") disk_mirror_source = disk_mirror.find('source') return int(disk_mirror_source.get('index')) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage auth info storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") use_auth_usage = "yes" == params.get("use_auth_usage") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no") mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no") default_snapshot_test = "yes" == params.get("default_snapshot_test", "no") block_threshold_value = params.get("block_threshold_value", "100M") snapshot_external_disks = [] tmp_dir = data_dir.get_tmp_dir() dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone") pvt = None # Initialize one NbdExport object nbd = None img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) if ((backend_storage_type == "luks") and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot support <encryption> inside disk in this libvirt version.") # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Additional disk images. disks_img = [] try: # Clean up dirty secrets in test environments if there are. 
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format) disks_img.append({"format": device_format, "source": disk_path, "path": disk_path}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Setup backend storage elif backend_storage_type == "luks": luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, ignore_status=False, debug=True) image_filename = params.get("image_filename", "raw.img") device_source = os.path.join(data_dir.get_tmp_dir(), image_filename) disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a luks encrypted img by qemu-img") elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, ignore_status=False, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) cmd = ("qemu-img create -f %s " "%s %s" % (device_format, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a gluster type img by qemu-img") disk_src_dict = {"attrs": {"protocol": 
"gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) # Create an local image and make FS on it. 
disk_cmd = ("qemu-img create -f %s %s %s" % (device_format, img_file, storage_size)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) # Set virt_use_nfs virt_use_nfs = params.get("virt_use_nfs", "off") result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.error("Failed to set virt_use_nfs value") nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name # Create one image on nfs server libvirt.create_local_disk("file", device_source, '1', "raw") disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = {"name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit} try: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. create_vol(pool_name, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path elif backend_storage_type == "nbd": # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]}) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) logging.debug("device source is: %s", device_source) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) if disk_encryption_dict: disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml except mirror_mode_blockcommit or mirror_mode_blockcopy if (not mirror_mode_blockcommit and not mirror_mode_blockcopy): vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login().close() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s", str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error: logging.info("VM failed to start as expected: %s", str(details)) else: test.fail("VM should start but failed: %s" % str(details)) func_name = trigger_block_threshold_event # Additional operations before set block threshold if backend_storage_type == "file": logging.info("Create snapshot...") snap_opt = " %s --disk-only " snap_opt += "%s,snapshot=external,file=%s" if default_snapshot_test: for index in range(1, 5): snapshot_name = "snapshot_%s" % index snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index) snapshot_external_disks.append(snap_path) snap_option = snap_opt % (snapshot_name, device_target, snap_path) virsh.snapshot_create_as(vm_name, snap_option, ignore_status=False, debug=True) if mirror_mode_blockcommit: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") vmxml.del_device(disk_xml) virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata", ignore_status=False, debug=True) # Do active blockcommit in background. blockcommit_options = "--active" mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit, args=(vm_name, 'vda', blockcommit_options,), kwargs={'debug': True}) mirror_blockcommit_thread.start() device_target = "vda[1]" func_name = trigger_mirror_threshold_event if mirror_mode_blockcopy: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") # Do transient blockcopy in backgroud. 
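            # Roughly equivalent to `virsh blockcopy <dom> vda <dest>
            # --transient-job` left un-pivoted: the copy stays in the mirror
            # phase, a <mirror> element appears in the live domain XML, and
            # the threshold can then be set on the indexed target "vda[N]".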
blockcopy_options = "--transient-job " # Do cleanup if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy, args=(vm_name, 'vda', dest_path, blockcopy_options,), kwargs={'debug': True}) mirror_blockcopy_thread.start() mirror_blockcopy_thread.join(10) device_target = "vda[%d]" % get_mirror_source_index(vm_name) func_name = trigger_mirror_threshold_event set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True}) cli_thread = threading.Thread(target=func_name, args=(vm, device_target)) cli_thread.start() check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True}) finally: # Delete snapshots. if virsh.domain_exists(vm_name): #To delete snapshot, destroy VM first. if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) vmxml_backup.sync("--snapshots-metadata") if os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) for img in disks_img: if os.path.exists(img["path"]): libvirt.delete_local_disk("file", img["path"]) for disk in snapshot_external_disks: libvirt.delete_local_disk('file', disk) if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) elif backend_storage_type == "nfs": result = process.run("setsebool virt_use_nfs off", shell=True) if result.exit_status: logging.info("Failed to restore virt_use_nfs value") elif backend_storage_type == "nbd": if nbd: try: nbd.cleanup() except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
def run(test, params, env): """ Test push-mode incremental backup Steps: 1. create a vm with extra disk vdb 2. create some data on vdb in vm 3. start a push mode full backup on vdb 4. create some data on vdb in vm 5. start a push mode incremental backup 6. repeat step 4 and 5 as required 7. check the full/incremental backup file data """ def backup_job_done(vm_name, vm_disk): """ Check if a backup job for a vm's specific disk is finished. :param vm_name: vm's name :param vm_disk: the disk to be checked, such as 'vdb' :return: 'True' means job finished """ result = virsh.blockjob(vm_name, vm_disk, debug=True) if "no current block job" in result.stdout_text.strip().lower(): return True # Cancel the test if libvirt version is too low if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") target_driver = params.get("target_driver", "qcow2") target_type = params.get("target_type", "file") target_blkdev_path = params.get("target_blkdev_path") target_blkdev_size = params.get("target_blkdev_size", original_disk_size) reuse_target_file = "yes" == params.get("reuse_target_file") prepare_target_file = "yes" == params.get("prepare_target_file") prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev") backup_rounds = int(params.get("backup_rounds", 3)) backup_error = "yes" == params.get("backup_error") tmp_dir = data_dir.get_data_dir() virsh_dargs = {'debug': True, 'ignore_status': True} try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() utils_backup.enable_inc_backup_for_vm(vm) # Prepare the disk to be backuped. 
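        # For original_disk_type == "local" this is a qcow2 file under the
        # avocado data dir; for "ceph" it is an rbd image (optionally with
        # cephx auth) attached to the guest as a network disk.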
disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target elif original_disk_type == "ceph": ceph_mon_host = params.get("ceph_mon_host", "EXAMPLE_MON_HOST_AUTHX") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT") ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL") ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE") ceph_disk_name = ceph_pool_name + "/" + ceph_file_name ceph_client_name = params.get("ceph_client_name", "EXAMPLE_CLIENT_NAME") ceph_client_key = params.get("ceph_client_key", "EXAMPLE_CLIENT_KEY") ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER") ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY") auth_sec_usage_type = "ceph" enable_auth = "yes" == params.get("enable_auth", "yes") key_file = os.path.join(tmp_dir, "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_host) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_params_auth = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid, "auth_in_source": True } else: test.error("No ceph client name/key provided.") disk_path = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_host, key_file) ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name, ceph_cfg, key_file) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'rbd', 'source_name': ceph_disk_name, 'source_host_name': ceph_mon_host, 'source_host_port': ceph_host_port } disk_params.update(disk_params_src) disk_params.update(disk_params_auth) else: test.error("The disk type '%s' not supported in this script." 
% original_disk_type) if hotplug_disk: vm.start() session = vm.wait_for_login().close() disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm_name, disk_xml, debug=True) else: disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_path_list = [] for backup_index in range(backup_rounds): # Prepare backup xml backup_params = {"backup_mode": "push"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = target_type target_params = {"attrs": {}} if target_type == "file": target_file_name = "target_file_%s" % backup_index target_file_path = os.path.join( tmp_dir, target_file_name) if prepare_target_file: libvirt.create_local_disk("file", target_file_path, original_disk_size, target_driver) target_params["attrs"]["file"] = target_file_path backup_path_list.append(target_file_path) elif target_type == "block": if prepare_target_blkdev: target_blkdev_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=target_blkdev_size) target_params["attrs"]["dev"] = target_blkdev_path backup_path_list.append(target_blkdev_path) else: test.fail( "We do not support backup target type: '%s'" % target_type) logging.debug("target params: %s", target_params) backup_disk_params["backup_target"] = target_params driver_params = {"type": target_driver} backup_disk_params["backup_driver"] = driver_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() if reuse_target_file: backup_options += " 
--reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) # Wait for the backup job actually finished if not utils_misc.wait_for( lambda: backup_job_done(vm_name, original_disk_target), 60): test.fail("Backup job not finished in 60s") for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_path in backup_path_list: if target_driver == "qcow2": # Clear backup image's backing file before comparison qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" % backup_path) process.run(qemu_cmd, shell=True, verbose=True) if not utils_backup.cmp_backup_data( original_data_file, backup_path, backup_file_driver=target_driver): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (disk_path, backup_path)) else: logging.debug("'%s' contains correct backup data", backup_path) except utils_backup.BackupBeginError as details: if backup_error: logging.debug("Backup failed as expected.") else: test.fail(details) finally: # Remove checkpoints if "checkpoint_list" in locals() and checkpoint_list: for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove local backup file if "target_file_path" in locals(): if os.path.exists(target_file_path): os.remove(target_file_path) # Remove iscsi devices libvirt.setup_or_cleanup_iscsi(False) # Remove ceph related data if original_disk_type == "ceph": ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name, ceph_cfg, key_file) if "auth_sec_uuid" in locals() and auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if "ceph_cfg" in locals() and os.path.exists(ceph_cfg): os.remove(ceph_cfg) if os.path.exists(key_file): os.remove(key_file)
def run(test, params, env): ''' Test the command virsh pool-create-as (1) Prepare backend storage device (2) Define secret xml and set secret value (3) Test pool-create-as or virsh pool-define with authentication ''' pool_options = params.get('pool_options', '') pool_name = params.get('pool_name') pool_type = params.get('pool_type') pool_target = params.get('pool_target', '') status_error = params.get('status_error') == "yes" # iscsi options emulated_size = params.get("iscsi_image_size", "1") iscsi_host = params.get("iscsi_host", "127.0.0.1") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") # ceph options ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "--keyring %s" % key_file # auth options auth_usage = (params.get('auth_usage') == 'yes') auth_uuid = (params.get('auth_uuid') == 'yes') sec_ephemeral = params.get("secret_ephemeral", "no") sec_private = params.get("secret_private", "yes") sec_desc = params.get("secret_description") auth_type = params.get("auth_type") sec_usage = params.get("secret_usage_type") sec_target = params.get("secret_usage_target") sec_name = params.get("secret_name") auth_sec_dict = { "sec_ephemeral": sec_ephemeral, "sec_private": sec_private, "sec_desc": sec_desc, "sec_usage": sec_usage, "sec_target": sec_target, "sec_name": sec_name } if sec_usage == "iscsi": auth_username = chap_user sec_password = chap_passwd secret_usage = sec_target if sec_usage == "ceph": auth_username = ceph_auth_user sec_password = ceph_auth_key secret_usage = sec_name if pool_target and not os.path.isdir(pool_target): if os.path.isfile(pool_target): logging.error('<target> must be a directory') else: os.makedirs(pool_target) def setup_ceph_auth(): disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) process.run(cmd, ignore_status=True, shell=True) # Create an local image and make FS on it. img_file = os.path.join(data_dir.get_tmp_dir(), "test.img") disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}". 
format(img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage # Ceph can only support raw format disk_cmd = ("qemu-img convert -O %s %s %s" % ("raw", img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) def setup_iscsi_auth(): iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd) return iscsi_target def check_auth_in_xml(dparams): sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source() with open(sourcexml.xml) as xml_f: logging.debug("Source XML is: \n%s", xml_f.read()) # Check result try: for name, v_expect in dparams.items(): if v_expect != sourcexml[name]: test.fail("Expect to find %s=%s, but got %s=%s" % (name, v_expect, name, sourcexml[name])) except xcepts.LibvirtXMLNotFoundError as details: if "usage not found" in str(details) and auth_uuid: pass # Not a auth_usage test elif "uuid not found" in str(details) and auth_usage: pass # Not a auth_uuid test else: test.fail(details) def check_result(result, expect_error=False): # pool-define-as return CmdResult if isinstance(result, process.CmdResult): result = (result.exit_status == 0) # True means run success if expect_error: if result: test.fail("Expect to fail but run success") elif not expect_error: if not result: test.fail("Expect to succeed but run failure") else: logging.info("It's an expected error") if not libvirt_version.version_compare(3, 9, 0): test.cancel("Pool create/define with authentication" " not support in this libvirt version") sec_uuid = "" img_file = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" libvirt_pool = libvirt_storage.StoragePool() try: # Create secret xml and set value encode = True if sec_usage == "ceph": encode = False # Ceph key already encoded sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(sec_uuid, sec_password, encode=encode, debug=True) if sec_usage == "iscsi": iscsi_dev = setup_iscsi_auth() pool_options += (" --source-host %s --source-dev %s" " --auth-type %s --auth-username %s" % (iscsi_host, iscsi_dev, auth_type, auth_username)) if sec_usage == "ceph": # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) setup_ceph_auth() rbd_pool = ceph_disk_name.split('/')[0] pool_options += ( " --source-host %s --source-name %s" " --auth-type %s --auth-username %s" % (ceph_host_ip, rbd_pool, auth_type, auth_username)) if auth_usage: pool_options += " --secret-usage %s" % secret_usage if auth_uuid: pool_options += " --secret-uuid %s" % sec_uuid # Run test cases func_name = params.get("test_func", "pool_create_as") logging.info('Perform test runner: %s', func_name) if func_name == "pool_create_as": func = virsh.pool_create_as if func_name == "pool_define_as": func = virsh.pool_define_as result = func(pool_name, pool_type, pool_target, extra=pool_options, debug=True) # Check status_error check_result(result, expect_error=status_error) if not status_error: # Check pool status pool_status = libvirt_pool.pool_state(pool_name) if ((pool_status == 'inactive' and func_name == "pool_define_as") or (pool_status == "active" and func_name == "pool_create_as")): logging.info("Expected pool status:%s" % pool_status) else: test.fail("Not an expected pool status: %s" % pool_status) # Check pool dumpxml dict_expect = { "auth_type": auth_type, "auth_username": auth_username, "secret_usage": secret_usage, "secret_uuid": sec_uuid } 
check_auth_in_xml(dict_expect) finally: # Clean up logging.info("Starting cleanup") # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) if os.path.exists(img_file): os.remove(img_file) virsh.secret_undefine(sec_uuid, ignore_status=True) libvirt.setup_or_cleanup_iscsi(is_setup=False) if libvirt_pool.pool_exists(pool_name): libvirt_pool.delete_pool(pool_name)
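
# For reference, the option string assembled by the test above amounts to a
# command line roughly like one of these (all values are placeholders):
#
#   virsh pool-create-as <pool> iscsi --target <dir> \
#         --source-host 127.0.0.1 --source-dev <iqn> \
#         --auth-type chap --auth-username <user> --secret-usage <target>
#
#   virsh pool-define-as <pool> rbd \
#         --source-host <ceph-host> --source-name <rbd-pool> \
#         --auth-type ceph --auth-username <user> --secret-uuid <uuid>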
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. 
""" first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if 
src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' make_disk_snapshot(postfix_n, snapshot_take) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # backingStore element introuduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" if with_timeout: blockcommit_options += " --timeout 1" if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: snap_name = "%s.%s1" % (diskname, postfix_n) blk_source = os.path.join(tmp_dir, snap_name) blockcommit_options += " --base %s" % blk_source if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if vm_state == "shut off": 
vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: blockcommit_options = " --wait --verbose --top vda[1] --base vda[2] --keep-relative" block_commit_index = snapshot_take - 1 expect_backing_file = True # Do block commit with --wait if top_inactive for count in range(1, block_commit_index): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if not multiple_chain: if disk_src_file != snap_src_lst[2]: test.fail(err_msg) else: if disk_src_file != snap_src_lst[3]: test.fail(err_msg) else: if disk_src_file != blk_source: test.fail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': test.fail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. 
continue else: logging.debug("after active block commit job " "ready for pivot, the target disk" " xml is %s", disk_xml) break else: break else: break else: if disk_mirror is None: logging.debug(disk_xml) if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.pop(0) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") cmd_result = virsh.blockjob(vm_name, blk_target, '', ignore_status=True, debug=True) libvirt.check_exit_status(cmd_result) elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] base_index = chain_lst.index(blk_source) chain_lst = chain_lst[base_index:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") break else: # wait for pivot after commit is synced continue else: logging.debug("after inactive commit the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.remove(top_image) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] top_index = chain_lst.index(top_image) base_index = chain_lst.index(blk_source) val_tmp = [] for i in range(top_index, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") break else: break # Check flag files if vm_state != "shut off" and not multiple_chain: for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockcommit failed: %s" % output) if not pivot_opt and snap_in_mirror: # do snapshot during mirror phase snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += "vda,snapshot=external,file=%s" % snap_path snapshot_external_disks.append(snap_path) cmd_result = virsh.snapshot_create_as(vm_name, snap_opt, ignore_status=True, debug=True) libvirt.check_exit_status(cmd_result, snap_in_mirror_err) finally: # Remove the ceph config file if it was created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") if cmd_session: cmd_session.close() for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
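# The backing-chain verification in the test above shells out to
# "qemu-img info --backing-chain", adding -U when image locking is supported
# (qemu >= 2.10). A stand-alone sketch of that check is below; the image path
# in the usage comment is hypothetical, and the real test goes through the
# avocado-vt process/libvirt helpers rather than subprocess.
import subprocess

def has_backing_file(image_path, locking_supported=True):
    """Return True if qemu-img reports a backing file for image_path."""
    cmd = ["qemu-img", "info", "--backing-chain", image_path]
    if locking_supported:
        # -U (--force-share) avoids lock conflicts while the guest is running.
        cmd.insert(2, "-U")
    output = subprocess.run(cmd, capture_output=True, text=True,
                            check=True).stdout
    return "backing file" in output

# Example (hypothetical path): has_backing_file("/var/tmp/avocado/image.snap1")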
def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True): """ Setup vm ceph disk with given parameters :param vm: the vm object :param params: dict, dict include setup vm disk xml configurations :param is_setup: one parameter indicate whether setup or clean up """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) logging.debug("original xml is: %s", vmxml) # Device related configurations device_format = params.get("virt_disk_device_format", "raw") device_bus = params.get("virt_disk_device_bus", "virtio") device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdb") hotplug = "yes" == params.get("virt_disk_device_hotplug", "no") keep_raw_image_as = "yes" == params.get("keep_raw_image_as", "no") # Ceph related configurations ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") auth_sec_usage_type = params.get("ceph_auth_sec_usage_type", "ceph") storage_size = params.get("storage_size", "1G") img_file = params.get("ceph_image_file") attach_option = params.get("virt_device_attach_option", "--live") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" is_local_img_file = True if img_file is None else False rbd_key_file = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" disk_auth_dict = None auth_sec_uuid = None names = ceph_disk_name.split('/') pool_name = names[0] image_name = names[1] if not utils_package.package_install(["ceph-common"]): raise exceptions.TestError("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file rbd_key_file = key_file if is_setup: # If enable auth, prepare disk auth if ceph_client_name and ceph_client_key: auth_sec_uuid = _create_secret(auth_sec_usage_type, ceph_auth_key) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} # clean up image file if exists ceph.rbd_image_rm(ceph_mon_ip, pool_name, image_name, keyfile=rbd_key_file) #Create necessary image file if not exists _create_image(device_format, img_file, vm.name, storage_size, ceph_disk_name, ceph_mon_ip, key_opt, ceph_auth_user, ceph_auth_key) # Disk related config disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_mon_ip, "port": ceph_host_port}]} # Create network disk disk_xml = libvirt_disk.create_primitive_disk_xml("network", device, device_target, device_bus, device_format, disk_src_dict, disk_auth_dict) if not keep_raw_image_as: if hotplug: virsh.attach_device(vm.name, disk_xml.xml, flagstr=attach_option, ignore_status=False, debug=True) else: vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) vmxml.add_device(disk_xml) vmxml.sync() else: ceph.rbd_image_rm(ceph_mon_ip, pool_name, image_name, keyfile=rbd_key_file) # Remove ceph config and key file if created. 
for file_path in [ceph_cfg, key_file]: if os.path.exists(file_path): os.remove(file_path) if is_local_img_file and img_file and os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
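# For reference, a rough sketch of the <disk type='network'> element that the
# helper above asks create_primitive_disk_xml() to produce for an RBD-backed
# disk with cephx auth. The pool/image name, monitor address, secret UUID and
# target device are hypothetical placeholders; the test builds this through
# the libvirt_disk/vm_xml helpers rather than by hand.
import xml.etree.ElementTree as ET

def sketch_rbd_disk_xml(disk_name="rbd-pool/image1.img",
                        mon_host="10.0.0.10", mon_port="6789",
                        auth_user="admin", secret_uuid="HYPOTHETICAL-UUID",
                        target_dev="vdb"):
    disk = ET.Element("disk", {"type": "network", "device": "disk"})
    ET.SubElement(disk, "driver", {"name": "qemu", "type": "raw"})
    auth = ET.SubElement(disk, "auth", {"username": auth_user})
    ET.SubElement(auth, "secret", {"type": "ceph", "uuid": secret_uuid})
    source = ET.SubElement(disk, "source",
                           {"protocol": "rbd", "name": disk_name})
    ET.SubElement(source, "host", {"name": mon_host, "port": mon_port})
    ET.SubElement(disk, "target", {"dev": target_dev, "bus": "virtio"})
    return ET.tostring(disk, encoding="unicode")

# print(sketch_rbd_disk_xml()) shows the element that is then hot-plugged with
# attach_device or merged into the domain XML via vmxml.add_device()/sync().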
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(snapshot_take): """ Make external snapshots for disks only. :param snapshot_take: snapshots taken. """ for count in range(1, snapshot_take + 1): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('name' in disk_xml.source.attrs and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if 'dev' in new_attrs: del new_attrs['dev'] elif 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: test.fail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. 
""" first_device = vm.get_first_disk_devices() firt_disk_src = first_device['source'] return firt_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = 
os.path.join(tmp_dir, pool_name) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): test.cancel("Snapshot on glusterfs not" " support in current " "version. Check more info " " with https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide ceph host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockpull operation for it. 
first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot make_disk_snapshot(snapshot_take) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("The domain xml after snapshot is %s" % vmxml) # snapshot src file list snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks if snap_in_mirror: blockpull_options = "--bandwidth 1" else: blockpull_options = "--wait --verbose" if with_timeout: blockpull_options += " --timeout 1" if bandwidth: blockpull_options += " --bandwidth %s" % bandwidth if base_option == "async": blockpull_options += " --async" base_image = None base_index = None if (libvirt_version.version_compare(1, 2, 4) or disk_src_protocol == 'gluster'): # For libvirt 1.2.4 and newer, or a gluster source protocol, the base # image is addressed by backing-chain index, which depends on the base # option: shallow, base or top respectively if base_option == "shallow": base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "base": base_index = 2 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "top": base_index = 0 base_image = "%s[%s]" % (disk_target, base_index) else: if base_option == "shallow": base_image = snap_src_lst[3] elif base_option == "base": base_image = snap_src_lst[2] elif base_option == "top": base_image = snap_src_lst[4] if base_option and base_image: blockpull_options += " --base %s" % base_image if keep_relative: blockpull_options += " --keep-relative" if backing_file_relative_path: # Use block commit to shorten previous snapshots. blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" for count in range(1, snapshot_take + 1): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Use block pull with the --keep-relative flag and reset base_index to 2. base_index = 2 for count in range(1, snapshot_take): # From the third block pull operation on, reset base_index to 1. if count >= 3: base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) blockpull_options = " --wait --verbose --base %s --keep-relative" % base_image res = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Check final backing chain files.
check_chain_backing_files(blk_source_image, True) return # Run test case result = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) status = result.exit_status # If the pull job was aborted due to the timeout, the exit status differs # between RHEL6 (0) and RHEL7 (1) if with_timeout and 'Pull aborted' in result.stdout.strip(): if libvirt_version.version_compare(1, 1, 1): status_error = True else: status_error = False # Check status_error libvirt.check_exit_status(result, status_error) if not status and not with_timeout: if snap_in_mirror: snap_mirror_path = "%s/snap_mirror" % tmp_dir snap_options = "--diskspec vda,snapshot=external," snap_options += "file=%s --disk-only" % snap_mirror_path snapshot_external_disks.append(snap_mirror_path) ret = virsh.snapshot_create_as(vm_name, snap_options, ignore_status=True, debug=True) libvirt.check_exit_status(ret, snap_in_mirror_err) return vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break logging.debug("after pull the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): err_msg = "Domain image backing chain check failed" if not base_option or "async" in base_option: chain_lst = snap_src_lst[-1:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) elif "base" in base_option or "shallow" in base_option: chain_lst = snap_src_lst[::-1] if not base_index and base_image: base_index = chain_lst.index(base_image) val_tmp = [] for i in range(1, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) # If base image is the top layer of snapshot chain, # virsh blockpull should fail, return directly if base_option == "top": return # Check flag files for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockpull failed: %s" % output) finally: # Remove the ceph config file if it was created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") if disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
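# A condensed sketch of how the --base argument for virsh blockpull is
# resolved in the test above. With libvirt >= 1.2.4 (or a gluster-backed
# disk) the base is addressed with the vda[N] index notation; otherwise a
# concrete file from the snapshot source list is used. The mappings mirror
# the test's own indexes; the sample calls are hypothetical.
def resolve_blockpull_base(base_option, disk_target, snap_src_lst,
                           use_index_notation=True):
    """Return the value passed to 'virsh blockpull --base', or None."""
    if use_index_notation:
        # 0 is the active layer; higher numbers are deeper backing files.
        index_map = {"shallow": 1, "base": 2, "top": 0}
        if base_option in index_map:
            return "%s[%d]" % (disk_target, index_map[base_option])
    else:
        path_map = {"shallow": 3, "base": 2, "top": 4}
        if base_option in path_map:
            return snap_src_lst[path_map[base_option]]
    return None

# Example with a hypothetical chain of the original image plus 4 snapshots:
#   resolve_blockpull_base("shallow", "vda", chain)        -> "vda[1]"
#   resolve_blockpull_base("base", "vda", chain, False)    -> chain[2]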
def run(test, params, env): """ Test command: virsh blockcommit <domain> <path> 1) Prepare test environment. 2) Commit changes from a snapshot down to its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False, is_create_image_file_in_vm=False): """ Make external snapshots for disks only. :param postfix_n: postfix option :param snapshot_take: snapshots taken. :param is_create_image_file_in_vm: create image file in VM. """ # Add all disks into command line. disks = vm.get_disk_devices() # Make three external snapshots for disks only for count in range(1, snapshot_take): options = "%s_%s %s%s-desc " % (postfix_n, count, postfix_n, count) options += "--disk-only --atomic --no-metadata" if needs_agent: options += " --quiesce" for disk in disks: disk_detail = disks[disk] basename = os.path.basename(disk_detail['source']) # Remove the original suffix if any, appending # ".postfix_n[0-9]" diskname = basename.split(".")[0] snap_name = "%s.%s%s" % (diskname, postfix_n, count) disk_external = os.path.join(tmp_dir, snap_name) snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) clean_snap_file(disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) status = cmd_result.exit_status if status != 0: test.fail("Failed to make snapshots for disks!") if is_create_image_file_in_vm: create_file_cmd = "dd if=/dev/urandom of=/mnt/snapshot_%s.img bs=1M count=2" % count session.cmd_status_output(create_file_cmd) created_image_files_in_vm.append("snapshot_%s.img" % count) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def check_snapshot_tree(): """ Check whether predefined snapshot names are equals to snapshot names by virsh snapshot-list --tree """ predefined_snapshot_name_list = [] for count in range(1, snapshot_take): predefined_snapshot_name_list.append("%s_%s" % (postfix_n, count)) snapshot_list_cmd = "virsh snapshot-list %s --tree" % vm_name result_output = process.run(snapshot_list_cmd, ignore_status=True, shell=True).stdout_text virsh_snapshot_name_list = [] for line in result_output.rsplit("\n"): strip_line = line.strip() if strip_line and "|" not in strip_line: virsh_snapshot_name_list.append(strip_line) # Compare two lists in their order and values, all need to be same. compare_list = [out_p for out_p, out_v in zip(predefined_snapshot_name_list, virsh_snapshot_name_list) if out_p not in out_v] if compare_list: test.fail("snapshot tree not correctly returned.") # If check_snapshot_tree is True, check snapshot tree output. if is_check_snapshot_tree: check_snapshot_tree() def check_vm_disk_file(vm): """ Check current vm disk source. :param vm: The vm to be checked """ image_name1, image_format = params.get("image_name", "image"), params.get("image_format", "qcow2") image_dir = os.path.join(data_dir.get_data_dir(), image_name1) original_image_path = image_dir + "." 
+ image_format logging.debug("Source file should be : %s", original_image_path) vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) disk = vmxml.get_devices('disk')[0] logging.debug("Current disk info is : %s", disk) if disk.source.attrs['file'] != original_image_path: test.error("Please check current vm disk source") def clean_snap_file(snap_path): """ Clean the existed duplicate snap file. :param snap_path: snap file path """ if os.path.exists(snap_path): os.remove(snap_path) logging.debug("Cleaned snap file before creating :%s" % snap_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. """ first_device = vm.get_first_disk_devices() first_disk_src = first_device['source'] return first_disk_src def make_relative_path_backing_files(pre_set_root_dir=None): """ Create backing chain files of relative path. :param pre_set_root_dir: preset root dir :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) if pre_set_root_dir is None: root_dir = os.path.dirname(first_disk_source) else: root_dir = pre_set_root_dir cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" if pre_set_root_dir: backing_file_dict["b"] = "%s" % first_disk_source backing_file_dict["c"] = "%s/b/b.img" % root_dir backing_file_dict["d"] = "%s/c/c.img" % root_dir disk_format = params.get("disk_format", "qcow2") for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s.img" % (backing_file_path, "qcow2", value, disk_format, key)) ret = process.run(cmd, shell=True) disk_format = "qcow2" libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. 
""" first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) def create_reuse_external_snapshots(pre_set_root_dir=None): """ Create reuse external snapshots :param pre_set_root_dir: preset root directory :return: absolute path of base file """ if pre_set_root_dir is None: first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) else: root_dir = pre_set_root_dir meta_options = " --reuse-external --disk-only --no-metadata" # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "b.img" backing_file_dict["c"] = "c.img" backing_file_dict["d"] = "d.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) external_snap_shot = "%s/%s" % (backing_file_path, value) snapshot_external_disks.append(external_snap_shot) options = "%s --diskspec %s,file=%s" % (meta_options, disk_target, external_snap_shot) cmd_result = virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) libvirt.check_exit_status(cmd_result) logging.debug('reuse external snapshots:%s' % snapshot_external_disks) return root_dir def check_file_in_vm(): """ Check whether certain image files exists in VM internal. """ for img_file in created_image_files_in_vm: status, output = session.cmd_status_output("ls -l /mnt/%s" % img_file) logging.debug(output) if status: test.fail("blockcommit from top to base failed when ls image file in VM: %s" % output) def do_blockcommit_pivot_repeatedly(): """ Validate bugzilla:https://bugzilla.redhat.com/show_bug.cgi?id=1857735 """ # Make external snapshot,pivot and delete snapshot file repeatedly. 
tmp_snapshot_name = "external_snapshot_" + "repeated.qcow2" block_target = 'vda' for count in range(0, 5): options = "%s " % tmp_snapshot_name options += "--disk-only --atomic" disk_external = os.path.join(tmp_dir, tmp_snapshot_name) options += " --diskspec %s,snapshot=external,file=%s" % (block_target, disk_external) virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) virsh.blockcommit(vm_name, block_target, " --active --pivot ", ignore_status=False, debug=True) virsh.snapshot_delete(vm_name, tmp_snapshot_name, " --metadata") libvirt.delete_local_disk('file', disk_external) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) vm_state = params.get("vm_state", "running") needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") top_inactive = ("yes" == params.get("top_inactive")) with_timeout = ("yes" == params.get("with_timeout_option", "no")) cmd_timeout = params.get("cmd_timeout", "1") status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", "none") middle_base = "yes" == params.get("middle_base", "no") pivot_opt = "yes" == params.get("pivot_opt", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", "no") snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no") with_active_commit = "yes" == params.get("with_active_commit", "no") multiple_chain = "yes" == params.get("multiple_chain", "no") virsh_dargs = {'debug': True} check_snapshot_tree = "yes" == params.get("check_snapshot_tree", "no") bandwidth = params.get("blockcommit_bandwidth", "") bandwidth_byte = "yes" == params.get("bandwidth_byte", "no") disk_target = params.get("disk_target", "vda") disk_format = params.get("disk_format", "qcow2") reuse_external_snapshot = "yes" == params.get("reuse_external_snapshot", "no") restart_vm_before_commit = "yes" == params.get("restart_vm_before_commit", "no") check_image_file_in_vm = "yes" == params.get("check_image_file_in_vm", "no") pre_set_root_dir = None blk_source_folder = None convert_qcow2_image_to_raw = "yes" == params.get("convert_qcow2_image_to_raw", "no") repeatedly_do_blockcommit_pivot = "yes" == params.get("repeatedly_do_blockcommit_pivot", "no") from_top_without_active_option = "yes" == params.get("from_top_without_active_option", "no") top_to_middle_keep_overlay = "yes" == params.get("top_to_middle_keep_overlay", "no") block_disk_type_based_on_file_backing_file = "yes" == params.get("block_disk_type_based_on_file_backing_file", "no") block_disk_type_based_on_gluster_backing_file = "yes" == params.get("block_disk_type_based_on_gluster_backing_file", "no") # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", 'no') vol_name = params.get("vol_name") tmp_dir = data_dir.get_data_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) if not top_inactive: if not libvirt_version.version_compare(1, 2, 4): test.cancel("live active block commit is not supported" " in current libvirt version.") # A backup of 
original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Abort the test if there are snapshots already exsiting_snaps = virsh.snapshot_list(vm_name) if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) check_vm_disk_file(vm) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: if disk_src_protocol == 'iscsi' and disk_type == 'block' and reuse_external_snapshot: first_disk = vm.get_first_disk_devices() pre_set_root_dir = os.path.dirname(first_disk['source']) if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide rbd host first.") params.update( {"disk_source_name": os.path.join( pool_name, 'rbd_'+utils_misc.generate_random_string(4)+'.img')}) if utils_package.package_install(["ceph-common"]): ceph.rbd_image_rm(mon_host, *params.get("disk_source_name").split('/')) else: test.error('Failed to install ceph-common to clean image.') if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() if convert_qcow2_image_to_raw: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) blk_source_image_after_converted = "%s/converted_%s" % (blk_source_folder, blk_source_image) # Convert the image from qcow2 to raw convert_disk_cmd = ("qemu-img convert" " -O %s %s %s" % (disk_format, first_src_file, blk_source_image_after_converted)) process.run(convert_disk_cmd, ignore_status=False, shell=True) params.update({'disk_source_name': blk_source_image_after_converted, 'disk_type': 'file', 'disk_src_protocol': 'file'}) libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() if repeatedly_do_blockcommit_pivot: do_blockcommit_pivot_repeatedly() # Create block type disk on file backing file if block_disk_type_based_on_file_backing_file or block_disk_type_based_on_gluster_backing_file: if not vm.is_alive(): vm.start() first_src_file = get_first_disk_source() libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True) block_type_backstore = iscsi_target if block_disk_type_based_on_file_backing_file: first_src_file = get_first_disk_source() if block_disk_type_based_on_gluster_backing_file: first_src_file = "gluster://%s/%s/gluster.qcow2" % (params.get("gluster_server_ip"), params.get("vol_name")) backing_file_create_cmd = ("qemu-img create -f %s -o backing_file=%s,backing_fmt=%s %s" % ("qcow2", first_src_file, "qcow2", block_type_backstore)) process.run(backing_file_create_cmd, ignore_status=False, shell=True) 
meta_options = " --reuse-external --disk-only --no-metadata" options = "%s --diskspec %s,file=%s,stype=block" % (meta_options, 'vda', block_type_backstore) virsh.snapshot_create_as(vm_name, options, ignore_status=False, debug=True) # The first disk is supposed to include OS # We will perform blockcommit operation for it. first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] created_image_files_in_vm = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot postfix_n = 'snap' if reuse_external_snapshot: make_relative_path_backing_files(pre_set_root_dir) blk_source_folder = create_reuse_external_snapshots(pre_set_root_dir) else: make_disk_snapshot(postfix_n, snapshot_take, check_snapshot_tree, check_image_file_in_vm) basename = os.path.basename(blk_source) diskname = basename.split(".")[0] snap_src_lst = [blk_source] if multiple_chain: snap_name = "%s.%s1" % (diskname, postfix_n) snap_top = os.path.join(tmp_dir, snap_name) top_index = snapshot_external_disks.index(snap_top) + 1 omit_list = snapshot_external_disks[top_index:] vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break vmxml.del_device(disk_xml) disk_dict = {'attrs': {'file': snap_top}} disk_xml.source = disk_xml.new_disk_source(**disk_dict) if libvirt_version.version_compare(6, 0, 0): bs_source = {'file': blk_source} bs_dict = {"type": params.get("disk_type", "file"), "format": {'type': params.get("disk_format", "qcow2")}} new_bs = disk_xml.new_backingstore(**bs_dict) new_bs["source"] = disk_xml.backingstore.new_source(**bs_source) disk_xml.backingstore = new_bs vmxml.add_device(disk_xml) vmxml.sync() vm.start() session = vm.wait_for_login() postfix_n = 'new_snap' make_disk_snapshot(postfix_n, snapshot_take) snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks logging.debug("omit list is %s", omit_list) for i in omit_list: snap_src_lst.remove(i) else: # snapshot src file list snap_src_lst += snapshot_external_disks backing_chain = '' for i in reversed(list(range(snapshot_take))): if i == 0: backing_chain += "%s" % snap_src_lst[i] else: backing_chain += "%s -> " % snap_src_lst[i] logging.debug("The backing chain is: %s" % backing_chain) # check snapshot disk xml backingStore is expected vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != blk_target: continue else: if disk.device != 'disk': continue disk_xml = disk.xmltreefile logging.debug("the target disk xml after snapshot is %s", disk_xml) break if not disk_xml: test.fail("Can't find disk xml with target %s" % blk_target) elif libvirt_version.version_compare(1, 2, 4): # backingStore element introduced in 1.2.4 chain_lst = snap_src_lst[::-1] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain check failed") # set blockcommit_options top_image = None blockcommit_options = "--wait --verbose" if with_timeout: blockcommit_options += " --timeout %s" % cmd_timeout if base_option == "shallow": blockcommit_options += " --shallow" elif base_option == "base": if middle_base: snap_name = "%s.%s1" % (diskname, postfix_n) blk_source = os.path.join(tmp_dir, snap_name) blockcommit_options += " --base %s" % blk_source if len(bandwidth): blockcommit_options += " --bandwidth 
%s" % bandwidth if bandwidth_byte: blockcommit_options += " --bytes" if top_inactive: snap_name = "%s.%s2" % (diskname, postfix_n) top_image = os.path.join(tmp_dir, snap_name) if reuse_external_snapshot: index = len(snapshot_external_disks) - 2 top_image = snapshot_external_disks[index] blockcommit_options += " --top %s" % top_image else: blockcommit_options += " --active" if pivot_opt: blockcommit_options += " --pivot" if from_top_without_active_option: blockcommit_options = blockcommit_options.replace("--active", "") if top_to_middle_keep_overlay: blockcommit_options = blockcommit_options.replace("--active", "") blockcommit_options = blockcommit_options.replace("--pivot", "") blockcommit_options += " --keep-overlay" if restart_vm_before_commit: top = 2 base = len(snapshot_external_disks) blockcommit_options = ("--top %s[%d] --base %s[%d] --verbose --wait --keep-relative" % (disk_target, top, disk_target, base)) vm.destroy(gracefully=True) vm.start() if vm_state == "shut off": vm.destroy(gracefully=True) if with_active_commit: # inactive commit follow active commit will fail with bug 1135339 cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name, blk_target) cmd_session = aexpect.ShellSession(cmd) if backing_file_relative_path: blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" block_commit_index = snapshot_take expect_backing_file = False # Do block commit using --active for count in range(1, snapshot_take): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) if top_inactive: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = '' disk_xmls = vmxml.get_devices(device_type="disk") for disk in disk_xmls: if disk.get('device_tag') == 'disk': disk_xml = disk break top_index = 1 try: top_index = disk_xml.backingstore.index except AttributeError: pass else: top_index = int(top_index) block_commit_index = snapshot_take - 1 expect_backing_file = True for count in range(1, block_commit_index): # Do block commit with --wait if top_inactive if top_inactive: blockcommit_options = (" --wait --verbose --top vda[%d] " "--base vda[%d] --keep-relative" % (top_index, top_index+1)) if not libvirt_version.version_compare(6, 0, 0): top_index = 1 else: top_index += 1 res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) check_chain_backing_files(blk_source_image, expect_backing_file) return if reuse_external_snapshot and not top_inactive: block_commit_index = len(snapshot_external_disks) - 1 for index in range(block_commit_index): # Do block commit with --shallow --wait external_blockcommit_options = (" --shallow --wait --verbose --top %s " % (snapshot_external_disks[index])) res = virsh.blockcommit(vm_name, blk_target, external_blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Do blockcommit with top active result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error libvirt.check_exit_status(result, status_error) return # Start one thread to check the bandwidth in output if bandwidth and bandwidth_byte: bandwidth += 'B' pool = ThreadPool(processes=1) pool.apply_async(check_bandwidth_thread, (libvirt.check_blockjob, vm_name, blk_target, bandwidth, test)) # Run test case # Active commit does not support on rbd based disk with bug 1200726 result = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) # Check status_error 
libvirt.check_exit_status(result, status_error) # Skip check chain file as per test case description if restart_vm_before_commit: return if check_image_file_in_vm: check_file_in_vm() if result.exit_status and status_error: return while True: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break if not top_inactive: disk_mirror = disk_xml.find('mirror') if '--pivot' not in blockcommit_options: if disk_mirror is not None: job_type = disk_mirror.get('job') job_ready = disk_mirror.get('ready') src_element = disk_mirror.find('source') disk_src_file = None for elem in ('file', 'name', 'dev'): elem_val = src_element.get(elem) if elem_val: disk_src_file = elem_val break err_msg = "blockcommit base source " err_msg += "%s not expected" % disk_src_file if '--shallow' in blockcommit_options: if not multiple_chain: if disk_src_file != snap_src_lst[2]: test.fail(err_msg) else: if disk_src_file != snap_src_lst[3]: test.fail(err_msg) else: if disk_src_file != blk_source: test.fail(err_msg) if libvirt_version.version_compare(1, 2, 7): # The job attribute mentions which API started the # operation since 1.2.7. if job_type != 'active-commit': test.fail("blockcommit job type '%s'" " not expected" % job_type) if job_ready != 'yes': # The attribute ready, if present, tracks # progress of the job: yes if the disk is known # to be ready to pivot, or, since 1.2.7, abort # or pivot if the job is in the process of # completing. continue else: logging.debug("after active block commit job " "ready for pivot, the target disk" " xml is %s", disk_xml) break else: break else: break else: if disk_mirror is None: logging.debug(disk_xml) if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.pop(0) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") cmd_result = virsh.blockjob(vm_name, blk_target, '', ignore_status=True, debug=True) libvirt.check_exit_status(cmd_result) elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] base_index = chain_lst.index(blk_source) chain_lst = chain_lst[base_index:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing " "chain check failed") break else: # wait pivot after commit is synced continue else: logging.debug("after inactive commit the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): if "--shallow" in blockcommit_options: chain_lst = snap_src_lst[::-1] chain_lst.remove(top_image) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") elif "--base" in blockcommit_options: chain_lst = snap_src_lst[::-1] top_index = chain_lst.index(top_image) base_index = chain_lst.index(blk_source) val_tmp = [] for i in range(top_index, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail("Domain image backing chain " "check failed") break else: break # Check flag files if not vm_state == "shut off" and not multiple_chain: for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockcommit failed: %s" % output) if not pivot_opt and snap_in_mirror: # do snapshot during mirror phase snap_path = "%s/%s.snap" % (tmp_dir, vm_name) snap_opt = "--disk-only --atomic --no-metadata " snap_opt += 
"vda,snapshot=external,file=%s" % snap_path snapshot_external_disks.append(snap_path) cmd_result = virsh.snapshot_create_as(vm_name, snap_opt, ignore_statues=True, debug=True) libvirt.check_exit_status(cmd_result, snap_in_mirror_err) finally: if vm.is_alive(): vm.destroy(gracefully=False) # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if cmd_session: cmd_session.close() for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path or reuse_external_snapshot: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) if blk_source_folder: process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) if disk_src_protocol == 'iscsi' or 'iscsi_target' in locals(): libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux) # Recover images xattr if having some dirty_images = get_images_with_xattr(vm) if dirty_images: clean_images_with_xattr(dirty_images) test.fail("VM's image(s) having xattr left")
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage deivce to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage deivce to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} elif use_auth_usage: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = libvirt.setup_or_cleanup_gluster( 
is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = {"attrs": {"protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) encrypt_dev(device_source, params) libvirt.check_exit_status(ret) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) vmxml.sync() try: vm.start() vm.wait_for_login() except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": libvirt.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
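# Illustrative sketch (assumption, not part of the original test): the encrypt_dev()/
# check_dev_format() helpers above shell out to qemu-img; the same LUKS round trip can be
# exercised on a plain file before wiring it into a disk XML. The helper name, path and
# default size below are made up for the example.
from avocado.utils import process


def luks_roundtrip_demo(image_path, password, size="100M"):
    """Create a LUKS-formatted image and confirm qemu-img reports 'luks'."""
    create_cmd = ("qemu-img create -f luks "
                  "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                  "-o key-secret=sec0 %s %s" % (password, image_path, size))
    process.run(create_cmd, shell=True)
    # The freshly created image is not in use, so plain 'qemu-img info' is enough here;
    # an image attached to a running guest may need the -U (force-share) option instead.
    info = process.run("qemu-img info %s" % image_path,
                       ignore_status=True, shell=True).stdout_text
    return "luks" in info.lower()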
def run(test, params, env): ''' Test the command virsh pool-create-as (1) Prepare backend storage device (2) Define secret xml and set secret value (3) Test pool-create-as or virsh pool-define with authenication ''' pool_options = params.get('pool_options', '') pool_name = params.get('pool_name') pool_type = params.get('pool_type') pool_target = params.get('pool_target', '') status_error = params.get('status_error') == "yes" # iscsi options emulated_size = params.get("iscsi_image_size", "1") iscsi_host = params.get("iscsi_host", "127.0.0.1") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") # ceph options ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "--keyring %s" % key_file # auth options auth_usage = (params.get('auth_usage') == 'yes') auth_uuid = (params.get('auth_uuid') == 'yes') sec_ephemeral = params.get("secret_ephemeral", "no") sec_private = params.get("secret_private", "yes") sec_desc = params.get("secret_description") auth_type = params.get("auth_type") sec_usage = params.get("secret_usage_type") sec_target = params.get("secret_usage_target") sec_name = params.get("secret_name") auth_sec_dict = {"sec_ephemeral": sec_ephemeral, "sec_private": sec_private, "sec_desc": sec_desc, "sec_usage": sec_usage, "sec_target": sec_target, "sec_name": sec_name} if sec_usage == "iscsi": auth_username = chap_user sec_password = chap_passwd secret_usage = sec_target if sec_usage == "ceph": auth_username = ceph_auth_user sec_password = ceph_auth_key secret_usage = sec_name if pool_target and not os.path.isdir(pool_target): if os.path.isfile(pool_target): logging.error('<target> must be a directory') else: os.makedirs(pool_target) def setup_ceph_auth(): disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) process.run(cmd, ignore_status=True, shell=True) # Create an local image and make FS on it. 
img_file = os.path.join(data_dir.get_tmp_dir(), "test.img") disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}" .format(img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage # Ceph can only support raw format disk_cmd = ("qemu-img convert -O %s %s %s" % ("raw", img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) def setup_iscsi_auth(): iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd) return iscsi_target def check_auth_in_xml(dparams): sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source() with open(sourcexml.xml) as xml_f: logging.debug("Source XML is: \n%s", xml_f.read()) # Check result try: for name, v_expect in dparams.items(): if v_expect != sourcexml[name]: test.fail("Expect to find %s=%s, but got %s=%s" % (name, v_expect, name, sourcexml[name])) except xcepts.LibvirtXMLNotFoundError as details: if "usage not found" in str(details) and auth_uuid: pass # Not a auth_usage test elif "uuid not found" in str(details) and auth_usage: pass # Not a auth_uuid test else: test.fail(details) def check_result(result, expect_error=False): # pool-define-as return CmdResult if isinstance(result, process.CmdResult): result = (result.exit_status == 0) # True means run success if expect_error: if result: test.fail("Expect to fail but run success") elif not expect_error: if not result: test.fail("Expect to succeed but run failure") else: logging.info("It's an expected error") if not libvirt_version.version_compare(3, 9, 0): test.cancel("Pool create/define with authentication" " not support in this libvirt version") sec_uuid = "" img_file = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" libvirt_pool = libvirt_storage.StoragePool() try: # Create secret xml and set value encode = True if sec_usage == "ceph": encode = False # Ceph key already encoded sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(sec_uuid, sec_password, encode=encode, debug=True) if sec_usage == "iscsi": iscsi_dev = setup_iscsi_auth() pool_options += (" --source-host %s --source-dev %s" " --auth-type %s --auth-username %s" % (iscsi_host, iscsi_dev, auth_type, auth_username)) if sec_usage == "ceph": # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) setup_ceph_auth() rbd_pool = ceph_disk_name.split('/')[0] pool_options += (" --source-host %s --source-name %s" " --auth-type %s --auth-username %s" % (ceph_host_ip, rbd_pool, auth_type, auth_username)) if auth_usage: pool_options += " --secret-usage %s" % secret_usage if auth_uuid: pool_options += " --secret-uuid %s" % sec_uuid # Run test cases func_name = params.get("test_func", "pool_create_as") logging.info('Perform test runner: %s', func_name) if func_name == "pool_create_as": func = virsh.pool_create_as if func_name == "pool_define_as": func = virsh.pool_define_as result = func(pool_name, pool_type, pool_target, extra=pool_options, debug=True) # Check status_error check_result(result, expect_error=status_error) if not status_error: # Check pool status pool_status = libvirt_pool.pool_state(pool_name) if ((pool_status == 'inactive' and func_name == "pool_define_as") or (pool_status == "active" and func_name == "pool_create_as")): logging.info("Expected pool status:%s" % pool_status) else: test.fail("Not an expected pool status: %s" % pool_status) # Check pool dumpxml 
dict_expect = {"auth_type": auth_type, "auth_username": auth_username, "secret_usage": secret_usage, "secret_uuid": sec_uuid} check_auth_in_xml(dict_expect) finally: # Clean up logging.info("Start to cleanup") # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) if os.path.exists(img_file): os.remove(img_file) virsh.secret_undefine(sec_uuid, ignore_status=True) libvirt.setup_or_cleanup_iscsi(is_setup=False) if libvirt_pool.pool_exists(pool_name): libvirt_pool.delete_pool(pool_name)
def run(test, params, env): """ Test command: virsh blockpull <domain> <path> 1) Prepare test environment. 2) Populate a disk from its backing image. 3) Recover test environment. 4) Check result. """ def make_disk_snapshot(snapshot_take): """ Make external snapshots for disks only. :param snapshot_take: snapshots taken. """ for count in range(1, snapshot_take + 1): snap_xml = snapshot_xml.SnapshotXML() snapshot_name = "snapshot_test%s" % count snap_xml.snap_name = snapshot_name snap_xml.description = "Snapshot Test %s" % count # Add all disks into xml file. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') new_disks = [] for src_disk_xml in disks: disk_xml = snap_xml.SnapDiskXML() disk_xml.xmltreefile = src_disk_xml.xmltreefile # Skip cdrom if disk_xml.device == "cdrom": continue del disk_xml.device del disk_xml.address disk_xml.snapshot = "external" disk_xml.disk_name = disk_xml.target['dev'] # Only qcow2 works as external snapshot file format, update it # here driver_attr = disk_xml.driver driver_attr.update({'type': 'qcow2'}) disk_xml.driver = driver_attr new_attrs = disk_xml.source.attrs if 'file' in disk_xml.source.attrs: file_name = disk_xml.source.attrs['file'] new_file = "%s.snap%s" % (file_name.split('.')[0], count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None elif ('name' in disk_xml.source.attrs and disk_src_protocol == 'gluster'): src_name = disk_xml.source.attrs['name'] new_name = "%s.snap%s" % (src_name.split('.')[0], count) new_attrs.update({'name': new_name}) snapshot_external_disks.append(new_name) hosts = disk_xml.source.hosts elif ('dev' in disk_xml.source.attrs or 'name' in disk_xml.source.attrs): if (disk_xml.type_name == 'block' or disk_src_protocol in ['iscsi', 'rbd']): # Use local file as external snapshot target for block # and iscsi network type. # As block device will be treat as raw format by # default, it's not fit for external disk snapshot # target. A work around solution is use qemu-img again # with the target. # And external active snapshots are not supported on # 'network' disks using 'iscsi' protocol disk_xml.type_name = 'file' if 'dev' in new_attrs: del new_attrs['dev'] elif 'name' in new_attrs: del new_attrs['name'] del new_attrs['protocol'] new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count) snapshot_external_disks.append(new_file) new_attrs.update({'file': new_file}) hosts = None new_src_dict = {"attrs": new_attrs} if hosts: new_src_dict.update({"hosts": hosts}) disk_xml.source = disk_xml.new_disk_source(**new_src_dict) new_disks.append(disk_xml) snap_xml.set_disks(new_disks) snapshot_xml_path = snap_xml.xml logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile) options = "--disk-only --xmlfile %s " % snapshot_xml_path snapshot_result = virsh.snapshot_create( vm_name, options, debug=True) if snapshot_result.exit_status != 0: test.fail(snapshot_result.stderr) # Create a file flag in VM after each snapshot flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"), dir="/tmp") file_path = flag_file.name flag_file.close() status, output = session.cmd_status_output("touch %s" % file_path) if status: test.fail("Touch file in vm failed. %s" % output) snapshot_flag_files.append(file_path) def get_first_disk_source(): """ Get disk source of first device :return: first disk of first device. 
""" first_device = vm.get_first_disk_devices() firt_disk_src = first_device['source'] return firt_disk_src def make_relative_path_backing_files(): """ Create backing chain files of relative path. :return: absolute path of top active file """ first_disk_source = get_first_disk_source() basename = os.path.basename(first_disk_source) root_dir = os.path.dirname(first_disk_source) cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}') ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) # Make three external relative path backing files. backing_file_dict = collections.OrderedDict() backing_file_dict["b"] = "../%s" % basename backing_file_dict["c"] = "../b/b.img" backing_file_dict["d"] = "../c/c.img" for key, value in list(backing_file_dict.items()): backing_file_path = os.path.join(root_dir, key) cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img" % (backing_file_path, value, key)) ret = process.run(cmd, shell=True) libvirt.check_exit_status(ret) return os.path.join(backing_file_path, "d.img") def check_chain_backing_files(disk_src_file, expect_backing_file=False): """ Check backing chain files of relative path after blockcommit. :param disk_src_file: first disk src file. :param expect_backing_file: whether it expect to have backing files. """ first_disk_source = get_first_disk_source() # Validate source image need refer to original one after active blockcommit if not expect_backing_file and disk_src_file not in first_disk_source: test.fail("The disk image path:%s doesn't include the origin image: %s" % (first_disk_source, disk_src_file)) # Validate source image doesn't have backing files after active blockcommit cmd = "qemu-img info %s --backing-chain" % first_disk_source if qemu_img_locking_feature_support: cmd = "qemu-img info -U %s --backing-chain" % first_disk_source ret = process.run(cmd, shell=True).stdout_text.strip() if expect_backing_file: if 'backing file' not in ret: test.fail("The disk image doesn't have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) else: if 'backing file' in ret: test.fail("The disk image still have backing files") else: logging.debug("The actual qemu-img output:%s\n", ret) # MAIN TEST CODE ### # Process cartesian parameters vm_name = params.get("main_vm") vm = env.get_vm(vm_name) snapshot_take = int(params.get("snapshot_take", '0')) needs_agent = "yes" == params.get("needs_agent", "yes") replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no') snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no') bandwidth = params.get("bandwidth", None) with_timeout = ("yes" == params.get("with_timeout_option", "no")) status_error = ("yes" == params.get("status_error", "no")) base_option = params.get("base_option", None) keep_relative = "yes" == params.get("keep_relative", 'no') virsh_dargs = {'debug': True} # Check whether qemu-img need add -U suboption since locking feature was added afterwards qemu-2.10 qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support() backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no") # Process domain disk device parameters disk_type = params.get("disk_type") disk_target = params.get("disk_target", 'vda') disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = 
os.path.join(tmp_dir, pool_name) # A backup of the original VM vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Abort the test if there are snapshots already existing_snaps = virsh.snapshot_list(vm_name) if len(existing_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) snapshot_external_disks = [] # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" try: if disk_src_protocol == 'iscsi' and disk_type == 'network': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk is not supported in" " the current libvirt version.") if disk_src_protocol == 'gluster': if not libvirt_version.version_compare(1, 2, 7): test.cancel("Snapshot on glusterfs is not" " supported in the current " "version. Check more info " "with https://bugzilla.re" "dhat.com/show_bug.cgi?id=" "1017289") # Set vm xml and guest agent if replace_vm_disk: if disk_src_protocol == "rbd" and disk_type == "network": src_host = params.get("disk_source_host", "EXAMPLE_HOSTS") mon_host = params.get("mon_host", "EXAMPLE_MON_HOST") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"): test.cancel("Please provide ceph host first.") if backing_file_relative_path: if vm.is_alive(): vm.destroy(gracefully=False) first_src_file = get_first_disk_source() blk_source_image = os.path.basename(first_src_file) blk_source_folder = os.path.dirname(first_src_file) replace_disk_image = make_relative_path_backing_files() params.update({'disk_source_name': replace_disk_image, 'disk_type': 'file', 'disk_src_protocol': 'file'}) vm.start() libvirt.set_vm_disk(vm, params, tmp_dir) if needs_agent: vm.prepare_guest_agent() # The first disk is supposed to include OS # We will perform blockpull operation for it.
first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] blk_target = first_disk['target'] snapshot_flag_files = [] # get a vm session before snapshot session = vm.wait_for_login() # do snapshot make_disk_snapshot(snapshot_take) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug("The domain xml after snapshot is %s" % vmxml) # snapshot src file list snap_src_lst = [blk_source] snap_src_lst += snapshot_external_disks if snap_in_mirror: blockpull_options = "--bandwidth 1" else: blockpull_options = "--wait --verbose" if with_timeout: blockpull_options += " --timeout 1" if bandwidth: blockpull_options += " --bandwidth %s" % bandwidth if base_option == "async": blockpull_options += " --async" base_image = None base_index = None if (libvirt_version.version_compare(1, 2, 4) or disk_src_protocol == 'gluster'): # For libvirt is older version than 1.2.4 or source protocol is gluster # there are various base image,which depends on base option:shallow,base,top respectively if base_option == "shallow": base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "base": base_index = 2 base_image = "%s[%s]" % (disk_target, base_index) elif base_option == "top": base_index = 0 base_image = "%s[%s]" % (disk_target, base_index) else: if base_option == "shallow": base_image = snap_src_lst[3] elif base_option == "base": base_image = snap_src_lst[2] elif base_option == "top": base_image = snap_src_lst[4] if base_option and base_image: blockpull_options += " --base %s" % base_image if keep_relative: blockpull_options += " --keep-relative" if backing_file_relative_path: # Use block commit to shorten previous snapshots. blockcommit_options = " --active --verbose --shallow --pivot --keep-relative" for count in range(1, snapshot_take + 1): res = virsh.blockcommit(vm_name, blk_target, blockcommit_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) #Use block pull with --keep-relative flag,and reset base_index to 2. base_index = 2 for count in range(1, snapshot_take): # If block pull operations are more than or equal to 3,it need reset base_index to 1. if count >= 3: base_index = 1 base_image = "%s[%s]" % (disk_target, base_index) blockpull_options = " --wait --verbose --base %s --keep-relative" % base_image res = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) libvirt.check_exit_status(res, status_error) # Check final backing chain files. 
check_chain_backing_files(blk_source_image, True) return # Run test case result = virsh.blockpull(vm_name, blk_target, blockpull_options, **virsh_dargs) status = result.exit_status # If the pull job aborted due to timeout, the exit status is different # on RHEL6(0) and RHEL7(1) if with_timeout and 'Pull aborted' in result.stdout.strip(): if libvirt_version.version_compare(1, 1, 1): status_error = True else: status_error = False # Check status_error libvirt.check_exit_status(result, status_error) if not status and not with_timeout: if snap_in_mirror: snap_mirror_path = "%s/snap_mirror" % tmp_dir snap_options = "--diskspec vda,snapshot=external," snap_options += "file=%s --disk-only" % snap_mirror_path snapshot_external_disks.append(snap_mirror_path) ret = virsh.snapshot_create_as(vm_name, snap_options, ignore_status=True, debug=True) libvirt.check_exit_status(ret, snap_in_mirror_err) return vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') for disk in disks: if disk.target['dev'] != blk_target: continue else: disk_xml = disk.xmltreefile break logging.debug("after pull the disk xml is: %s" % disk_xml) if libvirt_version.version_compare(1, 2, 4): err_msg = "Domain image backing chain check failed" if not base_option or "async" in base_option: chain_lst = snap_src_lst[-1:] ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) elif "base" in base_option or "shallow" in base_option: chain_lst = snap_src_lst[::-1] if not base_index and base_image: base_index = chain_lst.index(base_image) val_tmp = [] for i in range(1, base_index): val_tmp.append(chain_lst[i]) for i in val_tmp: chain_lst.remove(i) ret = check_chain_xml(disk_xml, chain_lst) if not ret: test.fail(err_msg) # If base image is the top layer of snapshot chain, # virsh blockpull should fail, return directly if base_option == "top": return # Check flag files for flag in snapshot_flag_files: status, output = session.cmd_status_output("cat %s" % flag) if status: test.fail("blockpull failed: %s" % output) finally: # Remove ceph configure file if created if ceph_cfg: os.remove(ceph_cfg) if vm.is_alive(): vm.destroy(gracefully=False) # Recover xml of vm. vmxml_backup.sync("--snapshots-metadata") # Clean ceph image if used in test if 'mon_host' in locals(): if utils_package.package_install(["ceph-common"]): disk_source_name = params.get("disk_source_name") cmd = ("rbd -m {0} info {1} && rbd -m {0} rm " "{1}".format(mon_host, disk_source_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) else: logging.debug('Failed to install ceph-common to clean ceph.') if not disk_src_protocol or disk_src_protocol != 'gluster': for disk in snapshot_external_disks: if os.path.exists(disk): os.remove(disk) if backing_file_relative_path: libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True) libvirtd = utils_libvirtd.Libvirtd() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(is_setup=False, restart_tgtd=restart_tgtd) elif disk_src_protocol == 'gluster': logging.info("clean gluster env") libvirt.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() elif disk_src_protocol == 'netfs': restore_selinux = params.get('selinux_status_bak') libvirt.setup_or_cleanup_nfs(is_setup=False, restore_selinux=restore_selinux)
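# Illustrative sketch (assumption, not part of the original test): check_chain_backing_files()
# above decides between plain 'qemu-img info' and 'qemu-img info -U' depending on whether the
# installed qemu-img supports image locking. A compact standalone version of that probe plus
# the backing-chain query could look like this; the helper name is made up.
from avocado.utils import process


def backing_chain_info(image_path):
    """Return the qemu-img backing-chain output for an image, adding -U when supported."""
    help_out = process.run("qemu-img --help", ignore_status=True,
                           shell=True, verbose=False).stdout_text
    lock_opt = "-U " if "-U" in help_out else ""
    cmd = "qemu-img info %s--backing-chain %s" % (lock_opt, image_path)
    return process.run(cmd, ignore_status=True, shell=True).stdout_text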
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage device to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") preallocation = params.get("preallocation") cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) # Add preallocation if it is given in params if preallocation: cmd = cmd.replace("key-secret=sec0", "key-secret=sec0,preallocation=%s" % preallocation) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage device to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def create_vol(p_name, target_encrypt_params, vol_params): """ Create volume. :param p_name: Pool name. :param target_encrypt_params: encrypt parameters in dict. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. 
pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.encryption = volxml.new_encryption(**target_encrypt_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = secret_list_result.stdout.strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result def check_top_image_in_xml(expected_top_image): """ check top image in src file :param expected_top_image: expect top image """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != device_target: continue else: disk_xml = disk.xmltreefile break logging.debug("disk xml in top: %s\n", disk_xml) src_file = disk_xml.find('source').get('file') if src_file is None: src_file = disk_xml.find('source').get('name') if src_file != expected_top_image: test.fail("Current top img %s is not the same with %s" % (src_file, expected_top_image)) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_type = params.get("virt_disk_device_type", "file") device_format = params.get("virt_disk_device_format", "raw") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") volume_target_format = params.get("target_format", "raw") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None duplicated_encryption = "yes" == params.get("duplicated_encryption", "no") slice_support_enable = "yes" == params.get("slice_support_enable", "no") block_copy_test = "yes" == params.get("block_copy_test", "no") if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Clean up dirty secrets in test environments if there are. 
dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) libvirt.check_exit_status(ret) # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} elif use_auth_usage: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = {"attrs": {"protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(TMP_DATA_DIR, "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key 
if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = TMP_DATA_DIR pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") device_format = volume_target_format volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = {"name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit} vol_encryption_params = {} vol_encryption_params.update({"format": "luks"}) vol_encryption_params.update({"secret": {"type": "passphrase", "uuid": luks_sec_uuid}}) try: # If target format is qcow2,need to create test image with "qemu-img create" if volume_target_format == "qcow2": option = params.get("luks_extra_elements") libvirt.create_local_disk("file", path=volume_target_path, extra=option, disk_format="qcow2", size="1") else: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. 
create_vol(pool_name, vol_encryption_params, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path elif backend_storage_type == "file": tmp_dir = TMP_DATA_DIR image_name = params.get("file_image_name", "slice.img") device_source = os.path.join(tmp_dir, image_name) disk_src_dict = {'attrs': {'file': device_source}} else: test.cancel("Only iscsi/gluster/rbd/nfs/file can be tested for now.") logging.debug("device source is: %s", device_source) if backend_storage_type != "dir": encrypt_dev(device_source, params) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} print() driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption if duplicated_encryption: disk_xml.encryption = disk_encryption if slice_support_enable: if not libvirt_version.version_compare(6, 0, 0): test.cancel("Cannot put <slice> inside disk <source> " "in this libvirt version.") else: check_du_output = process.run("du -b %s" % device_source, shell=True).stdout_text slice_size = re.findall(r'[0-9]+', check_du_output) disk_source.slices = disk_xml.new_slices( **{"slice_type": "storage", "slice_offset": "0", "slice_size": slice_size[0]}) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") if volume_target_format == "qcow2": check_dev_format(device_source, fmt="qcow2") else: check_dev_format(device_source) if block_copy_test: # Create a transient VM transient_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) virsh.undefine(vm_name, debug=True, ignore_status=False) virsh.create(transient_vmxml.xml, ignore_status=False, debug=True) expected_top_image = vm.get_blk_devices()[device_target].get('source') options = params.get("blockcopy_options") tmp_dir = TMP_DATA_DIR tmp_file 
= time.strftime("%Y-%m-%d-%H.%M.%S.img") dest_path = os.path.join(tmp_dir, tmp_file) # Need cover a few scenarios:single blockcopy, blockcopy and abort combined virsh.blockcopy(vm_name, device_target, dest_path, options, ignore_status=False, debug=True) if encryption_in_source: virsh.blockjob(vm_name, device_target, " --pivot", ignore_status=False) expected_top_image = dest_path else: virsh.blockjob(vm_name, device_target, " --abort", ignore_status=False) check_top_image_in_xml(expected_top_image) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
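# Illustrative sketch (assumption, not part of the original test): the block_copy_test branch
# above drives virsh blockcopy on a transient domain and then either pivots to the copy or
# aborts back to the original image. Stripped of the surrounding setup, the core sequence is
# roughly the following; dest_path and the "--wait --verbose" options are placeholders, and the
# virsh import comes from the surrounding test module.
def blockcopy_and_finish(vm_name, target, dest_path, pivot=True):
    # Start the copy job and wait until it reaches the mirroring phase
    virsh.blockcopy(vm_name, target, dest_path, "--wait --verbose",
                    ignore_status=False, debug=True)
    if pivot:
        # Switch the domain to the new copy; it becomes the top image
        virsh.blockjob(vm_name, target, "--pivot", ignore_status=False)
    else:
        # Cancel the job; the domain keeps using the original image
        virsh.blockjob(vm_name, target, "--abort", ignore_status=False)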
def run(test, params, env): """ Test Boot OVMF Guest and Seabios Guest with options Steps: 1) Edit VM xml with specified options 2) For secure boot mode, boot OVMF Guest from cdrom first, enroll the key, then switch boot from hd 3) For normal boot mode, directly boot Guest from given device 4) Verify if Guest can boot as expected """ vm_name = params.get("main_vm", "") vm = env.get_vm(vm_name) username = params.get("username", "root") password = params.get("password", "redhat") test_cmd = params.get("test_cmd", "") expected_output = params.get("expected_output", "") check_point = params.get("checkpoint", "") status_error = "yes" == params.get("status_error", "no") boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso") non_release_os_url = params.get("non_release_os_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "non_released_os.qcow2") release_os_url = params.get("release_os_url", "") download_released_file_path = os.path.join(data_dir.get_tmp_dir(), "released_os.qcow2") uefi_iso = params.get("uefi_iso", "") custom_codes = params.get("uefi_custom_codes", "") uefi_target_dev = params.get("uefi_target_dev", "") uefi_device_bus = params.get("uefi_device_bus", "") with_boot = (params.get("with_boot", "no") == "yes") boot_ref = params.get("boot_ref", "dev") boot_order = params.get("boot_order", "1") boot_dev = params.get("boot_dev", "hd") target_dev = params.get("target_dev", "vdb") vol_name = params.get("vol_name") brick_path = os.path.join(test.virtdir, "gluster-pool") boot_type = params.get("boot_type", "seabios") boot_loadparm = params.get("boot_loadparm", None) libvirt_version.is_libvirt_feature_supported(params) # Prepare result checkpoint list check_points = [] if check_point: check_points.append(check_point) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = '' try: # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(params.get("mon_host")) setup_test_env(params, test) apply_boot_options(vmxml, params, test) blk_source = vm.get_first_disk_devices()['source'] set_domain_disk(vmxml, blk_source, params, test) vmxml.remove_all_boots() if with_boot: boot_kwargs = { "boot_ref": boot_ref, "boot_dev": boot_dev, "boot_order": boot_order, "target_dev": target_dev, "loadparm": boot_loadparm } if "yes" == params.get("two_same_boot_dev", "no"): boot_kwargs.update({"two_same_boot_dev": True}) set_boot_dev_or_boot_order(vmxml, **boot_kwargs) define_error = ("yes" == params.get("define_error", "no")) enable_normal_boot(vmxml, check_points, define_error, test) # Some negative cases failed at virsh.define if define_error: return # Start VM and check result # For boot from cdrom or non_released_os, just verify key words from serial console output # For boot from disk image, run 'test cmd' to verify if OS boot well if boot_dev == "cdrom" or non_release_os_url: if not vm.is_alive(): vm.start() check_prompt = params.get("check_prompt", "") while True: if boot_type == "ovmf": match, text = vm.serial_console.read_until_any_line_matches( [check_prompt], timeout=30.0, internal_timeout=0.5) else: match, text = read_until_any_line_matches( vm.serial_console, [check_prompt], timeout=30.0, internal_timeout=0.5) logging.debug("matches %s", check_prompt) if match == -1: logging.debug("Got check point as expected") break elif boot_dev == "hd": ret = virsh.start(vm_name, timeout=60) utlv.check_result(ret, 
expected_fails=check_points) # For no boot options, further check if boot dev can be automatically added if not with_boot: if re.search(r"<boot dev='hd'/>", virsh.dumpxml(vm_name).stdout.strip()): logging.debug("OS boot dev added automatically") else: test.fail("OS boot dev not added as expected") if not status_error: vm_ip = vm.wait_for_get_address(0, timeout=240) remote_session = remote.wait_for_login("ssh", vm_ip, "22", username, password, r"[\#\$]\s*$") if test_cmd: status, output = remote_session.cmd_status_output(test_cmd) logging.debug("CMD '%s' running result is:\n%s", test_cmd, output) if expected_output: if not re.search(expected_output, output): test.fail("Expected output '%s' to match '%s'," " but it did not." % (output, expected_output)) if status: test.fail("Failed to boot %s from %s" % (vm_name, vmxml.xml)) remote_session.close() logging.debug("Succeeded to boot %s", vm_name) finally: # Remove ceph config file if created. if ceph_cfg: os.remove(ceph_cfg) logging.debug("Starting cleanup") if vm.is_alive(): vm.destroy() logging.debug("Restore the VM XML") vmxml_backup.sync(options="--nvram") if cleanup_gluster: process.run("umount /mnt", ignore_status=True, shell=True) gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) if cleanup_iscsi: utlv.setup_or_cleanup_iscsi(False) if cleanup_iso_file: process.run("rm -rf %s" % boot_iso_file, shell=True, ignore_status=True) if cleanup_image_file: process.run("rm -rf %s" % download_file_path, shell=True, ignore_status=True) if cleanup_released_image_file: process.run("rm -rf %s" % download_released_file_path, shell=True, ignore_status=True)
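# Illustrative helper sketch: the hd-boot branch above checks that libvirt
# auto-adds an OS-level <boot dev='hd'/> element by grepping the live domain
# XML. A minimal reusable form of that check could look like this; the
# function name `_has_os_boot_dev` and its default device are assumptions
# made for illustration, not names defined by the test above.
import re

from virttest import virsh

def _has_os_boot_dev(vm_name, dev="hd"):
    """Return True if the live domain XML contains <boot dev='...'/>."""
    live_xml = virsh.dumpxml(vm_name).stdout.strip()
    return bool(re.search(r"<boot dev='%s'/>" % dev, live_xml))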
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip( disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool(pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev] xml_dom_exp = ["source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)" .format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)} for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts(secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no") # Prepare a blank params to confirm if delete the 
config file at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # Start vm and get all partitions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct an unsupported error message list to skip these kinds of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append('unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append('unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there are any. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary packages: sanlock, libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("Failed to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("Failed to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. 
sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict["chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'} san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port}) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = {"name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format} create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm(vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) if additional_guest: virsh.remove_domain(guest_name, "--remove-all-storage", ignore_status=True) # Remove the snapshot. if create_snapshot: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) elif create_volume: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name))) process.run(cmd, ignore_status=True, shell=True) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume))) process.run(cmd, ignore_status=True, shell=True) clean_up_volume_snapshots() else: cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Delete tmp files. if os.path.exists(key_file): os.remove(key_file) if os.path.exists(img_file): os.remove(img_file) # Clean up volume, pool if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout): virsh.vol_delete(vol_name, pool_name) if pool_name and pool_name in virsh.pool_state_dict(): virsh.pool_destroy(pool_name, **virsh_dargs) virsh.pool_undefine(pool_name, **virsh_dargs) # Clean up secrets secret_list = get_secret_list() if secret_list: for secret_uuid in secret_list: virsh.secret_undefine(secret_uuid) logging.info("Restoring vm...") vmxml_backup.sync() if disk_snapshot_with_sanlock: # Restore virt_use_sanlock setting. process.run("setsebool -P virt_use_sanlock 0", shell=True) # Restore qemu config qemu_config.restore() utils_libvirtd.Libvirtd().restart() # Force shutdown sanlock service. process.run("sanlock client shutdown -f 1", shell=True) # Clean up lockspace folder process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True) if snapshot_path is not None: for snapshot in snapshot_path: if os.path.exists(snapshot): os.remove(snapshot)
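# Illustrative helper sketch: the cleanup path above repeats the shell idiom
# "rbd ... info IMAGE && rbd ... rm IMAGE" (only remove an image that rbd can
# actually see) several times. Factoring it out could look like the function
# below; the helper name `_remove_rbd_image_if_exists` is an assumption made
# for illustration and is not defined by the test above.
from avocado.utils import process

def _remove_rbd_image_if_exists(mon_host, key_opt, image):
    """Remove an rbd image only when `rbd info` reports that it exists."""
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
           .format(mon_host, key_opt, image))
    # ignore_status=True: a missing image is not an error during cleanup.
    return process.run(cmd, ignore_status=True, shell=True)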