def _create_secret(auth_sec_usage_type, ceph_auth_key):
    """
    Setup secret with auth

    :param auth_sec_usage_type: auth secret usage
    :param ceph_auth_key: ceph auth key
    :return: auth secret uuid
    """
    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                     "sec_name": "ceph_auth_secret"}
    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True)
    return auth_sec_uuid

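# Hedged usage sketch (added for illustration, not part of the original tests):
# how the helper above is typically driven, assuming the avocado-vt ``virsh``
# and ``logging`` modules imported by these snippets and a base64-encoded
# ``ceph_auth_key``. The read-back via secret_get_value mirrors what the
# secret-define tests further below do.
def _example_create_and_check_ceph_secret(ceph_auth_key):
    auth_sec_uuid = _create_secret("ceph", ceph_auth_key)
    # secret-get-value returns the stored base64 string on success
    ret = virsh.secret_get_value(auth_sec_uuid, debug=True)
    if ret.exit_status:
        logging.error("Could not read back secret %s: %s",
                      auth_sec_uuid, ret.stderr)
    # Clean up so repeated runs do not leave stale secrets behind
    virsh.secret_undefine(auth_sec_uuid, ignore_status=True)
    return auth_sec_uuid
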
def set_secret_value(params):
    """
    Set the secret value

    :param params: the parameter dictionary
    """
    uuid = params.get("secret_uuid")
    options = params.get("secret_options")
    status_error = params.get("status_error", "no")
    secret_string = params.get("secret_base64_no_encoded")
    # Encode secret string if it exists
    if secret_string:
        secret_string = base64.b64encode(secret_string)
    result = virsh.secret_set_value(uuid, secret_string, options)
    status = result.exit_status
    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            raise error.TestFail("%d is not an expected command "
                                 "return value" % status)
    elif status_error == "no":
        if status:
            raise error.TestFail(result.stderr)
        else:
            # Check secret value
            if check_secret(params):
                logging.info(result.stdout)
            else:
                raise error.TestFail("The secret value does not "
                                     "match the result")

def create_luks_secret(vol_path):
    """
    Create secret for luks encryption

    :param vol_path: volume path.
    :return: secret uuid if created successfully.
    """
    sec_xml = secret_xml.SecretXML("no", "yes")
    sec_xml.description = "volume secret"
    sec_xml.usage = 'volume'
    sec_xml.volume = vol_path
    sec_xml.xmltreefile.write()

    ret = virsh.secret_define(sec_xml.xml)
    utlv.check_exit_status(ret)
    # Get secret uuid.
    try:
        encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
    except IndexError:
        test.error("Failed to get newly created secret uuid")
    logging.debug("Secret uuid %s", encryption_uuid)

    # Set secret value.
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(
        'redhat'.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string)
    utlv.check_exit_status(ret)

    return encryption_uuid

def create_luks_secret(vol_path, password, test):
    """
    Create secret for luks encryption

    :param vol_path: volume path.
    :param password: password used as the secret value.
    :param test: test object, used to report errors.
    :return: secret uuid if created successfully.
    """
    sec_xml = secret_xml.SecretXML("no", "yes")
    sec_xml.description = "volume secret"
    sec_xml.usage = 'volume'
    sec_xml.volume = vol_path
    sec_xml.xmltreefile.write()

    ret = virsh.secret_define(sec_xml.xml)
    utlv.check_exit_status(ret)
    # Get secret uuid.
    try:
        encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
    except IndexError:
        test.error("Failed to get newly created secret uuid")
    logging.debug("Secret uuid %s", encryption_uuid)

    # Set secret value.
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(
        password.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string)
    utlv.check_exit_status(ret)

    return encryption_uuid

def create_secret(image_path):
    """
    Create secret.

    :param image_path: image path.
    :return: secret uuid if created successfully.
    """
    sec_xml = secret_xml.SecretXML("no", "yes")
    sec_xml.description = "image secret"
    sec_xml.usage = 'volume'
    sec_xml.volume = image_path
    sec_xml.xmltreefile.write()

    ret = virsh.secret_define(sec_xml.xml)
    libvirt.check_exit_status(ret)
    # Get secret uuid.
    try:
        encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
    except IndexError:
        test.error("Failed to get newly created secret uuid")
    logging.debug("Secret uuid %s", encryption_uuid)

    # Set secret value.
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(
        secret_password_no_encoded.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string, **virsh_dargs)
    libvirt.check_exit_status(ret)

    return encryption_uuid

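# Hedged illustration (added, not from the original tests) of the uuid-parsing
# regex used by the create_*_secret helpers above: ``virsh secret-define``
# reports "Secret <uuid> created", and the capture group grabs the uuid plus
# its leading spaces, hence the trailing ``.lstrip()``. Stdlib only.
import re

_sample_output = "Secret 12345678-1234-1234-1234-123456789abc created"
_parsed_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", _sample_output)[0].lstrip()
assert _parsed_uuid == "12345678-1234-1234-1234-123456789abc"
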
def prepare_secret_disk(self, image_path, secret_disk_dict=None):
    """
    Add secret disk for domain.

    :param image_path: image path for disk source file
    :param secret_disk_dict: secret disk dict to add new disk
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(self.vm.name)
    sec_dict = eval(self.params.get("sec_dict", '{}'))
    device_target = self.params.get("target_disk", "vdd")
    sec_passwd = self.params.get("private_key_password")
    if not secret_disk_dict:
        secret_disk_dict = {'type_name': "file",
                            'target': {"dev": device_target,
                                       "bus": "virtio"},
                            'driver': {"name": "qemu", "type": "qcow2"},
                            'source': {'encryption':
                                       {"encryption": 'luks',
                                        "secret": {"type": "passphrase"}}}}
    # Create secret
    libvirt_secret.clean_up_secrets()
    sec_uuid = libvirt_secret.create_secret(sec_dict=sec_dict)
    virsh.secret_set_value(sec_uuid, sec_passwd, encode=True, debug=True)

    secret_disk_dict['source']['encryption']['secret']["uuid"] = sec_uuid
    secret_disk_dict['source']['attrs'] = {'file': image_path}
    new_disk = Disk()
    new_disk.setup_attrs(**secret_disk_dict)

    # list.append() returns None, so append first and assign the updated
    # device list back to the domain XML afterwards.
    devices = vmxml.devices
    devices.append(new_disk)
    vmxml.devices = devices
    vmxml.xmltreefile.write()
    vmxml.sync()

def set_secret_value(password, encryption_uuid):
    """
    Generate secret string and set secret value.

    :param password: password for encryption
    :param encryption_uuid: uuid of secret
    """
    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(password.encode(encoding)).decode(encoding)
    ret = virsh.secret_set_value(encryption_uuid, secret_string)
    libvirt.check_exit_status(ret)

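# Minimal sketch (added for illustration) of the base64 round trip the helper
# above relies on: virsh secret-set-value stores the base64 text, and decoding
# it recovers the original password. Stdlib only; the literal "redhat" is just
# an example value, matching what other snippets here use.
import base64
import locale

_encoding = locale.getpreferredencoding()
_password = "redhat"
_secret_string = base64.b64encode(_password.encode(_encoding)).decode(_encoding)
assert base64.b64decode(_secret_string).decode(_encoding) == _password
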
def set_secret_value(test, params):
    """
    Set the secret value

    :param test: test object
    :param params: the parameter dictionary
    """
    uuid = params.get("secret_ref")
    options = params.get("set_secret_options")
    status_error = params.get("status_error", "no")
    secret_string = params.get("secret_base64_no_encoded")
    secret_encode = "yes" == params.get("secret_string_base64_encode", "yes")
    secret_file = "yes" == params.get("secret_file", "no")
    if options and "interactive" in options:
        cmd = "secret-set-value %s --%s" % (uuid, options)
        virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                           auto_close=True)
        virsh_session.sendline(cmd)
        # Wait for 5s
        time.sleep(5)
        virsh_session.sendline(secret_string)
        # Wait for 5s to get stripped output
        time.sleep(5)
        output = virsh_session.get_stripped_output()
        exit_status = 0 if "Secret value set" in output else 1
        result = process.CmdResult(cmd, output, output, exit_status)
    else:
        result = virsh.secret_set_value(uuid, secret_string, options=options,
                                        encode=secret_encode,
                                        use_file=secret_file, debug=True)
    status = result.exit_status

    # Don't check result if we don't need to.
    if params.get("check_set_status", "yes") == "no":
        return

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            test.fail("%d is not an expected command return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            # Check secret value
            if check_secret(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The secret value does not match the result")

def set_secret_value(test, params):
    """
    Set the secret value

    :param test: test object
    :param params: the parameter dictionary
    """
    uuid = params.get("secret_ref")
    options = params.get("set_secret_options")
    status_error = params.get("status_error", "no")
    secret_string = params.get("secret_base64_no_encoded")
    # Encode secret string if it exists
    if secret_string:
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_string.encode(encoding)).decode(encoding)
    result = virsh.secret_set_value(uuid, secret_string, options)
    status = result.exit_status

    # Don't check result if we don't need to.
    if params.get("check_set_status", "yes") == "no":
        return

    # Check status_error
    if status_error == "yes":
        if status:
            logging.info("It's an expected %s", result.stderr)
        else:
            test.fail("%d is not an expected command return value" % status)
    elif status_error == "no":
        if status:
            test.fail(result.stderr)
        else:
            # Check secret value
            if check_secret(params):
                logging.info(result.stdout.strip())
            else:
                test.fail("The secret value does not match the result")

def setup_auth_enabled_iscsi_disk(vm, params): """ Create one separate thread to do blockcopy :param vm: VM :param params: config params """ disk_type = params.get("disk_type", "file") disk_target = params.get("disk_target", 'vda') disk_target_bus = params.get("disk_target_bus", "virtio") disk_format = params.get("disk_format", "qcow2") disk_device = params.get("disk_device", "lun") first_disk = vm.get_first_disk_devices() logging.debug("first disk is %s", first_disk) blk_source = first_disk['source'] if vm.is_alive(): vm.destroy(gracefully=False) image_size = params.get("image_size", "5G") chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = utl.create_secret(auth_sec_dict) # Set password of auth secret virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) emu_image = params.get("emulated_image", "emulated-iscsi") utl.setup_or_cleanup_iscsi(is_setup=False) iscsi_target, lun_num = utl.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size=image_size, chap_user=chap_user, chap_passwd=chap_passwd) # Copy first disk to emulated backing store path tmp_dir = data_dir.get_tmp_dir() emulated_path = os.path.join(tmp_dir, emu_image) cmd = "qemu-img convert -f %s -O %s %s %s" % ('qcow2', disk_format, blk_source, emulated_path) process.run(cmd, ignore_status=False, shell=True) # ISCSI auth attributes for disk xml auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": '127.0.0.1', "port": '3260' }] } # Add disk xml. vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) # Delete disk elements disk_deleted = False disks = vmxml.get_devices(device_type="disk") for disk_ in disks: if disk_.target['dev'] == disk_target: vmxml.del_device(disk_) disk_deleted = True if disk_deleted: vmxml.sync() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) disk_xml = Disk(type_name=disk_type) disk_xml.device = disk_device disk_xml.target = {"dev": disk_target, "bus": disk_target_bus} driver_dict = {"name": "qemu", "type": disk_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) auth_in_source = randint(0, 50) % 2 == 0 if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) vmxml.add_device(disk_xml) vmxml.sync() vm.start()
def run(test, params, env): """ Test push-mode incremental backup Steps: 1. create a vm with extra disk vdb 2. create some data on vdb in vm 3. start a push mode full backup on vdb 4. create some data on vdb in vm 5. start a push mode incremental backup 6. repeat step 4 and 5 as required 7. check the full/incremental backup file data """ def backup_job_done(vm_name, vm_disk): """ Check if a backup job for a vm's specific disk is finished. :param vm_name: vm's name :param vm_disk: the disk to be checked, such as 'vdb' :return: 'True' means job finished """ result = virsh.blockjob(vm_name, vm_disk, debug=True) if "no current block job" in result.stdout_text.strip().lower(): return True # Cancel the test if libvirt version is too low if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") target_driver = params.get("target_driver", "qcow2") target_type = params.get("target_type", "file") target_blkdev_path = params.get("target_blkdev_path") target_blkdev_size = params.get("target_blkdev_size", original_disk_size) reuse_target_file = "yes" == params.get("reuse_target_file") prepare_target_file = "yes" == params.get("prepare_target_file") prepare_target_blkdev = "yes" == params.get("prepare_target_blkdev") backup_rounds = int(params.get("backup_rounds", 3)) backup_error = "yes" == params.get("backup_error") tmp_dir = data_dir.get_data_dir() virsh_dargs = {'debug': True, 'ignore_status': True} try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() utils_backup.enable_inc_backup_for_vm(vm) # Prepare the disk to be backuped. 
disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target elif original_disk_type == "ceph": ceph_mon_host = params.get("ceph_mon_host", "EXAMPLE_MON_HOST_AUTHX") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORT") ceph_pool_name = params.get("ceph_pool_name", "EXAMPLE_POOL") ceph_file_name = params.get("ceph_file_name", "EXAMPLE_FILE") ceph_disk_name = ceph_pool_name + "/" + ceph_file_name ceph_client_name = params.get("ceph_client_name", "EXAMPLE_CLIENT_NAME") ceph_client_key = params.get("ceph_client_key", "EXAMPLE_CLIENT_KEY") ceph_auth_user = params.get("ceph_auth_user", "EXAMPLE_AUTH_USER") ceph_auth_key = params.get("ceph_auth_key", "EXAMPLE_AUTH_KEY") auth_sec_usage_type = "ceph" enable_auth = "yes" == params.get("enable_auth", "yes") key_file = os.path.join(tmp_dir, "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_host) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_params_auth = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid, "auth_in_source": True } else: test.error("No ceph client name/key provided.") disk_path = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_host, key_file) ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name, ceph_cfg, key_file) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'rbd', 'source_name': ceph_disk_name, 'source_host_name': ceph_mon_host, 'source_host_port': ceph_host_port } disk_params.update(disk_params_src) disk_params.update(disk_params_auth) else: test.error("The disk type '%s' not supported in this script." 
% original_disk_type) if hotplug_disk: vm.start() session = vm.wait_for_login().close() disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm_name, disk_xml, debug=True) else: disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_path_list = [] for backup_index in range(backup_rounds): # Prepare backup xml backup_params = {"backup_mode": "push"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = target_type target_params = {"attrs": {}} if target_type == "file": target_file_name = "target_file_%s" % backup_index target_file_path = os.path.join( tmp_dir, target_file_name) if prepare_target_file: libvirt.create_local_disk("file", target_file_path, original_disk_size, target_driver) target_params["attrs"]["file"] = target_file_path backup_path_list.append(target_file_path) elif target_type == "block": if prepare_target_blkdev: target_blkdev_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=target_blkdev_size) target_params["attrs"]["dev"] = target_blkdev_path backup_path_list.append(target_blkdev_path) else: test.fail( "We do not support backup target type: '%s'" % target_type) logging.debug("target params: %s", target_params) backup_disk_params["backup_target"] = target_params driver_params = {"type": target_driver} backup_disk_params["backup_driver"] = driver_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() if reuse_target_file: backup_options += " 
--reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) # Wait for the backup job actually finished if not utils_misc.wait_for( lambda: backup_job_done(vm_name, original_disk_target), 60): test.fail("Backup job not finished in 60s") for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_path in backup_path_list: if target_driver == "qcow2": # Clear backup image's backing file before comparison qemu_cmd = ("qemu-img rebase -u -f qcow2 -b '' -F qcow2 %s" % backup_path) process.run(qemu_cmd, shell=True, verbose=True) if not utils_backup.cmp_backup_data( original_data_file, backup_path, backup_file_driver=target_driver): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (disk_path, backup_path)) else: logging.debug("'%s' contains correct backup data", backup_path) except utils_backup.BackupBeginError as details: if backup_error: logging.debug("Backup failed as expected.") else: test.fail(details) finally: # Remove checkpoints if "checkpoint_list" in locals() and checkpoint_list: for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove local backup file if "target_file_path" in locals(): if os.path.exists(target_file_path): os.remove(target_file_path) # Remove iscsi devices libvirt.setup_or_cleanup_iscsi(False) # Remove ceph related data if original_disk_type == "ceph": ceph.rbd_image_rm(ceph_mon_host, ceph_pool_name, ceph_file_name, ceph_cfg, key_file) if "auth_sec_uuid" in locals() and auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if "ceph_cfg" in locals() and os.path.exists(ceph_cfg): os.remove(ceph_cfg) if os.path.exists(key_file): os.remove(key_file)
def run(test, params, env): """ Test command: virsh secret-define <file> secret-undefine <secret> The testcase is to define or modify a secret from an XML file, then undefine it """ # MAIN TEST CODE ### # Process cartesian parameters secret_ref = params.get("secret_ref") ephemeral = params.get("ephemeral_value", "no") private = params.get("private_value", "no") modify_volume = ("yes" == params.get("secret_modify_volume", "no")) remove_uuid = ("yes" == params.get("secret_remove_uuid", "no")) if secret_ref == "secret_valid_uuid": # Generate valid uuid cmd = "uuidgen" status, uuid = commands.getstatusoutput(cmd) if status: raise error.TestNAError("Failed to generate valid uuid") elif secret_ref == "secret_invalid_uuid": uuid = params.get(secret_ref) # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") get_value_acl = "yes" == params.get("get_value_acl", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") get_value_error = "yes" == params.get("get_value_error", "no") if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") acl_dargs = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True } # Get a full path of tmpfile, the tmpfile need not exist tmp_dir = data_dir.get_tmp_dir() volume_path = os.path.join(tmp_dir, "secret_volume") secret_xml_obj = SecretXML(ephemeral, private) secret_xml_obj.uuid = uuid secret_xml_obj.volume = volume_path secret_xml_obj.usage = "volume" secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml") # Run the test try: if define_acl: utils.run("chmod 666 %s" % secret_xml_obj.xml) cmd_result = virsh.secret_define(secret_xml_obj.xml, **acl_dargs) else: cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True) libvirt.check_exit_status(cmd_result, define_error) if cmd_result.exit_status: return # Check ephemeral attribute exist = os.path.exists(secret_obj_xmlfile) if (ephemeral == "yes" and exist) or \ (ephemeral == "no" and not exist): raise error.TestFail("The ephemeral attribute worked not expected") # Check private attrbute virsh.secret_set_value(uuid, SECRET_BASE64, debug=True) if get_value_acl: cmd_result = virsh.secret_get_value(uuid, **acl_dargs) else: cmd_result = virsh.secret_get_value(uuid, debug=True) libvirt.check_exit_status(cmd_result, get_value_error) status = cmd_result.exit_status err_msg = "The private attribute worked not expected" if private == "yes" and not status: raise error.TestFail(err_msg) if private == "no" and status: if not get_value_error: raise error.TestFail(err_msg) if modify_volume: volume_path = os.path.join(tmp_dir, "secret_volume_modify") secret_xml_obj.volume = volume_path cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True) if cmd_result.exit_status == 0: raise error.TestFail("Expect fail on redefine after modify " "volume, but success indeed") if remove_uuid: secret_xml_obj2 = SecretXML(ephemeral, private) secret_xml_obj2.volume = volume_path secret_xml_obj2.usage = "volume" cmd_result = virsh.secret_define(secret_xml_obj2.xml, debug=True) if cmd_result.exit_status == 0: raise error.TestFail("Expect fail on redefine after remove " "uuid, but 
success indeed") if undefine_acl: cmd_result = virsh.secret_undefine(uuid, **acl_dargs) else: cmd_result = virsh.secret_undefine(uuid, debug=True) libvirt.check_exit_status(cmd_result, undefine_error) finally: # cleanup virsh.secret_undefine(uuid, ignore_status=True) if os.path.exists(volume_path): os.unlink(volume_path) if os.path.exists(secret_obj_xmlfile): os.unlink(secret_obj_xmlfile)
def run(test, params, env): """ Test virsh vol-create command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") luks_encrypted = "luks" == params.get("encryption_method") encryption_secret_type = params.get("encryption_secret_type", "passphrase") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: raise error.TestNAError("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: raise error.TestNAError("It does not support generate target " "path in thi libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: raise error.TestNAError("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def create_luks_secret(vol_path): """ Create secret for luks encryption :param vol_path. volume path. :return: secret id if create successfully. """ sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "volume secret" sec_xml.usage = 'volume' sec_xml.volume = vol_path sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) utlv.check_exit_status(ret) # Get secret uuid. 
try: encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout)[0].lstrip() except IndexError, e: raise exceptions.TestError("Fail to get newly created secret uuid") logging.debug("Secret uuid %s", encryption_uuid) # Set secret value. secret_string = base64.b64encode('redhat') ret = virsh.secret_set_value(encryption_uuid, secret_string) utlv.check_exit_status(ret) return encryption_uuid
def run(test, params, env): ''' Test the command virsh pool-create-as (1) Prepare backend storage device (2) Define secret xml and set secret value (3) Test pool-create-as or virsh pool-define with authentication ''' pool_options = params.get('pool_options', '') pool_name = params.get('pool_name') pool_type = params.get('pool_type') pool_target = params.get('pool_target', '') status_error = params.get('status_error') == "yes" # iscsi options emulated_size = params.get("iscsi_image_size", "1") iscsi_host = params.get("iscsi_host", "127.0.0.1") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") # ceph options ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "--keyring %s" % key_file # auth options auth_usage = (params.get('auth_usage') == 'yes') auth_uuid = (params.get('auth_uuid') == 'yes') sec_ephemeral = params.get("secret_ephemeral", "no") sec_private = params.get("secret_private", "yes") sec_desc = params.get("secret_description") auth_type = params.get("auth_type") sec_usage = params.get("secret_usage_type") sec_target = params.get("secret_usage_target") sec_name = params.get("secret_name") auth_sec_dict = { "sec_ephemeral": sec_ephemeral, "sec_private": sec_private, "sec_desc": sec_desc, "sec_usage": sec_usage, "sec_target": sec_target, "sec_name": sec_name } if sec_usage == "iscsi": auth_username = chap_user sec_password = chap_passwd secret_usage = sec_target if sec_usage == "ceph": auth_username = ceph_auth_user sec_password = ceph_auth_key secret_usage = sec_name if pool_target and not os.path.isdir(pool_target): if os.path.isfile(pool_target): logging.error('<target> must be a directory') else: os.makedirs(pool_target) def setup_ceph_auth(): disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) process.run(cmd, ignore_status=True, shell=True) # Create an local image and make FS on it. img_file = os.path.join(data_dir.get_tmp_dir(), "test.img") disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}". 
format(img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage # Ceph can only support raw format disk_cmd = ("qemu-img convert -O %s %s %s" % ("raw", img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) def setup_iscsi_auth(): iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd) return iscsi_target def check_auth_in_xml(dparams): sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source() with open(sourcexml.xml) as xml_f: logging.debug("Source XML is: \n%s", xml_f.read()) # Check result try: for name, v_expect in dparams.items(): if v_expect != sourcexml[name]: test.fail("Expect to find %s=%s, but got %s=%s" % (name, v_expect, name, sourcexml[name])) except xcepts.LibvirtXMLNotFoundError as details: if "usage not found" in str(details) and auth_uuid: pass # Not a auth_usage test elif "uuid not found" in str(details) and auth_usage: pass # Not a auth_uuid test else: test.fail(details) def check_result(result, expect_error=False): # pool-define-as return CmdResult if isinstance(result, process.CmdResult): result = (result.exit_status == 0) # True means run success if expect_error: if result: test.fail("Expect to fail but run success") elif not expect_error: if not result: test.fail("Expect to succeed but run failure") else: logging.info("It's an expected error") if not libvirt_version.version_compare(3, 9, 0): test.cancel("Pool create/define with authentication" " not support in this libvirt version") sec_uuid = "" img_file = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" libvirt_pool = libvirt_storage.StoragePool() try: # Create secret xml and set value encode = True if sec_usage == "ceph": encode = False # Ceph key already encoded sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(sec_uuid, sec_password, encode=encode, debug=True) if sec_usage == "iscsi": iscsi_dev = setup_iscsi_auth() pool_options += (" --source-host %s --source-dev %s" " --auth-type %s --auth-username %s" % (iscsi_host, iscsi_dev, auth_type, auth_username)) if sec_usage == "ceph": # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) setup_ceph_auth() rbd_pool = ceph_disk_name.split('/')[0] pool_options += ( " --source-host %s --source-name %s" " --auth-type %s --auth-username %s" % (ceph_host_ip, rbd_pool, auth_type, auth_username)) if auth_usage: pool_options += " --secret-usage %s" % secret_usage if auth_uuid: pool_options += " --secret-uuid %s" % sec_uuid # Run test cases func_name = params.get("test_func", "pool_create_as") logging.info('Perform test runner: %s', func_name) if func_name == "pool_create_as": func = virsh.pool_create_as if func_name == "pool_define_as": func = virsh.pool_define_as result = func(pool_name, pool_type, pool_target, extra=pool_options, debug=True) # Check status_error check_result(result, expect_error=status_error) if not status_error: # Check pool status pool_status = libvirt_pool.pool_state(pool_name) if ((pool_status == 'inactive' and func_name == "pool_define_as") or (pool_status == "active" and func_name == "pool_create_as")): logging.info("Expected pool status:%s" % pool_status) else: test.fail("Not an expected pool status: %s" % pool_status) # Check pool dumpxml dict_expect = { "auth_type": auth_type, "auth_username": auth_username, "secret_usage": secret_usage, "secret_uuid": sec_uuid } 
check_auth_in_xml(dict_expect) finally: # Clean up logging.info("Start to cleanup") # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) if os.path.exists(img_file): os.remove(img_file) virsh.secret_undefine(sec_uuid, ignore_status=True) libvirt.setup_or_cleanup_iscsi(is_setup=False) if libvirt_pool.pool_exists(pool_name): libvirt_pool.delete_pool(pool_name)
def run(test, params, env): """ Test command: virsh secret-define <file> secret-undefine <secret> The testcase is to define or modify a secret from an XML file, then undefine it """ # MAIN TEST CODE ### # Process cartesian parameters secret_ref = params.get("secret_ref") ephemeral = params.get("ephemeral_value", "no") private = params.get("private_value", "no") modify_volume = ("yes" == params.get("secret_modify_volume", "no")) remove_uuid = ("yes" == params.get("secret_remove_uuid", "no")) if secret_ref == "secret_valid_uuid": # Generate valid uuid cmd = "uuidgen" status, uuid = process.getstatusoutput(cmd) if status: test.cancel("Failed to generate valid uuid") elif secret_ref == "secret_invalid_uuid": uuid = params.get(secret_ref) # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") get_value_acl = "yes" == params.get("get_value_acl", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") get_value_error = "yes" == params.get("get_value_error", "no") define_readonly = "yes" == params.get("secret_define_readonly", "no") undefine_readonly = "yes" == params.get("secret_undefine_readonly", "no") expect_msg = params.get("secret_err_msg", "") if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True} # Get a full path of tmpfile, the tmpfile need not exist tmp_dir = data_dir.get_tmp_dir() volume_path = os.path.join(tmp_dir, "secret_volume") secret_xml_obj = SecretXML(ephemeral, private) secret_xml_obj.uuid = uuid secret_xml_obj.volume = volume_path secret_xml_obj.usage = "volume" secret_obj_xmlfile = os.path.join(SECRET_DIR, uuid + ".xml") # Run the test try: if define_acl: process.run("chmod 666 %s" % secret_xml_obj.xml, shell=True) cmd_result = virsh.secret_define(secret_xml_obj.xml, **acl_dargs) else: cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True, readonly=define_readonly) libvirt.check_exit_status(cmd_result, define_error) if cmd_result.exit_status: if define_readonly: if not re.search(expect_msg, cmd_result.stderr.strip()): test.fail("Fail to get expect err msg: %s" % expect_msg) else: logging.info("Get expect err msg: %s", expect_msg) return # Check ephemeral attribute exist = os.path.exists(secret_obj_xmlfile) if (ephemeral == "yes" and exist) or \ (ephemeral == "no" and not exist): test.fail("The ephemeral attribute worked not expected") # Check private attrbute virsh.secret_set_value(uuid, SECRET_BASE64, debug=True) if get_value_acl: cmd_result = virsh.secret_get_value(uuid, **acl_dargs) else: cmd_result = virsh.secret_get_value(uuid, debug=True) libvirt.check_exit_status(cmd_result, get_value_error) status = cmd_result.exit_status err_msg = "The private attribute worked not expected" if private == "yes" and not status: test.fail(err_msg) if private == "no" and status: if not get_value_error: test.fail(err_msg) if modify_volume: volume_path = os.path.join(tmp_dir, "secret_volume_modify") secret_xml_obj.volume = volume_path cmd_result = virsh.secret_define(secret_xml_obj.xml, debug=True) if cmd_result.exit_status == 0: test.fail("Expect fail on redefine after 
modify " "volume, but success indeed") if remove_uuid: secret_xml_obj2 = SecretXML(ephemeral, private) secret_xml_obj2.volume = volume_path secret_xml_obj2.usage = "volume" cmd_result = virsh.secret_define(secret_xml_obj2.xml, debug=True) if cmd_result.exit_status == 0: test.fail("Expect fail on redefine after remove " "uuid, but success indeed") if undefine_acl: cmd_result = virsh.secret_undefine(uuid, **acl_dargs) else: cmd_result = virsh.secret_undefine(uuid, debug=True, readonly=undefine_readonly) libvirt.check_exit_status(cmd_result, undefine_error) if undefine_readonly: if not re.search(expect_msg, cmd_result.stderr.strip()): test.fail("Fail to get expect err msg: %s" % expect_msg) else: logging.info("Get expect err msg: %s", expect_msg) finally: # cleanup virsh.secret_undefine(uuid, ignore_status=True) if os.path.exists(volume_path): os.unlink(volume_path) if os.path.exists(secret_obj_xmlfile): os.unlink(secret_obj_xmlfile)
sec_xml.usage = secret_usage_type
sec_xml.target = secret_usage_target
sec_xml.xmltreefile.write()

ret = virsh.secret_define(sec_xml.xml)
libvirt.check_exit_status(ret)

secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout)[0].lstrip()
logging.debug("Secret uuid %s", secret_uuid)
if secret_uuid == "":
    raise exceptions.TestError("Failed to get secret uuid")

# Set secret value
secret_string = base64.b64encode(chap_passwd)
ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs)
libvirt.check_exit_status(ret)

# Setup iscsi target
iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=False,
                                                       image_size=emulated_size,
                                                       chap_user=chap_user,
                                                       chap_passwd=chap_passwd,
                                                       portal_ip=iscsi_host)
# If we use qcow2 disk format, should format iscsi disk first.
if device_format == "qcow2":
    cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
           % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
    process.run(cmd, shell=True)
def run(test, params, env): """ Do test for vol-download and vol-upload Basic steps are 1. Create pool with type defined in cfg 2. Create image with writing data in it 3. Get md5 value before operation 4. Do vol-download/upload with options(offset, length) 5. Check md5 value after operation """ pool_type = params.get("vol_download_upload_pool_type") pool_name = params.get("vol_download_upload_pool_name") pool_target = params.get("vol_download_upload_pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_download_upload_vol_name") file_name = params.get("vol_download_upload_file_name") file_path = os.path.join(data_dir.get_tmp_dir(), file_name) offset = params.get("vol_download_upload_offset") length = params.get("vol_download_upload_length") capacity = params.get("vol_download_upload_capacity") allocation = params.get("vol_download_upload_allocation") frmt = params.get("vol_download_upload_format") operation = params.get("vol_download_upload_operation") create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes")) setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") b_luks_encrypt = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.error("API acl test not supported in current" " libvirt version.") try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest", pre_disk_vol=["50M"]) # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if pool_type == "disk": vol_name = utlv.new_disk_vol_name(pool_name) if vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % vol_name utlv.update_polkit_rule(params, vol_pat, new_value) if create_vol: if b_luks_encrypt: if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS format not supported in " "current libvirt version") params['sec_volume'] = os.path.join(pool_target, vol_name) luks_sec_uuid = utlv.create_secret(params) ret = virsh.secret_set_value(luks_sec_uuid, encryption_password, encode=True) utlv.check_exit_status(ret) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(capacity) vol_arg['allocation'] = int(allocation) create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name) vol_list = virsh.vol_list(pool_name).stdout.strip() # iscsi volume name is different from others if pool_type == "iscsi": vol_name = vol_list.split('\n')[2].split()[0] vol_path = virsh.vol_path(vol_name, pool_name, ignore_status=False).stdout.strip() logging.debug("vol_path is %s", vol_path) # Add command options if pool_type is not None: options = " --pool %s" % pool_name if offset is not None: options += " --offset %s" % offset offset = int(offset) else: offset = 0 if length is not None: options += " --length %s" % length length = int(length) else: length = 0 logging.debug("%s options are %s", operation, options) if operation == "upload": # write date to file 
write_file(file_path) # Set length for calculate the offset + length in the following # func get_pre_post_digest() and digest() if length == 0: length = 1048576 def get_pre_post_digest(): """ Get pre region and post region digest if have offset and length :return: pre digest and post digest """ # Get digest of pre region before offset if offset != 0: digest_pre = digest(vol_path, 0, offset) else: digest_pre = 0 logging.debug("pre region digest read from %s 0-%s is %s", vol_path, offset, digest_pre) # Get digest of post region after offset+length digest_post = digest(vol_path, offset + length, 0) logging.debug("post region digest read from %s %s-0 is %s", vol_path, offset + length, digest_post) return (digest_pre, digest_post) # Get pre and post digest before operation for compare (ori_pre_digest, ori_post_digest) = get_pre_post_digest() ori_digest = digest(file_path, 0, 0) logging.debug("ori digest read from %s is %s", file_path, ori_digest) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume upload result = virsh.vol_upload(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation (aft_pre_digest, aft_post_digest) = get_pre_post_digest() aft_digest = digest(vol_path, offset, length) logging.debug("aft digest read from %s is %s", vol_path, aft_digest) # Compare the pre and post part before and after if ori_pre_digest == aft_pre_digest and \ ori_post_digest == aft_post_digest: logging.info("file pre and aft digest match") else: test.fail("file pre or post digests do not" "match, in %s", operation) if operation == "download": # Write date to volume write_file(vol_path) # Record the digest value before operation ori_digest = digest(vol_path, offset, length) logging.debug("original digest read from %s is %s", vol_path, ori_digest) process.run("touch %s" % file_path, ignore_status=True, shell=True) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume download result = virsh.vol_download(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation aft_digest = digest(file_path, 0, 0) logging.debug("new digest read from %s is %s", file_path, aft_digest) if result.exit_status != 0: test.fail("Fail to %s volume: %s" % (operation, result.stderr)) # Compare the change part on volume and file if ori_digest == aft_digest: logging.info("file digests match, volume %s suceed", operation) else: test.fail("file digests do not match, volume %s failed" % operation) finally: pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest") for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) if os.path.isfile(file_path): os.remove(file_path)
    sec_xml.usage_name = auth_usage
    sec_xml.xmltreefile.write()
    logging.debug("Secret xml: %s", sec_xml)
    ret = virsh.secret_define(sec_xml.xml)
    libvirt.check_exit_status(ret)

    secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout)[0].lstrip()
    logging.debug("Secret uuid %s", secret_uuid)
    if secret_uuid is None:
        test.error("Failed to get secret uuid")

    # Set secret value
    auth_key = params.get("auth_key")
    ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs)
    libvirt.check_exit_status(ret)

    # Delete the disk if it exists
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
           "{2}".format(mon_host, key_opt, disk_src_name))
    process.run(cmd, ignore_status=True, shell=True)
else:
    test.error("Failed to install ceph-common")

if disk_src_config:
    config_ceph()

disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
if auth_user and auth_key:
    disk_path += (":id=%s:key=%s" %
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage deivce to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage deivce to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} elif use_auth_usage: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = libvirt.setup_or_cleanup_gluster( 
is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = {"attrs": {"protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) encrypt_dev(device_source, params) libvirt.check_exit_status(ret) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) vmxml.sync() try: vm.start() vm.wait_for_login() except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": libvirt.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
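# For reference, the disk_auth_dict and disk_encryption_dict built in the
# test above render to sub-elements of the guest <disk> XML roughly like the
# snippets below (usernames and UUIDs are placeholders). libvirt >= 3.9.0
# also accepts both elements nested inside <source>, which is what the
# auth_in_source/encryption_in_source flags exercise.
AUTH_BY_UUID = """
<auth username='redhat'>
  <secret type='iscsi' uuid='3c27aa2e-xxxx-xxxx-xxxx-xxxxxxxxxxxx'/>
</auth>
"""

AUTH_BY_USAGE = """
<auth username='redhat'>
  <secret type='iscsi' usage='libvirtiscsi'/>
</auth>
"""

LUKS_ENCRYPTION = """
<encryption format='luks'>
  <secret type='passphrase' uuid='b2a6eab1-xxxx-xxxx-xxxx-xxxxxxxxxxxx'/>
</encryption>
"""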
sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout)[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb")
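# The re.findall() pattern used above to pull the secret UUID out of
# 'virsh secret-define' output is hard to read; the command prints a single
# line such as "Secret 6d4a2e0e-xxxx-... created", so a simpler, equivalent
# way (assumed here, and used by other tests in this collection) is to split
# the message on whitespace:
def parse_secret_uuid(define_stdout):
    """Extract the UUID from 'Secret <uuid> created' output."""
    fields = define_stdout.strip().split()
    # fields == ['Secret', '<uuid>', 'created'] on success
    return fields[1] if len(fields) >= 2 else None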
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") pool_target = params.get("pool_target", "/dev/disk/by-path") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") vg_name = params.get("virt_disk_vg_name", "vg_test_0") lv_name = params.get("virt_disk_lv_name", "lv_test_0") driver_packed = params.get("driver_packed", "on") disk_packed = "yes" == params.get("disk_packed", "no") scsi_packed = "yes" == params.get("scsi_packed", "no") # Indicate the PPC platform on_ppc = False if platform.platform().count('ppc64'): on_ppc = True if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): test.cancel("'volume' type disk doesn't support in" " current libvirt version.") if pool_type == "iscsi-direct": if not libvirt_version.version_compare(4, 7, 0): test.cancel("iscsi-direct pool is not supported in" " current libvirt version.") if ((disk_packed or scsi_packed) and not libvirt_version.version_compare(6, 3, 0)): test.cancel("The virtio packed attribute is not supported in" " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Fix no more PCI slots issue in certain cases. 
vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) machine_type = params.get("machine_type", "pc") if machine_type == 'q35': vm_dump_xml.remove_all_device_by_type('controller') machine_list = vm_dump_xml.os.machine.split("-") vm_dump_xml.set_os_attrs( **{"machine": machine_list[0] + "-q35-" + machine_list[2]}) q35_pcie_dict0 = { 'controller_model': 'pcie-root', 'controller_type': 'pci', 'controller_index': 0 } q35_pcie_dict1 = { 'controller_model': 'pcie-root-port', 'controller_type': 'pci' } vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0)) # Add enough controllers to match multiple times disk attaching requirements for i in list(range(1, 12)): q35_pcie_dict1.update({'controller_index': "%d" % i}) vm_dump_xml.add_device( libvirt.create_controller_xml(q35_pcie_dict1)) vm_dump_xml.sync() virsh_dargs = {'debug': True, 'ignore_status': True} try: start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login() else: if not vm.is_dead(): vm.destroy() if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target with open(secret_xml.xml) as f: logging.debug("Define secret by XML: %s", f.read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: test.error("Fail to get new created secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode( chap_passwd.encode(encoding)).decode(encoding) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target if disk_type == "block": iscsi_target = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=True, image_size="1G", chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=disk_src_host) else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size='1G', chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=disk_src_host) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = pool_target if chap_auth: pool_src_xml.auth_type = "chap" pool_src_xml.auth_username = chap_user pool_src_xml.secret_usage = secret_usage_target poolxml.set_source(pool_src_xml) if pool_type == "iscsi-direct": iscsi_initiator = params.get('iscsi_initiator') pool_src_xml.iqn_name = iscsi_initiator poolxml.set_source(pool_src_xml) # Create iscsi/iscsi-direct pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) xml = virsh.pool_dumpxml(disk_src_pool) logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) 
libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) if len(vol_list) > 1: return vol_list[1] else: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: vol_name, vol_path = vol_info else: test.error("Failed to get volume info") # Snapshot doesn't support raw disk format, create a qcow2 volume # disk for snapshot operation. if pool_type == "iscsi": process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'), shell=True, verbose=True) else: # Get iscsi URL to create a qcow2 volume disk disk_path = ("iscsi://[%s]/%s/%s" % (disk_src_host, iscsi_target, lun_num)) blk_source = "/mnt/test.qcow2" process.run('qemu-img create -f qcow2 %s %s' % (blk_source, '100M'), shell=True, verbose=True) process.run('qemu-img convert -O qcow2 %s %s' % (blk_source, disk_path), shell=True, verbose=True) # Create block device if disk_type == "block": logging.debug("iscsi dev name: %s", iscsi_target) lv_utils.vg_create(vg_name, iscsi_target) device_source = libvirt.create_local_disk("lvm", size="10M", vgname=vg_name, lvname=lv_name) logging.debug("New created volume: %s", lv_name) # Create iscsi network disk XML disk_params = { 'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly } disk_params_src = {} if disk_type == "network": disk_params_src = { 'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': disk_src_host, 'source_host_port': disk_src_port } elif disk_type == "volume": if pool_type == "iscsi": disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_mode': disk_src_mode } # iscsi-direct pool don't include source_mode option else: disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'driver_type': 'qcow2' } elif disk_type == "block": disk_params_src = { 'source_file': device_source, 'driver_type': 'raw' } # Start guest with packed attribute in disk if disk_packed: disk_params_src['driver_packed'] = driver_packed # Start guest with packed attribute in scsi controller if scsi_packed: scsi_controller = Controller("controller") scsi_controller.type = "scsi" scsi_controller.model = "virtio-scsi" scsi_controller.driver = {'packed': driver_packed} vm_dump_xml.add_device(scsi_controller) vm_dump_xml.sync() else: test.cancel("Unsupported disk type in this test") disk_params.update(disk_params_src) if chap_auth and disk_type != "volume": disk_params_auth = { 'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target } disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) attach_option = params.get("attach_option", "") cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Wait for domain is stable vm.wait_for_login().close() domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif 
domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create # virsh snapshot-revert is not supported on combined internal and external snapshots # see more details from,https://bugzilla.redhat.com/show_bug.cgi?id=1733173 snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: virsh.snapshot_list(vm_name, **virsh_dargs) except process.CmdError: test.fail("Failed getting snapshots list for %s" % vm_name) try: virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) except process.CmdError: test.fail("Failed getting snapshots info for %s" % vm_name) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) virsh.snapshot_create_as(vm_name, snapshot_name2, ignore_status=False, debug=True) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) if snapshot_name2 not in cmd_result: test.error("Snapshot %s not found" % snapshot_name2) elif domain_operation == "start_with_packed": expect_xml_line = "packed=\"%s\"" % driver_packed libvirt.check_dumpxml(vm, expect_xml_line) expect_qemu_line = "packed=%s" % driver_packed libvirt.check_qemu_cmd_line(expect_qemu_line) elif domain_operation == "": logging.debug("No domain operation provided, so skip it") else: logging.error("Unsupported operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): test.error("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() # Here the script needs wait for a while for the guest to # recognize the hotplugged disk on PPC if on_ppc: time.sleep(10) cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError) as e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: test.error("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target, wait_remove_event=True) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False) finally: # Clean up snapshot # Shut down before cleaning up snapshots if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) # Restore vm vmxml_backup.sync("--snapshots-metadata") # Destroy pool and undefine secret, which may not exist try: if disk_type == "volume": virsh.pool_destroy(disk_src_pool) if disk_type == "block": clean_up_lvm(iscsi_target, vg_name, lv_name) if chap_auth: virsh.secret_undefine(secret_uuid) except Exception: pass libvirt.setup_or_cleanup_iscsi(is_setup=False)
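# For reference, the CHAP secret defined in the test above boils down to a
# libvirt secret like the XML below: the secret object only identifies the
# usage type and target and stores the password value, while the CHAP
# username is supplied separately through the <auth> element of the disk or
# pool XML. secret-set-value expects the password base64-encoded, which is
# why chap_passwd is run through base64.b64encode() first. (Illustrative
# only; the target name is a placeholder.)
CHAP_SECRET_XML = """
<secret ephemeral='no' private='yes'>
  <usage type='iscsi'>
    <target>libvirtiscsi</target>
  </usage>
</secret>
"""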
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" + " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" + " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: 
vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML disk_params = { 'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly } disk_params_src = {} if disk_type == "network": disk_params_src = { 'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/1", 'source_host_name': disk_src_host, 'source_host_port': disk_src_port } elif disk_type == "volume": disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode } else: raise error.TestNAError("Unsupported disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = { 'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target } disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") disk_xml_f = open(disk_xml) disk_xml_content = disk_xml_f.read() disk_xml_f.close() logging.debug("Attach disk by XML: %s", disk_xml_content) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: logging.error("Unsupported operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError) as e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
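# find_attach_disk() above greps /proc/partitions for the expected target
# name; an equivalent, slightly more general check used elsewhere in these
# tests is to snapshot the partition list before the attach and diff it
# afterwards. A minimal local sketch of that idea (it reads the host's
# /proc/partitions; inside a guest the same file would be read over the
# console/ssh session instead):
def list_partitions():
    """Return the set of block device names from /proc/partitions."""
    with open("/proc/partitions") as proc:
        lines = proc.read().splitlines()[2:]  # skip header and blank line
    return {line.split()[-1] for line in lines if line.strip()}


before = list_partitions()
# ... attach the disk here ...
added = list_partitions() - before
print("newly visible block devices:", added)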
def run(test, params, env): """ Test virsh domblkthreshold option. 1.Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd) 2.Start VM 3.Set domblkthreshold on target device in VM 4.Trigger one threshold event 5.Check threshold event is received as expected 6.Clean up test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} block_threshold_timeout = params.get("block_threshold_timeout", "120") event_type = params.get("event_type", "block-threshold") block_threshold_option = params.get("block_threshold_option", "--loop") def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs): """ Set VM block threshold on specific target device. :param vm_name: VM name. :param target_device: target device in VM :param threshold: threshold value with specific unit such as 100M :param dargs: mutable parameter dict """ ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs) libvirt.check_exit_status(ret) def trigger_block_threshold_event(vm_domain, target): """ Trigger block threshold event. :param vm_domain: VM name :param target: Disk dev in VM. """ try: session = vm_domain.wait_for_login() time.sleep(10) cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " " mount /dev/{0} /mnt && " " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101" .format(target)) status, output = session.cmd_status_output(cmd) if status: test.error("Failed to mount and fill data in VM: %s" % output) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs): """ Check threshold event. :param vm_name: VM name :param event_type: event type. :param event_timeout: event timeout value :param options: event option :dargs: dynamic parameters. """ ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs) logging.debug(ret.stdout_text) libvirt.check_exit_status(ret) def create_vol(p_name, vol_params): """ Create volume. :param p_name: Pool name. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs): """ Trigger blockcommit. :param vm_name: VM name :param target: Disk dev in VM. :param blockcommit_options: blockcommit option :param virsh_dargs: additional parameters """ result = virsh.blockcommit(vm_name, target, blockcommit_options, ignore_status=False, **virsh_dargs) def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs): """ Trigger blockcopy :param vm_name: string, VM name :param target: string, target disk :param dest_path: string, the path of copied disk :param blockcopy_options: string, some options applied :param virsh_dargs: additional options """ result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs) libvirt.check_exit_status(result) def trigger_mirror_threshold_event(vm_domain, target): """ Trigger mirror mode block threshold event. :param vm_domain: VM name :param target: Disk target in VM. 
""" try: session = vm_domain.wait_for_login() # Sleep 10 seconds to let wait for events thread start first in main thread time.sleep(10) cmd = ("dd if=/dev/urandom of=file bs=1G count=3") status, output = session.cmd_status_output(cmd) if status: test.error("Failed to fill data in VM target: %s with %s" % (target, output)) except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise except Exception as ex: raise def get_mirror_source_index(vm_name, dev_index=0): """ Get mirror source index :param vm_name: VM name :param dev_index: Disk device index. :return mirror source index in integer """ disk_list = vm_xml.VMXML.get_disk_source(vm_name) disk_mirror = disk_list[dev_index].find('mirror') if disk_mirror is None: test.fail("Failed to get disk mirror") disk_mirror_source = disk_mirror.find('source') return int(disk_mirror_source.get('index')) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage auth info storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") use_auth_usage = "yes" == params.get("use_auth_usage") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no") mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no") default_snapshot_test = "yes" == params.get("default_snapshot_test", "no") block_threshold_value = params.get("block_threshold_value", "100M") snapshot_external_disks = [] tmp_dir = data_dir.get_tmp_dir() dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone") pvt = None # Initialize one NbdExport object nbd = None img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) if ((backend_storage_type == "luks") and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot support <encryption> inside disk in this libvirt version.") # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Additional disk images. disks_img = [] try: # Clean up dirty secrets in test environments if there are. 
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format) disks_img.append({"format": device_format, "source": disk_path, "path": disk_path}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Setup backend storage elif backend_storage_type == "luks": luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, ignore_status=False, debug=True) image_filename = params.get("image_filename", "raw.img") device_source = os.path.join(data_dir.get_tmp_dir(), image_filename) disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a luks encrypted img by qemu-img") elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, ignore_status=False, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) cmd = ("qemu-img create -f %s " "%s %s" % (device_format, device_source, storage_size)) if process.system(cmd, shell=True): test.error("Can't create a gluster type img by qemu-img") disk_src_dict = {"attrs": {"protocol": 
"gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) # Create an local image and make FS on it. 
disk_cmd = ("qemu-img create -f %s %s %s" % (device_format, img_file, storage_size)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) # Set virt_use_nfs virt_use_nfs = params.get("virt_use_nfs", "off") result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.error("Failed to set virt_use_nfs value") nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name # Create one image on nfs server libvirt.create_local_disk("file", device_source, '1', "raw") disks_img.append({"format": device_format, "source": device_source, "path": device_source}) disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = {"name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit} try: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. create_vol(pool_name, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path elif backend_storage_type == "nbd": # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]}) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) logging.debug("device source is: %s", device_source) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) if disk_encryption_dict: disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml unless mirror_mode_blockcommit or mirror_mode_blockcopy is set if (not mirror_mode_blockcommit and not mirror_mode_blockcopy): vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login().close() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When a wrong password is used in the disk xml for cold plug cases, # the VM cannot be started if status_error: logging.info("VM failed to start as expected: %s", str(details)) else: test.fail("VM should start but failed: %s" % str(details)) func_name = trigger_block_threshold_event # Additional operations before setting the block threshold if backend_storage_type == "file": logging.info("Create snapshot...") snap_opt = " %s --disk-only " snap_opt += "%s,snapshot=external,file=%s" if default_snapshot_test: for index in range(1, 5): snapshot_name = "snapshot_%s" % index snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index) snapshot_external_disks.append(snap_path) snap_option = snap_opt % (snapshot_name, device_target, snap_path) virsh.snapshot_create_as(vm_name, snap_option, ignore_status=False, debug=True) if mirror_mode_blockcommit: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") vmxml.del_device(disk_xml) virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata", ignore_status=False, debug=True) # Do active blockcommit in background. blockcommit_options = "--active" mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit, args=(vm_name, 'vda', blockcommit_options,), kwargs={'debug': True}) mirror_blockcommit_thread.start() device_target = "vda[1]" func_name = trigger_mirror_threshold_event if mirror_mode_blockcopy: if not libvirt_version.version_compare(6, 6, 0): test.cancel("Set threshold for disk mirroring feature is not supported on current version") # Do transient blockcopy in the background.
blockcopy_options = "--transient-job " # Do cleanup if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy, args=(vm_name, 'vda', dest_path, blockcopy_options,), kwargs={'debug': True}) mirror_blockcopy_thread.start() mirror_blockcopy_thread.join(10) device_target = "vda[%d]" % get_mirror_source_index(vm_name) func_name = trigger_mirror_threshold_event set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True}) cli_thread = threading.Thread(target=func_name, args=(vm, device_target)) cli_thread.start() check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True}) finally: # Delete snapshots. if virsh.domain_exists(vm_name): #To delete snapshot, destroy VM first. if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) vmxml_backup.sync("--snapshots-metadata") if os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) for img in disks_img: if os.path.exists(img["path"]): libvirt.delete_local_disk("file", img["path"]) for disk in snapshot_external_disks: libvirt.delete_local_disk('file', disk) if os.path.exists(dest_path): libvirt.delete_local_disk("file", dest_path) # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) elif backend_storage_type == "nfs": result = process.run("setsebool virt_use_nfs off", shell=True) if result.exit_status: logging.info("Failed to restore virt_use_nfs value") elif backend_storage_type == "nbd": if nbd: try: nbd.cleanup() except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
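# set_vm_block_domblkthreshold() and check_threshold_event() in the test
# above wrap two virsh commands. The equivalent manual flow, sketched here
# with subprocess (domain name, target device and threshold value are
# placeholders), is: set a threshold on the target, then wait for the
# block-threshold event to be delivered.
import subprocess

subprocess.run(["virsh", "domblkthreshold", "guest1", "vdd", "100M"],
               check=True)
# Blocks until a matching event arrives or the timeout expires; a timeout
# yields a non-zero exit status, so don't raise on it here.
subprocess.run(["virsh", "event", "guest1", "--event", "block-threshold",
                "--timeout", "120", "--loop"], check=False)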
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") # Indicate the PPC platform on_ppc = False if platform.platform().count('ppc64'): on_ppc = True if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): test.cancel("'volume' type disk doesn't support in" " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login() else: if not vm.is_dead(): vm.destroy() if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target with open(secret_xml.xml) as f: logging.debug("Define secret by XML: %s", f.read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: test.error("Fail to get new created secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size='1G', chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=disk_src_host) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = 
pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) if len(vol_list) > 1: return vol_list[1] else: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: vol_name, vol_path = vol_info else: test.error("Failed to get volume info") # Snapshot doesn't support raw disk format, create a qcow2 volume # disk for snapshot operation. process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'), shell=True) # Create iscsi network disk XML disk_params = {'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly} disk_params_src = {} if disk_type == "network": disk_params_src = {'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': disk_src_host, 'source_host_port': disk_src_port} elif disk_type == "volume": disk_params_src = {'source_pool': disk_src_pool, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_mode': disk_src_mode} else: test.cancel("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = {'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target} disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) attach_option = params.get("attach_option", "") cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Wait for domain is stable vm.wait_for_login().close() domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: virsh.snapshot_list(vm_name, **virsh_dargs) except process.CmdError: test.fail("Failed getting snapshots list for %s" % vm_name) try: virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) except process.CmdError: test.fail("Failed getting snapshots info for %s" % vm_name) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) 
snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2) sn_create_op = ("%s --disk-only --diskspec %s,file=%s" % (snapshot_name2, disk_target, snapshot_file)) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) if snapshot_name2 not in cmd_result: test.error("Snapshot %s not found" % snapshot_name2) elif domain_operation == "": logging.debug("No domain operation provided, so skip it") else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): test.error("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() # Here the script needs wait for a while for the guest to # recognize the hotplugged disk on PPC if on_ppc: time.sleep(10) cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError) as e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: test.error("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False) finally: # Clean up snapshot # Shut down before cleaning up snapshots if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) # Restore vm vmxml_backup.sync("--snapshots-metadata") # Destroy pool and undefine secret, which may not exist try: if disk_type == "volume": virsh.pool_destroy(disk_src_pool) if chap_auth: virsh.secret_undefine(secret_uuid) except Exception: pass libvirt.setup_or_cleanup_iscsi(is_setup=False)
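For reference, the disk XML that libvirt.create_disk_xml() is expected to build for the network/iscsi plus CHAP case above looks roughly like the snippet below (see https://libvirt.org/formatdomain.html); the IQN, host, and usage values are placeholders based on this test's defaults, not fixed values:

# Illustrative only: an iSCSI network disk with CHAP authentication.
EXAMPLE_ISCSI_NETWORK_DISK_XML = """
<disk type='network' device='disk'>
  <driver name='qemu' type='raw'/>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/0'>
    <host name='127.0.0.1' port='3260'/>
  </source>
  <auth username='chap_user'>
    <secret type='iscsi' usage='libvirtiscsi'/>
  </auth>
  <target dev='vdb' bus='virtio'/>
</disk>
"""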
def run(test, params, env): """ Test disk encryption option. 1.Prepare test environment, destroy or suspend a VM. 2.Prepare tgtd and secret config. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def check_save_restore(save_file): """ Test domain save and restore. """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Test domain snapshot operation. """ snapshot1 = "s1" snapshot2 = "s2" ret = virsh.snapshot_create_as(vm_name, snapshot1) libvirt.check_exit_status(ret) ret = virsh.snapshot_create_as( vm_name, "%s --disk-only --diskspec vda," "file=/tmp/testvm-snap1" % snapshot2) libvirt.check_exit_status(ret, True) ret = virsh.snapshot_create_as( vm_name, "%s --memspec file=%s,snapshot=external" " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2)) libvirt.check_exit_status(ret, True) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] elif target.startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Cann't see added partition in VM") return False cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) s, o = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", o) if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_qemu_cmd(): """ Check qemu-kvm command line options """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.system(cmd, ignore_status=True, shell=True): test.fail("Can't see disk option '%s' " "in command line" % cmd) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") # Controller specific attributes. cntlr_type = params.get('controller_type', None) cntlr_model = params.get('controller_model', None) cntlr_index = params.get('controller_index', None) controller_addr_options = params.get('controller_addr_options', None) driver_iothread = params.get("driver_iothread") # iscsi options. 
iscsi_target = params.get("iscsi_target") iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") emulated_size = params.get("iscsi_image_size", "1") uuid = params.get("uuid", "") auth_uuid = "yes" == params.get("auth_uuid", "") auth_usage = "yes" == params.get("auth_usage", "") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error", "no") test_save_snapshot = "yes" == params.get("test_save_snapshot", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") secret_uuid = "" # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: chap_user = "" chap_passwd = "" if auth_uuid or auth_usage: auth_place_in_location = params.get("auth_place_in_location") if 'source' in auth_place_in_location and not libvirt_version.version_compare( 3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version" ) auth_type = params.get("auth_type") secret_usage_target = params.get("secret_usage_target") secret_usage_type = params.get("secret_usage_type") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "iSCSI secret" sec_xml.auth_type = auth_type sec_xml.auth_username = chap_user sec_xml.usage = secret_usage_type sec_xml.target = secret_usage_target sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid == "": test.error("Failed to get secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode( chap_passwd.encode(encoding)).decode(encoding) ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # If we use qcow2 disk format, should format iscsi disk first. if device_format == "qcow2": cmd = ( "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size)) process.run(cmd, shell=True) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} # For lun type device, iothread attribute need to be set in controller. if driver_iothread and device != "lun": driver_dict.update({"iothread": driver_iothread}) vmxml.iothreads = int(driver_iothread) elif driver_iothread: vmxml.iothreads = int(driver_iothread) disk_xml.driver = driver_dict # Check if we want to use a faked uuid. 
if not uuid: uuid = secret_uuid auth_dict = {} if auth_uuid: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_uuid": uuid } elif auth_usage: auth_dict = { "auth_user": chap_user, "secret_type": secret_usage_type, "secret_usage": secret_usage_target } disk_source = disk_xml.new_disk_source( **{ "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] }) if auth_dict: disk_auth = disk_xml.new_auth(**auth_dict) if 'source' in auth_place_in_location: disk_source.auth = disk_auth if 'disk' in auth_place_in_location: disk_xml.auth = disk_auth disk_xml.source = disk_source # Sync VM xml. vmxml.add_device(disk_xml) # After virtio 1.0 is enabled, lun type device need use virtio-scsi # instead of virtio, so additional controller is needed. # Add controller. if device == "lun": ctrl = Controller(type_name=cntlr_type) if cntlr_model is not None: ctrl.model = cntlr_model if cntlr_index is not None: ctrl.index = cntlr_index ctrl_addr_dict = {} for addr_option in controller_addr_options.split(','): if addr_option != "": addr_part = addr_option.split('=') ctrl_addr_dict.update( {addr_part[0].strip(): addr_part[1].strip()}) ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict) # If driver_iothread is true, need add iothread attribute in controller. if driver_iothread: ctrl_driver_dict = {} ctrl_driver_dict.update({"iothread": driver_iothread}) ctrl.driver = ctrl_driver_dict logging.debug("Controller XML is:%s", ctrl) if cntlr_type: vmxml.del_controller(cntlr_type) else: vmxml.del_controller("scsi") vmxml.add_device(ctrl) try: # Start the VM and check status. vmxml.sync() vm.start() if status_error: test.fail("VM started unexpectedly.") # Check Qemu command line if test_qemu_cmd: check_qemu_cmd() except virt_vm.VMStartError as e: if status_error: if re.search(uuid, str(e)): pass else: test.fail("VM failed to start." "Error: %s" % str(e)) except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % xml_error) else: # Check partitions in VM. if check_partitions: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") # Test domain save/restore/snapshot. if test_save_snapshot: save_file = os.path.join(data_dir.get_tmp_dir(), "%.save" % vm_name) check_save_restore(save_file) check_snapshot() if os.path.exists(save_file): os.remove(save_file) finally: # Delete snapshots. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) # Clean up secret if secret_uuid: virsh.secret_undefine(secret_uuid)
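The auth_place_in_location handling above exercises the two places libvirt accepts the <auth> element: directly under <disk> (the classic form) or nested inside <source> (accepted since libvirt 3.9.0, which is why the test cancels on older versions). The two layouts differ roughly as sketched below; all values are placeholders:

# <auth> under <disk> (classic placement)
AUTH_IN_DISK_XML = """
<disk type='network' device='disk'>
  <auth username='redhat'>
    <secret type='iscsi' usage='libvirtiscsi'/>
  </auth>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/1'>
    <host name='127.0.0.1' port='3260'/>
  </source>
  <target dev='vdd' bus='virtio'/>
</disk>
"""

# <auth> nested inside <source> (libvirt >= 3.9.0)
AUTH_IN_SOURCE_XML = """
<disk type='network' device='disk'>
  <source protocol='iscsi' name='iqn.2019-01.com.example:target/1'>
    <host name='127.0.0.1' port='3260'/>
    <auth username='redhat'>
      <secret type='iscsi' usage='libvirtiscsi'/>
    </auth>
  </source>
  <target dev='vdd' bus='virtio'/>
</disk>
"""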
def prepare_ceph_disk(ceph_params, remote_virsh_dargs, test, runner_on_target): """ Prepare one image on remote ceph server with enabled or disabled auth And expose it to VM by network access :param ceph_params: parameter to setup ceph. :param remote_virsh_dargs: parameter to remote virsh. :param test: test itself. """ # Ceph server config parameters virsh_dargs = {'debug': True, 'ignore_status': True} prompt = ceph_params.get("prompt", r"[\#\$]\s*$") ceph_disk = "yes" == ceph_params.get("ceph_disk") mon_host = ceph_params.get('mon_host') client_name = ceph_params.get('client_name') client_key = ceph_params.get("client_key") vol_name = ceph_params.get("vol_name") disk_img = ceph_params.get("disk_img") key_file = ceph_params.get("key_file") disk_format = ceph_params.get("disk_format") key_opt = "" # Auth and secret config parameters. auth_user = ceph_params.get("auth_user") auth_key = ceph_params.get("auth_key") auth_type = ceph_params.get("auth_type") auth_usage = ceph_params.get("secret_usage") secret_uuid = ceph_params.get("secret_uuid") # Remote host parameters. remote_ip = ceph_params.get("server_ip") remote_user = ceph_params.get("server_user", "root") remote_pwd = ceph_params.get("server_pwd") # Clean up dirty secrets in test environments if there are. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: # Clean up dirty secrets on remote host. try: remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs) remote_dirty_secret_list = get_secret_list(remote_virsh) for dirty_secret_uuid in remote_dirty_secret_list: remote_virsh.secret_undefine(dirty_secret_uuid) except (process.CmdError, remote.SCPError) as detail: raise exceptions.TestError(detail) finally: logging.debug('clean up secret on remote host') remote_virsh.close_session() with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.uuid = secret_uuid sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.fail("Failed to get secret uuid") # Set secret value ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Create secret on remote host. 
local_path = sec_xml.xml remote_path = '/var/lib/libvirt/images/new_secret.xml' remote_folder = '/var/lib/libvirt/images' cmd = 'mkdir -p %s && chmod 777 %s && touch %s' % ( remote_folder, remote_folder, remote_path) cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) remote.scp_to_remote(remote_ip, '22', remote_user, remote_pwd, local_path, remote_path, limit="", log_filename=None, timeout=600, interface=None) cmd = "/usr/bin/virsh secret-define --file %s" % remote_path cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) # Set secret value on remote host. cmd = "/usr/bin/virsh secret-set-value --secret %s --base64 %s" % ( secret_uuid, auth_key) cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) # Delete the disk if it exists disk_src_name = "%s/%s" % (vol_name, disk_img) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Convert the disk format first_disk_device = ceph_params.get('first_disk') blk_source = first_disk_device['source'] disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) disk_cmd = ("rbd -m %s %s info %s || qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) return (key_opt, secret_uuid)
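prepare_ceph_disk() relies on a get_secret_list() helper that is defined elsewhere in the module. A minimal local-host sketch of the same idea, assuming only the virsh CLI and that secret UUIDs appear in the first column of virsh secret-list output:

import re

from avocado.utils import process


def sketch_get_secret_list():
    """Return the UUIDs of all secrets currently defined on the local host."""
    output = process.run("virsh secret-list", shell=True).stdout_text
    # Skip the header/separator lines and collect the UUID column of each row.
    return re.findall(r"^\s*([0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12})\b",
                      output, re.M)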
def run(test, params, env): """ Test SCSI3 Persistent Reservation functions. 1.Prepare iscsi backend storage. 2.Prepare disk xml. 3.Hot/cold plug the disk to vm. 4.Check if SCSI3 Persistent Reservation commands can be issued to that disk. 5.Recover test environment. 6.Confirm the test result. """ def get_delta_parts(vm, old_parts): """ Get the newly added partitions/blockdevs in vm. :param vm: The vm to be operated. :param old_parts: The original partitions/blockdevs in vm. :return: Newly added partitions/blockdevs. """ session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) new_parts = list(set(new_parts).difference(set(old_parts))) session.close() return new_parts def check_pr_cmds(vm, blk_dev): """ Check if SCSI3 Persistent Reservation commands can be used in vm. :param vm: The vm to be checked. :param blk_dev: The block device in vm to be checked. """ session = vm.wait_for_login() cmd = ("sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&" "sg_persist --no-inquiry --in -k /dev/{0} &&" "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -r /dev/{0} &&" "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -r /dev/{0} &&" "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&" "sg_persist --no-inquiry --in -k /dev/{0}" .format(blk_dev)) cmd_status, cmd_output = session.cmd_status_output(cmd) session.close() if cmd_status == 127: test.error("sg3_utils not installed in test image") elif cmd_status != 0: test.fail("persistent reservation failed for /dev/%s" % blk_dev) else: logging.info("persistent reservation successful for /dev/%s" % blk_dev) def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"): """ Start or stop qemu-pr-helper daemon :param is_start: Set True to start, False to stop. """ service_mgr = service.ServiceManager() if is_start: service_mgr.start('qemu-pr-helper') time.sleep(2) shutil.chown(path_to_sock, "qemu", "qemu") else: service_mgr.stop('qemu-pr-helper') def ppc_controller_update(): """ Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type :return: """ if params.get('machine_type') == 'pseries' and device_bus == 'scsi': if not vmxml.get_controllers(device_bus, 'virtio-scsi'): vmxml.del_controller(device_bus) ppc_controller = Controller('controller') ppc_controller.type = device_bus ppc_controller.index = '0' ppc_controller.model = 'virtio-scsi' vmxml.add_device(ppc_controller) vmxml.sync() # Check if SCSI3 Persistent Reservations supported by # current libvirt versions. 
if not libvirt_version.version_compare(4, 4, 0): test.cancel("The <reservations> tag supported by libvirt from version " "4.4.0") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} # Disk specific attributes device = params.get("virt_disk_device", "lun") device_target = params.get("virt_disk_device_target", "sdb") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "block") device_bus = params.get("virt_disk_device_bus", "scsi") # Iscsi options iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") emulated_size = params.get("iscsi_image_size", "1G") auth_uuid = "yes" == params.get("auth_uuid") auth_usage = "yes" == params.get("auth_usage") # SCSI3 PR options reservations_managed = "yes" == params.get("reservations_managed", "yes") reservations_source_type = params.get("reservations_source_type", "unix") reservations_source_path = params.get("reservations_source_path", "/var/run/qemu-pr-helper.sock") reservations_source_mode = params.get("reservations_source_mode", "client") secret_uuid = "" # Case step options hotplug_disk = "yes" == params.get("hotplug_disk", "no") # Start vm and get all partions in vm if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: chap_user = "" chap_passwd = "" if auth_uuid or auth_usage: auth_in_source = "yes" == params.get("auth_in_source", "no") if auth_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel("place auth in source is not supported in " "current libvirt version.") auth_type = params.get("auth_type", "chap") secret_usage_target = params.get("secret_usage_target", "libvirtiscsi") secret_usage_type = params.get("secret_usage_type", "iscsi") chap_user = params.get("iscsi_user", "redhat") chap_passwd = params.get("iscsi_password", "redhat") sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "iSCSI secret" sec_xml.auth_type = auth_type sec_xml.auth_username = chap_user sec_xml.usage = secret_usage_type sec_xml.target = secret_usage_target sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid == "": test.error("Failed to get secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode(str(chap_passwd).encode(encoding)).decode(encoding) ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) # Setup iscsi target blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=True, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # Add disk xml vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict auth_dict = {} if auth_uuid: auth_dict = {"auth_user": chap_user, "secret_type": secret_usage_type, "secret_uuid": secret_uuid} elif auth_usage: auth_dict = {"auth_user": chap_user, "secret_type": secret_usage_type, "secret_usage": secret_usage_target} disk_source = disk_xml.new_disk_source( 
**{"attrs": {"dev": blk_dev}}) if auth_dict: disk_auth = disk_xml.new_auth(**auth_dict) if auth_in_source: disk_source.auth = disk_auth else: disk_xml.auth = disk_auth if reservations_managed: reservations_dict = {"reservations_managed": "yes"} else: start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path) reservations_dict = {"reservations_managed": "no", "reservations_source_type": reservations_source_type, "reservations_source_path": reservations_source_path, "reservations_source_mode": reservations_source_mode} disk_source.reservations = disk_xml.new_reservations(**reservations_dict) disk_xml.source = disk_source # Update controller of ppc vms ppc_controller_update() if not hotplug_disk: vmxml.add_device(disk_xml) try: # Start the VM and check status vmxml.sync() vm.start() vm.wait_for_login().close() time.sleep(5) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result) new_parts = get_delta_parts(vm, old_parts) if len(new_parts) != 1: logging.error("Expected 1 dev added but has %s" % len(new_parts)) new_part = new_parts[0] check_pr_cmds(vm, new_part) result = virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True, wait_remove_event=True) libvirt.check_exit_status(result) except virt_vm.VMStartError as e: test.fail("VM failed to start." "Error: %s" % str(e)) except xcepts.LibvirtXMLError as xml_error: test.fail("Failed to define VM:\n%s" % xml_error) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) # Clean up secret if secret_uuid: virsh.secret_undefine(secret_uuid) # Stop qemu-pr-helper daemon start_or_stop_qemu_pr_helper(is_start=False)
def run(test, params, env): """ Test virsh blockcopy --xml option. 1.Prepare backend storage (file/block/iscsi/ceph/nbd) 2.Start VM 3.Prepare target xml 4.Execute virsh blockcopy --xml command 5.Check VM xml after operation accomplished 6.Clean up test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} ignore_check = False def check_blockcopy_xml(vm_name, source_image, ignore_check=False): """ Check blockcopy xml in VM. :param vm_name: VM name :param source_image: source image name. :param ignore_check: default is False. """ if ignore_check: return source_imge_list = [] blklist = virsh.domblklist(vm_name).stdout_text.splitlines() for line in blklist: if line.strip().startswith(('hd', 'vd', 'sd', 'xvd')): source_imge_list.append(line.split()[-1]) logging.debug('domblklist %s:\n%s', vm_name, source_imge_list) if not any(source_image in s for s in source_imge_list): test.fail("Cannot find expected source image: %s" % source_image) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") blockcopy_option = params.get("blockcopy_option") # Backend storage auth info storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") use_auth_usage = "yes" == params.get("use_auth_usage") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") auth_sec_uuid = "" disk_auth_dict = {} size = "1" status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") # Initialize one NbdExport object nbd = None img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Additional disk images. disks_img = [] try: # Clean up dirty secrets in test environments if there are. 
utils_secret.clean_up_secrets() # Setup backend storage if backend_storage_type == "file": image_filename = params.get("image_filename", "raw.img") disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename) if blockcopy_option in ['reuse_external']: device_source = libvirt.create_local_disk( backend_storage_type, disk_path, storage_size, device_format) else: device_source = disk_path disks_img.append({ "format": device_format, "source": disk_path, "path": disk_path }) disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } checkout_device_source = image_filename elif backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} checkout_device_source = device_source elif device_type == "network": chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } checkout_device_source = 'emulated-iscsi' elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") size = "0.15" key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm whether it needs delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, ignore_status=False, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) cmd = 
("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) if blockcopy_option in ['reuse_external']: # Create an local image and make FS on it. libvirt.create_local_disk("file", img_file, storage_size, device_format) # Convert the image to remote storage disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)) if ceph_client_name and ceph_client_key: disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key)) rbd_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name, device_format, img_file, disk_path)) process.run(rbd_cmd, ignore_status=False, shell=True) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } checkout_device_source = ceph_disk_name elif backend_storage_type == "nbd": # Get server hostname. hostname = socket.gethostname().strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") # Create NbdExport object nbd = NbdExport(image_path, image_format=device_format, port=nbd_server_port) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd"} disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update({ "hosts": [{ "name": nbd_server_host, "port": nbd_server_port }] }) device_source = "nbd://%s:%s/%s" % (nbd_server_host, nbd_server_port, image_path) checkout_device_source = image_path if blockcopy_option in ['pivot']: ignore_check = True logging.debug("device source is: %s", device_source) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) auth_in_source = True if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) disk_source.auth = disk_xml.new_auth(**disk_auth_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml device_source_path = os.path.join(data_dir.get_tmp_dir(), "source.raw") tmp_device_source = libvirt.create_local_disk("file", path=device_source_path, size=size, disk_format="raw") s_attach = virsh.attach_disk(vm_name, tmp_device_source, device_target, "--config", debug=True) libvirt.check_exit_status(s_attach) try: vm.start() vm.wait_for_login().close() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s", str(xml_error)) except virt_vm.VMStartError as details: # VM cannot be started if status_error: logging.info("VM failed to start as expected: %s", str(details)) else: test.fail("VM should start but failed: %s" % str(details)) # Additional operations before set block threshold options = params.get("options", "--pivot --transient-job --verbose --wait") result = virsh.blockcopy(vm_name, device_target, "--xml %s" % disk_xml.xml, options=options, debug=True) libvirt.check_exit_status(result) check_source_image = None if blockcopy_option in ['pivot']: check_source_image = checkout_device_source else: check_source_image = tmp_device_source check_blockcopy_xml(vm_name, check_source_image, ignore_check) finally: # Delete snapshots. if virsh.domain_exists(vm_name): #To Delete snapshot, destroy vm first. if vm.is_alive(): vm.destroy() libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) vmxml_backup.sync("--snapshots-metadata") if os.path.exists(img_file): libvirt.delete_local_disk("file", img_file) for img in disks_img: if os.path.exists(img["path"]): libvirt.delete_local_disk("file", img["path"]) # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result.stdout_text) if os.path.exists(key_file): os.remove(key_file) elif backend_storage_type == "nbd": if nbd: try: nbd.cleanup() except Exception as ndbEx: logging.error("Clean Up nbd failed: %s" % str(ndbEx)) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid)
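The virsh.blockcopy(..., "--xml %s" % disk_xml.xml, ...) call above corresponds to the blockcopy --xml CLI form, where the copy destination is described by a full <disk> element instead of a flat destination path. A minimal CLI-level sketch of the same operation; the names below are placeholders:

from avocado.utils import process


def sketch_blockcopy_with_xml(vm_name, target, disk_xml_path, pivot=True):
    """Copy 'target' of a running domain to the destination described in an XML file."""
    options = "--transient-job --verbose --wait" + (" --pivot" if pivot else "")
    process.run("virsh blockcopy %s %s --xml %s %s"
                % (vm_name, target, disk_xml_path, options), shell=True)
    # With --pivot, the destination from the XML becomes the new active source,
    # which is what check_blockcopy_xml() above verifies via 'virsh domblklist'.
    return process.run("virsh domblklist %s" % vm_name, shell=True).stdout_text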
def run(test, params, env): """ Do test for vol-download and vol-upload Basic steps are 1. Create pool with type defined in cfg 2. Create image with writing data in it 3. Get md5 value before operation 4. Do vol-download/upload with options(offset, length) 5. Check md5 value after operation """ pool_type = params.get("vol_download_upload_pool_type") pool_name = params.get("vol_download_upload_pool_name") pool_target = params.get("vol_download_upload_pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_download_upload_vol_name") file_name = params.get("vol_download_upload_file_name") file_path = os.path.join(data_dir.get_tmp_dir(), file_name) offset = params.get("vol_download_upload_offset") length = params.get("vol_download_upload_length") capacity = params.get("vol_download_upload_capacity") allocation = params.get("vol_download_upload_allocation") frmt = params.get("vol_download_upload_format") operation = params.get("vol_download_upload_operation") create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes")) setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") b_luks_encrypt = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} sparse_option_support = "yes" == params.get("sparse_option_support", "yes") with_clusterSize = "yes" == params.get("with_clusterSize") vol_clusterSize = params.get("vol_clusterSize", "64") vol_clusterSize_unit = params.get("vol_clusterSize_unit") vol_format = params.get("vol_format", "qcow2") libvirt_version.is_libvirt_feature_supported(params) # libvirt acl polkit related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.error("API acl test not supported in current" " libvirt version.") # Destroy VM. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest", pre_disk_vol=["50M"]) # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if pool_type == "disk": vol_name = utlv.new_disk_vol_name(pool_name) if vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % vol_name utlv.update_polkit_rule(params, vol_pat, new_value) if create_vol: if b_luks_encrypt: if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS format not supported in " "current libvirt version") params['sec_volume'] = os.path.join(pool_target, vol_name) luks_sec_uuid = utlv.create_secret(params) ret = virsh.secret_set_value(luks_sec_uuid, encryption_password, encode=True) utlv.check_exit_status(ret) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(capacity) vol_arg['allocation'] = int(allocation) if with_clusterSize: vol_arg['format'] = vol_format vol_arg['clusterSize'] = int(vol_clusterSize) vol_arg['clusterSize_unit'] = vol_clusterSize_unit create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name) virsh.pool_refresh(pool_name, debug=True) vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip() # iscsi volume name is different from others if pool_type == "iscsi": # Due to BZ 1843791, the volume cannot be obtained sometimes. if len(vol_list.splitlines()) < 3: test.fail("Failed to get iscsi type volume.") vol_name = vol_list.split('\n')[2].split()[0] vol_path = virsh.vol_path(vol_name, pool_name, ignore_status=False).stdout.strip() logging.debug("vol_path is %s", vol_path) # Add command options if pool_type is not None: options = " --pool %s" % pool_name if offset is not None: options += " --offset %s" % offset offset = int(offset) else: offset = 0 if length is not None: options += " --length %s" % length length = int(length) else: length = 0 logging.debug("%s options are %s", operation, options) if operation == "upload": # write data to file write_file(file_path) # Set length for calculate the offset + length in the following # func get_pre_post_digest() and digest() if length == 0: length = 1048576 def get_pre_post_digest(): """ Get pre region and post region digest if have offset and length :return: pre digest and post digest """ # Get digest of pre region before offset if offset != 0: digest_pre = digest(vol_path, 0, offset) else: digest_pre = 0 logging.debug("pre region digest read from %s 0-%s is %s", vol_path, offset, digest_pre) # Get digest of post region after offset+length digest_post = digest(vol_path, offset + length, 0) logging.debug("post region digest read from %s %s-0 is %s", vol_path, offset + length, digest_post) return (digest_pre, digest_post) # Get pre and post digest before operation for compare (ori_pre_digest, ori_post_digest) = get_pre_post_digest() ori_digest = digest(file_path, 0, 0) logging.debug("ori digest read from %s is %s", file_path, ori_digest) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume upload result = virsh.vol_upload(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation (aft_pre_digest, 
aft_post_digest) = get_pre_post_digest() aft_digest = digest(vol_path, offset, length) logging.debug("aft digest read from %s is %s", vol_path, aft_digest) # Compare the pre and post part before and after if ori_pre_digest == aft_pre_digest and \ ori_post_digest == aft_post_digest: logging.info("file pre and aft digest match") else: test.fail("file pre or post digests do not" "match, in %s", operation) if operation == "download": # Write data to volume write_file(vol_path) # Record the digest value before operation ori_digest = digest(vol_path, offset, length) logging.debug("original digest read from %s is %s", vol_path, ori_digest) process.run("touch %s" % file_path, ignore_status=True, shell=True) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume download result = virsh.vol_download(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation aft_digest = digest(file_path, 0, 0) logging.debug("new digest read from %s is %s", file_path, aft_digest) if operation != "mix": if result.exit_status != 0: test.fail("Fail to %s volume: %s" % (operation, result.stderr)) # Compare the change part on volume and file if ori_digest == aft_digest: logging.info("file digests match, volume %s succeed", operation) else: test.fail("file digests do not match, volume %s failed" % operation) if operation == "mix": target = params.get("virt_disk_device_target", "vdb") disk_file_path = os.path.join(pool_target, file_name) # Create one disk xml and attach it to VM. custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file', 'disk', target, 'virtio') ret = virsh.attach_device(vm_name, custom_disk_xml.xml, flagstr="--config", debug=True) libvirt.check_exit_status(ret) if vm.is_dead(): vm.start() # Write 100M data into disk. data_size = 100 write_disk(test, vm, target, data_size) data_size_in_bytes = data_size * 1024 * 1024 # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Download volume to local with sparse option. download_spare_file = "download-sparse.raw" download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file) options += " --sparse" result = virsh.vol_download(file_name, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) libvirt.check_exit_status(result) #Check download image size. one_g_in_bytes = 1073741824 download_img_info = utils_misc.get_image_info(download_file_path) download_disk_size = int(download_img_info['dsize']) if (download_disk_size < data_size_in_bytes or download_disk_size >= one_g_in_bytes): test.fail("download image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (download_disk_size, data_size_in_bytes)) # Create one upload sparse image file. upload_sparse_file = "upload-sparse.raw" upload_file_path = os.path.join(pool_target, upload_sparse_file) libvirt.create_local_disk('file', upload_file_path, '1', 'raw') # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Do volume upload, upload sparse file which download last time. 
result = virsh.vol_upload(upload_sparse_file, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) libvirt.check_exit_status(result) upload_img_info = utils_misc.get_image_info(upload_file_path) upload_disk_size = int(upload_img_info['dsize']) if (upload_disk_size < data_size_in_bytes or upload_disk_size >= one_g_in_bytes): test.fail("upload image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (upload_disk_size, data_size_in_bytes)) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest") for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) if os.path.isfile(file_path): os.remove(file_path)
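The digest() helper used throughout this test is defined earlier in the module; from its call sites it computes an md5 over a byte region of a file, with length 0 meaning "to end of file". A self-contained sketch with those assumed semantics:

import hashlib


def sketch_digest(path, offset, length):
    """md5 hexdigest of 'length' bytes starting at 'offset'; length 0 reads to EOF."""
    md5 = hashlib.md5()
    with open(path, "rb") as handle:
        handle.seek(offset)
        remaining = length if length else None
        while True:
            chunk_size = (1024 * 1024 if remaining is None
                          else min(1024 * 1024, remaining))
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            md5.update(chunk)
            if remaining is not None:
                remaining -= len(chunk)
                if remaining == 0:
                    break
    return md5.hexdigest()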
def run(test, params, env): """ Test the tpm virtual devices 1. prepare a guest with different tpm devices 2. check whether the guest can be started 3. check the xml and qemu cmd line, even swtpm for vtpm 4. check tpm usage in guest os """ # Tpm passthrough supported since libvirt 1.0.5. if not libvirt_version.version_compare(1, 0, 5): test.cancel("Tpm device is not supported " "on current libvirt version.") # Tpm passthrough supported since qemu 2.12.0-49. if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False): test.cancel("Tpm device is not supported " "on current qemu version.") tpm_model = params.get("tpm_model") backend_type = params.get("backend_type") backend_version = params.get("backend_version") device_path = params.get("device_path") tpm_num = int(params.get("tpm_num", 1)) # After first start of vm with vtpm, do operations, check it still works vm_operate = params.get("vm_operate") # Sub-operation(e.g.domrename) under vm_operate(e.g.restart) vm_oprt = params.get("vm_oprt") secret_uuid = params.get("secret_uuid") secret_value = params.get("secret_value") # Change encryption state: from plain to encrypted, or reverse. encrypt_change = params.get("encrypt_change") secret_uuid = params.get("secret_uuid") prepare_secret = ("yes" == params.get("prepare_secret", "no")) remove_dev = ("yes" == params.get("remove_dev", "no")) multi_vms = ("yes" == params.get("multi_vms", "no")) # Remove swtpm state file rm_statefile = ("yes" == params.get("rm_statefile", "no")) test_suite = ("yes" == params.get("test_suite", "no")) restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no")) no_backend = ("yes" == params.get("no_backend", "no")) status_error = ("yes" == params.get("status_error", "no")) err_msg = params.get("xml_errmsg", "") loader = params.get("loader", "") nvram = params.get("nvram", "") uefi_disk_url = params.get("uefi_disk_url", "") download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2") # Check tpm chip on host for passthrough testing if backend_type == "passthrough": dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True) logging.debug("dmesg info about tpm:\n %s", dmesg_info) dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info) if dmesg_error: test.cancel(dmesg_error.group()) else: # Try to check host tpm chip version tpm_v = None if re.search("2.0 TPM", dmesg_info): tpm_v = "2.0" if not utils_package.package_install("tpm2-tools"): # package_install() return 'True' if succeed test.error("Failed to install tpm2-tools on host") else: if re.search("1.2 TPM", dmesg_info): tpm_v = "1.2" # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first if not utils_package.package_install("tpm-tools"): test.error("Failed to install tpm-tools on host") # Check host env for vtpm testing elif backend_type == "emulator": if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False): test.cancel("vtpm(emulator backend) is not supported " "on current qemu version.") # Install swtpm pkgs on host for vtpm emulation if not utils_package.package_install("swtpm*"): test.error("Failed to install swtpm swtpm-tools on host") def replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} 
vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk) vm_names = params.get("vms").split() vm_name = vm_names[0] vm = env.get_vm(vm_name) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) vm_xml_backup = vm_xml.copy() os_xml = getattr(vm_xml, "os") host_arch = platform.machine() if backend_type == "emulator" and host_arch == 'x86_64': if not utils_package.package_install("OVMF"): test.error("Failed to install OVMF or edk2-ovmf pkgs on host") if os_xml.xmltreefile.find('nvram') is None: replace_os_disk(vm_xml, vm_name, nvram) vm_xml = VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy() vm2 = None if multi_vms: if len(vm_names) > 1: vm2_name = vm_names[1] vm2 = env.get_vm(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) vm2_xml_backup = vm2_xml.copy() else: # Clone additional vms if needed try: utils_path.find_command("virt-clone") except utils_path.CmdNotFoundError: if not utils_package.package_install(["virt-install"]): test.cancel("Failed to install virt-install on host") vm2_name = "vm2_" + utils_misc.generate_random_string(5) ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name, True, timeout=360, debug=True) if ret_clone.exit_status: test.error("Need more than one domains, but error occured when virt-clone.") vm2 = vm.clone(vm2_name) vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name) if vm2.is_alive(): vm2.destroy() service_mgr = service.ServiceManager() def check_dumpxml(vm_name): """ Check whether the added devices are shown in the guest xml :param vm_name: current vm name """ logging.info("------Checking guest dumpxml------") if tpm_model: pattern = '<tpm model="%s">' % tpm_model else: # The default tpm model is "tpm-tis" pattern = '<tpm model="tpm-tis">' # Check tpm model xml_after_adding_device = VMXML.new_from_dumpxml(vm_name) logging.debug("xml after add tpm dev is %s", xml_after_adding_device) if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s tpm device xml " "in the guest xml file." % tpm_model) # Check backend type pattern = '<backend type="%s"' % backend_type if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend type xml for tpm dev " "in the guest xml file." % backend_type) # Check backend version if backend_version: check_ver = backend_version if backend_version != 'none' else '2.0' pattern = '"emulator" version="%s"' % check_ver if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s backend version xml for tpm dev " "in the guest xml file." % check_ver) # Check device path if backend_type == "passthrough": pattern = '<device path="/dev/tpm0"' if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s device path xml for tpm dev " "in the guest xml file." 
% device_path) # Check encryption secret if prepare_secret: pattern = '<encryption secret="%s" />' % encryption_uuid if pattern not in astring.to_text(xml_after_adding_device): test.fail("Can not find the %s secret uuid xml for tpm dev " "in the guest xml file." % encryption_uuid) logging.info('------PASS on guest dumpxml check------') def check_qemu_cmd_line(vm, vm_name, domid): """ Check whether the added devices are shown in the qemu cmd line :param vm: current vm :param vm_name: current vm name :param domid: domain id for checking vtpm socket file """ logging.info("------Checking qemu cmd line------") if not vm.get_pid(): test.fail('VM pid file missing.') with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Qemu cmd line info:\n %s", cmdline) # Check tpm model pattern_list = ["-device.%s" % tpm_model] # Check backend type if backend_type == "passthrough": dev_num = re.search(r"\d+", device_path).group() backend_segment = "id=tpm-tpm%s" % dev_num else: # emulator backend backend_segment = "id=tpm-tpm0,chardev=chrtpm" pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment)) # Check chardev socket for vtpm if backend_type == "emulator": pattern_list.append("-chardev.socket,id=chrtpm," "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)) for pattern in pattern_list: if not re.search(pattern, cmdline): if not remove_dev: test.fail("Can not find the %s for tpm device " "in qemu cmd line." % pattern) elif remove_dev: test.fail("%s still exists after remove vtpm and restart" % pattern) logging.info("------PASS on qemu cmd line check------") def check_swtpm(domid, domuuid, vm_name): """ Check swtpm cmdline and files for vtpm. :param domid: domain id for checking vtpm files :param domuuid: domain uuid for checking vtpm state file :param vm_name: current vm name """ logging.info("------Checking swtpm cmdline and files------") # Check swtpm cmdline swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name) if not swtpm_pid: if not remove_dev: test.fail('swtpm pid file missing.') else: return elif remove_dev: test.fail('swtpm pid file still exists after remove vtpm and restart') with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file: cmdline = cmdline_file.read() logging.debug("Swtpm cmd line info:\n %s", cmdline) pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"] if prepare_secret: pattern_list.extend(["--key", "--migration-key"]) for pattern in pattern_list: if not re.search(pattern, cmdline): test.fail("Can not find the %s for tpm device " "in swtpm cmd line." 
% pattern) # Check swtpm files file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)] file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid) file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name) file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name)) for swtpm_file in file_list: if not os.path.exists(swtpm_file): test.fail("Swtpm file: %s does not exist" % swtpm_file) logging.info("------PASS on Swtpm cmdline and files check------") def get_tpm2_tools_cmd(session=None): """ Get tpm2-tools pkg version and return corresponding getrandom cmd :session: guest console session :return: tpm2_getrandom cmd usage """ cmd = 'rpm -q tpm2-tools' get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text v_tools_list = get_v_tools.strip().split('-') if session: logging.debug("The tpm2-tools version is %s", v_tools_list[2]) v_tools = int(v_tools_list[2].split('.')[0]) return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex" def get_host_tpm_bef(tpm_v): """ Test host tpm function and identify its real version before passthrough Since sometimes dmesg info doesn't include tpm msg, need use tpm-tool or tpm2-tools to try the function. :param tpm_v: host tpm version get from dmesg info :return: host tpm version """ logging.info("------Checking host tpm device before passthrough------") # Try tcsd tool for suspected tpm1.2 chip on host tpm_real_v = tpm_v if tpm_v != "2.0": if not service_mgr.start('tcsd'): # service_mgr.start() return 'True' if succeed if tpm_v == "1.2": test.fail("Host tcsd.serivce start failed") else: # Means tpm_v got nothing from dmesg, log failure here and # go to next 'if' to try tpm2.0 tools. logging.info("Host tcsd.serivce start failed") else: tpm_real_v = "1.2" logging.info("Host tpm version info:") result = process.run("tpm_version", ignore_status=False) logging.debug("[host]# tpm_version\n %s", result.stdout) time.sleep(2) service_mgr.stop('tcsd') if tpm_v != "1.2": # Try tpm2.0 tools if not utils_package.package_install("tpm2-tools"): test.error("Failed to install tpm2-tools on host") tpm2_getrandom_cmd = get_tpm2_tools_cmd() if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.cancel("Both tcsd and tpm2-tools can not work, " "pls check your host tpm version and test env.") else: tpm_real_v = "2.0" logging.info("------PASS on host tpm device check------") return tpm_real_v def test_host_tpm_aft(tpm_real_v): """ Test host tpm function after passthrough :param tpm_real_v: host tpm real version indentified from testing """ logging.info("------Checking host tpm device after passthrough------") if tpm_real_v == "1.2": if service_mgr.start('tcsd'): time.sleep(2) service_mgr.stop('tcsd') test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") elif tpm_real_v == "2.0": tpm2_getrandom_cmd = get_tpm2_tools_cmd() if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status: test.fail("Host tpm should not work after passthrough to guest.") else: logging.info("Expected failure: Tpm is being used by guest.") logging.info("------PASS on host tpm device check------") def test_guest_tpm(expect_version, session, expect_fail): """ Test tpm function in guest :param expect_version: guest tpm version, as host version, or emulator specified :param session: Guest session to be tested :param expect_fail: guest tpm is expectedly fail to work """ logging.info("------Checking 
guest tpm device work------") if expect_version == "1.2": # Install tpm-tools and test by tcsd method if not utils_package.package_install(["tpm-tools"], session, 360): test.error("Failed to install tpm-tools package in guest") else: status, output = session.cmd_status_output("systemctl start tcsd") logging.debug("Command output: %s", output) if status: if expect_fail: test.cancel("tpm-crb passthrough only works with host tpm2.0, " "but your host tpm version is 1.2") else: test.fail("Failed to start tcsd.service in guest") else: dev_output = session.cmd_output("ls /dev/|grep tpm") logging.debug("Command output: %s", dev_output) status, output = session.cmd_status_output("tpm_version") logging.debug("Command output: %s", output) if status: test.fail("Guest tpm can not work") else: # If expect_version is tpm2.0, install and test by tpm2-tools if not utils_package.package_install(["tpm2-tools"], session, 360): test.error("Failed to install tpm2-tools package in guest") else: tpm2_getrandom_cmd = get_tpm2_tools_cmd(session) status1, output1 = session.cmd_status_output("ls /dev/|grep tpm") logging.debug("Command output: %s", output1) status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd) logging.debug("Command output: %s", output2) if status1 or status2: if not expect_fail: test.fail("Guest tpm can not work") else: d_status, d_output = session.cmd_status_output("date") if d_status: test.fail("Guest OS doesn't work well") logging.debug("Command output: %s", d_output) elif expect_fail: test.fail("Expect fail but guest tpm still works") logging.info("------PASS on guest tpm device work check------") def run_test_suite_in_guest(session): """ Run kernel test suite for guest tpm. :param session: Guest session to be tested """ logging.info("------Checking kernel test suite for guest tpm------") boot_info = session.cmd('uname -r').strip().split('.') kernel_version = '.'.join(boot_info[:2]) # Download test suite per current guest kernel version parent_path = "https://cdn.kernel.org/pub/linux/kernel" if float(kernel_version) < 5.3: major_version = "5" file_version = "5.3" else: major_version = boot_info[0] file_version = kernel_version src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version) download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz") output = session.cmd_output(download_cmd, timeout=480) logging.debug("Command output: %s", output) # Install neccessary pkgs to build test suite if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360): test.fail("Failed to install specified pkgs in guest OS.") # Unzip the downloaded test suite status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root") if status: test.fail("Uzip failed: %s" % output) # Specify using python2 to run the test suite per supporting test_path = "/root/linux-%s/tools/testing/selftests" % file_version sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path output = session.cmd_output(sed_cmd) logging.debug("Command output: %s", output) # Build and and run the .sh files of test suite status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360) logging.debug("Command output: %s", output) if status: test.fail("Failed to run test suite in guest OS.") for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): test.fail("test suite check failed.") 
logging.info("------PASS on kernel test suite check------") def reuse_by_vm2(tpm_dev): """ Try to add same tpm to a second guest, when it's being used by one guest. :param tpm_dev: tpm device to be added into guest xml """ logging.info("------Trying to add same tpm to a second domain------") vm2_xml.remove_all_device_by_type('tpm') vm2_xml.add_device(tpm_dev) vm2_xml.sync() ret = virsh.start(vm2_name, ignore_status=True, debug=True) if backend_type == "passthrough": if ret.exit_status: logging.info("Expected failure when try to passthrough a tpm" " that being used by another guest") return test.fail("Reuse a passthroughed tpm should not succeed.") elif ret.exit_status: # emulator backend test.fail("Vtpm for each guest should not interfere with each other") try: tpm_real_v = None sec_uuids = [] new_name = "" virsh_dargs = {"debug": True, "ignore_status": False} vm_xml.remove_all_device_by_type('tpm') tpm_dev = Tpm() if tpm_model: tpm_dev.tpm_model = tpm_model if not no_backend: backend = tpm_dev.Backend() if backend_type != 'none': backend.backend_type = backend_type if backend_type == "passthrough": tpm_real_v = get_host_tpm_bef(tpm_v) logging.debug("The host tpm real version is %s", tpm_real_v) if device_path: backend.device_path = device_path if backend_type == "emulator": if backend_version != 'none': backend.backend_version = backend_version if prepare_secret: auth_sec_dict = {"sec_ephemeral": "no", "sec_private": "yes", "sec_desc": "sample vTPM secret", "sec_usage": "vtpm", "sec_name": "VTPM_example"} encryption_uuid = libvirt.create_secret(auth_sec_dict) if secret_value != 'none': virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(encryption_uuid) if encrypt_change != 'encrpt': # plain_to_encrypt will not add encryption on first start if secret_uuid == 'invalid': encryption_uuid = encryption_uuid[:-1] backend.encryption_secret = encryption_uuid if secret_uuid == "change": auth_sec_dict["sec_desc"] = "sample2 vTPM secret" auth_sec_dict["sec_name"] = "VTPM_example2" new_encryption_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True) sec_uuids.append(new_encryption_uuid) if secret_uuid == 'nonexist': backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tpm_dev.backend = backend logging.debug("tpm dev xml to add is:\n %s", tpm_dev) for num in range(tpm_num): vm_xml.add_device(tpm_dev, True) ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True) expected_match = "" if not err_msg: expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml) libvirt.check_result(ret, err_msg, "", False, expected_match) if err_msg: # Stop test when get expected failure return if vm_operate != "restart": check_dumpxml(vm_name) # For default model, no need start guest to test if tpm_model: expect_fail = False try: vm.start() except VMStartError as detail: if secret_value == 'none' or secret_uuid == 'nonexist': logging.debug("Expected failure: %s", detail) return else: test.fail(detail) domuuid = vm.get_uuid() if vm_operate or restart_libvirtd: # Make sure OS works before vm operate or restart libvirtd session = vm.wait_for_login() test_guest_tpm("2.0", session, False) session.close() if restart_libvirtd: utils_libvirtd.libvirtd_restart() swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid if vm_operate == "resume": virsh.suspend(vm_name, **virsh_dargs) time.sleep(3) virsh.resume(vm_name, **virsh_dargs) elif vm_operate == "snapshot": 
virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs) elif vm_operate in ["restart", "create"]: vm.destroy() if vm_operate == "create": virsh.undefine(vm_name, options="--nvram", **virsh_dargs) if os.path.exists(swtpm_statedir): test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir) virsh.create(vm_xml.xml, **virsh_dargs) else: if vm_oprt == "domrename": new_name = "vm_" + utils_misc.generate_random_string(5) virsh.domrename(vm_name, new_name, **virsh_dargs) new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache) vm = new_vm vm_name = new_name elif secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) elif not restart_libvirtd: # remove_dev or do other vm operations during restart vm_xml.remove_all_device_by_type('tpm') if secret_uuid == "change" or encrypt_change: # Change secret uuid, or change encrytion state:from plain to encrypted, or on the contrary if encrypt_change == 'plain': # Change from encrypted state to plain:redefine a tpm dev without encryption tpm_dev = Tpm() tpm_dev.tpm_model = tpm_model backend = tpm_dev.Backend() backend.backend_type = backend_type backend.backend_version = backend_version else: # Use a new secret's uuid if secret_uuid == "change": encryption_uuid = new_encryption_uuid backend.encryption_secret = encryption_uuid tpm_dev.backend = backend logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev) vm_xml.add_device(tpm_dev, True) if encrypt_change in ['encrpt', 'plain']: # Avoid sync() undefine removing the state file vm_xml.define() else: vm_xml.sync() if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return if not remove_dev: check_dumpxml(vm_name) elif vm_operate == 'managedsave': virsh.managedsave(vm_name, **virsh_dargs) time.sleep(5) if secret_value == 'change': logging.info("Changing secret value...") virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True) if rm_statefile: swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir logging.debug("Removing state file: %s", swtpm_statefile) os.remove(swtpm_statefile) ret = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) if status_error and ret.exit_status != 0: return domid = vm.get_id() check_qemu_cmd_line(vm, vm_name, domid) if backend_type == "passthrough": if tpm_real_v == "1.2" and tpm_model == "tpm-crb": expect_fail = True expect_version = tpm_real_v test_host_tpm_aft(tpm_real_v) else: # emulator backend if remove_dev: expect_fail = True expect_version = backend_version check_swtpm(domid, domuuid, vm_name) session = vm.wait_for_login() if test_suite: run_test_suite_in_guest(session) else: test_guest_tpm(expect_version, session, expect_fail) session.close() if multi_vms: reuse_by_vm2(tpm_dev) if backend_type != "passthrough": #emulator backend check_dumpxml(vm2_name) domid = vm2.get_id() domuuid = vm2.get_uuid() check_qemu_cmd_line(vm2, vm2_name, domid) check_swtpm(domid, domuuid, vm2_name) session = vm2.wait_for_login() test_guest_tpm(backend_version, session, expect_fail) session.close() finally: # Remove renamed domain if it exists if new_name: virsh.remove_domain(new_name, 
"--nvram", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name) # Remove snapshot if exists if vm_operate == "snapshot": snapshot_lists = virsh.snapshot_list(vm_name) if len(snapshot_lists) > 0: libvirt.clean_up_snapshots(vm_name, snapshot_lists) for snap in snapshot_lists: virsh.snapshot_delete(vm_name, snap, "--metadata") if os.path.exists("/tmp/testvm_sp1"): os.remove("/tmp/testvm_sp1") # Clear guest os if test_suite: session = vm.wait_for_login() logging.info("Removing dir /root/linux-*") output = session.cmd_output("rm -rf /root/linux-*") logging.debug("Command output:\n %s", output) session.close() if vm_operate == "create": vm.define(vm_xml.xml) vm_xml_backup.sync(options="--nvram --managed-save") # Remove swtpm log file in case of impact on later runs if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name) for sec_uuid in set(sec_uuids): virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True) if vm2: if len(vm_names) > 1: vm2_xml_backup.sync(options="--nvram") else: virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True) if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name): os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi network disk XML
    3. Attach disk with the XML file and check the disk inside the VM
    4. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk is not supported by the"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk is not supported by"
                                    " the current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Failed to get newly created secret uuid")
            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True, is_login=False, image_size='1G',
            chap_user=chap_user, chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
libvirt.check_exit_status(cmd_result) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout)) if len(vol_list) > 1: return vol_list[1] else: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: vol_name, vol_path = vol_info else: raise error.TestError("Failed to get volume info") # Snapshot doesn't support raw disk format, create a qcow2 volume # disk for snapshot operation. process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'), shell=True) # Create iscsi network disk XML disk_params = { 'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly } disk_params_src = {} if disk_type == "network": disk_params_src = { 'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': disk_src_host, 'source_host_port': disk_src_port } elif disk_type == "volume": disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_mode': disk_src_mode } else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = { 'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target } disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") disk_xml_f = open(disk_xml) disk_xml_content = disk_xml_f.read() disk_xml_f.close() logging.debug("Attach disk by XML: %s", disk_xml_content) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Wait for domain is stable vm.wait_for_login().close() domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: virsh.snapshot_list(vm_name, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots list for %s" % vm_name) try: virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots info for %s" % vm_name) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result 
= virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) snapshot_file = os.path.join(test.tmpdir, snapshot_name2) sn_create_op = ("%s --disk-only --diskspec %s,file=%s" % (snapshot_name2, disk_target, snapshot_file)) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) if snapshot_name2 not in cmd_result: raise error.TestError("Snapshot %s not found" % snapshot_name2) else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() # Here the script needs wait for a while for the guest to # recognize the hotplugged disk on PPC if on_ppc: time.sleep(10) cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError), e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
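
# Hedged usage sketch (standalone, not called by the test above): the same
# libvirt.create_disk_xml() helper used in the test can be fed a plain dict to
# produce the iscsi network disk XML directly. The IQN/LUN, portal address,
# CHAP user and secret usage target below are placeholders, not values taken
# from the test configuration.
from virttest.utils_test import libvirt as libvirt_utils


def build_iscsi_chap_disk_xml(iqn_lun, portal_ip, usage_target):
    """Return the path of a disk XML file for an iscsi source with CHAP auth."""
    disk_params = {
        'device_type': 'disk',
        'type_name': 'network',
        'target_dev': 'vdb',
        'target_bus': 'virtio',
        'source_protocol': 'iscsi',
        'source_name': iqn_lun,            # e.g. "<target IQN>/<lun number>"
        'source_host_name': portal_ip,
        'source_host_port': '3260',
        # The auth entries are only needed for CHAP-protected targets
        'auth_user': 'chap_user',          # placeholder user name
        'secret_type': 'iscsi',
        'secret_usage': usage_target,
    }
    return libvirt_utils.create_disk_xml(disk_params)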
def run(test, params, env): """ Test rbd disk device. 1.Prepare test environment,destroy or suspend a VM. 2.Prepare disk image. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml") def config_ceph(): """ Write the configs to the file. """ src_host = disk_src_host.split() src_port = disk_src_port.split() conf_str = "mon_host = " hosts = [] for host, port in zip(src_host, src_port): hosts.append("%s:%s" % (host, port)) with open(disk_src_config, 'w') as f: f.write(conf_str + ','.join(hosts) + '\n') def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err) def create_vol(vol_params): """ Create volume. :param p_name. Pool name. :param vol_params. Volume parameters dict. :return: True if create successfully. """ pvt = libvirt.PoolVolumeTest(test, params) if create_by_xml: pvt.pre_vol_by_xml(pool_name, **vol_params) else: pvt.pre_vol(vol_name, None, '2G', None, pool_name) def check_vol(vol_params): """ Check volume information. 
""" pv = libvirt_storage.PoolVolume(pool_name) # Supported operation if vol_name not in pv.list_volumes(): test.fail("Volume %s doesn't exist" % vol_name) ret = virsh.vol_dumpxml(vol_name, pool_name) libvirt.check_exit_status(ret) # vol-info if not pv.volume_info(vol_name): test.fail("Can't see volume info") # vol-key ret = virsh.vol_key(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume key isn't correct") # vol-path ret = virsh.vol_path(vol_name, pool_name) libvirt.check_exit_status(ret) if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip(): test.fail("Volume path isn't correct") # vol-pool ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if pool_name not in ret.stdout.strip(): test.fail("Volume pool isn't correct") # vol-name ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name)) libvirt.check_exit_status(ret) if vol_name not in ret.stdout.strip(): test.fail("Volume name isn't correct") # vol-resize ret = virsh.vol_resize(vol_name, "2G", pool_name) libvirt.check_exit_status(ret) # Not supported operation # vol-clone ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-create-from volxml = vol_xml.VolXML() vol_params.update({"name": "%s" % create_from_cloned_volume}) v_xml = volxml.new_vol(**vol_params) v_xml.xmltreefile.write() ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-wipe ret = virsh.vol_wipe(vol_name, pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-upload ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'], "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) # vol-download ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name) libvirt.check_result(ret, skip_if=unsupported_err) def check_qemu_cmd(): """ Check qemu command line options. """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) process.run(cmd, shell=True) if disk_src_name: cmd += " | grep file=rbd:%s:" % disk_src_name if auth_user and auth_key: cmd += ('id=%s:auth_supported=cephx' % auth_user) if disk_src_config: cmd += " | grep 'conf=%s'" % disk_src_config elif mon_host: hosts = '\:6789\;'.join(mon_host.split()) cmd += " | grep 'mon_host=%s'" % hosts if driver_iothread: cmd += " | grep iothread%s" % driver_iothread # Run the command process.run(cmd, shell=True) def check_save_restore(): """ Test save and restore operation """ save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) if os.path.exists(save_file): os.remove(save_file) # Login to check vm status vm.wait_for_login().close() def check_snapshot(snap_option, target_dev='vda'): """ Test snapshot operation. 
""" snap_name = "s1" snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem") snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk") xml_snap_exp = [ "disk name='%s' snapshot='external' type='file'" % target_dev ] xml_dom_exp = [ "source file='%s'" % snap_disk, "backingStore type='network' index='1'", "source protocol='rbd' name='%s'" % disk_src_name ] if snap_option.count("disk-only"): options = ("%s --diskspec %s,file=%s --disk-only" % (snap_name, target_dev, snap_disk)) elif snap_option.count("disk-mem"): options = ("%s --memspec file=%s --diskspec %s,file=" "%s" % (snap_name, snap_mem, target_dev, snap_disk)) xml_snap_exp.append("memory snapshot='external' file='%s'" % snap_mem) else: options = snap_name ret = virsh.snapshot_create_as(vm_name, options) if test_disk_internal_snapshot or test_disk_readonly: libvirt.check_result(ret, expected_fails=unsupported_err) else: libvirt.check_result(ret, skip_if=unsupported_err) # check xml file. if not ret.exit_status: snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name, debug=True).stdout.strip() dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() # Delete snapshots. libvirt.clean_up_snapshots(vm_name) if os.path.exists(snap_mem): os.remove(snap_mem) if os.path.exists(snap_disk): os.remove(snap_disk) if not all([x in snap_xml for x in xml_snap_exp]): test.fail("Failed to check snapshot xml") if not all([x in dom_xml for x in xml_dom_exp]): test.fail("Failed to check domain xml") def check_blockcopy(target): """ Block copy operation test. """ blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd") if os.path.exists(blk_file): os.remove(blk_file) blk_mirror = ("mirror type='file' file='%s' " "format='raw' job='copy'" % blk_file) # Do blockcopy ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_result(ret, skip_if=unsupported_err) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count(blk_mirror): test.fail("Can't see block job in domain xml") # Abort ret = virsh.blockjob(vm_name, target, "--abort") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if dom_xml.count(blk_mirror): test.fail("Failed to abort block job") if os.path.exists(blk_file): os.remove(blk_file) # Sleep for a while after abort operation. time.sleep(5) # Do blockcopy again ret = virsh.blockcopy(vm_name, target, blk_file) libvirt.check_exit_status(ret) # Wait for complete def wait_func(): ret = virsh.blockjob(vm_name, target, "--info") return ret.stderr.count("Block Copy: [100 %]") timeout = params.get("blockjob_timeout", 600) utils_misc.wait_for(wait_func, int(timeout)) # Pivot ret = virsh.blockjob(vm_name, target, "--pivot") libvirt.check_exit_status(ret) dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip() if not dom_xml.count("source file='%s'" % blk_file): test.fail("Failed to pivot block job") # Remove the disk file. if os.path.exists(blk_file): os.remove(blk_file) def check_in_vm(vm_obj, target, old_parts, read_only=False): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. 
""" try: session = vm_obj.wait_for_login() new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)".format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = { "s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file) } for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append( first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts( secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get( "test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get( "disk_internal_with_sanlock", "no") auth_place_in_source = 
params.get("auth_place_in_source") # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(mon_host) # After libvirt 3.9.0, auth element can be put into source part. if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0): test.cancel( "place auth in source is not supported in current libvirt version") # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append( 'unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append( 'unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict[ "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict[ "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict[ "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict[ "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict[ "sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict[ "chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict[ "sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = { 'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock' } san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port }) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = { "name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format } create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ( "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") # After 3.9.0,the auth element can be place in source part. if auth_place_in_source: params.update({"auth_in_source": auth_place_in_source}) xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm( vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) # Delete snapshots. 
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name, "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool,
                                          create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
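
# Hedged helper sketch (illustration only, not used by the test above): the rbd
# disk path handed to "virsh attach-disk" in the attach_disk branch is a plain
# string; isolating its construction makes the cephx id/key handling explicit.
# All argument values shown are caller-supplied placeholders.
def build_rbd_disk_path(image_spec, mon_host, auth_user=None, auth_key=None):
    """Return an rbd:<pool/image>:mon_host=... path usable by attach-disk."""
    path = "rbd:%s:mon_host=%s" % (image_spec, mon_host)
    if auth_user and auth_key:
        # Append cephx credentials only when authentication is configured
        path += ":id=%s:key=%s" % (auth_user, auth_key)
    return path

# Example with placeholder values:
#   virsh.attach_disk(vm_name,
#                     build_rbd_disk_path("rbd_pool/rbd.img", "10.0.0.1"),
#                     "vdb", "--live")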
""" try: session = vm_obj.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;" " touch /mnt/testfile; umount /mnt)" .format(added_part)) s, o = session.cmd_status_output(cmd, timeout=60) session.close() logging.info("Check disk operation in VM:\n, %s, %s", s, o) # Readonly fs, check the error messages. # The command may return True, read-only # messges can be found from the command output if read_only: if "Read-only file system" not in o: return False else: return True # Other errors if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def clean_up_volume_snapshots(): """ Get all snapshots for rbd_vol.img volume,unprotect and then clean up them. """ cmd = ("rbd -m {0} {1} info {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) if process.run(cmd, ignore_status=True, shell=True).exit_status: return # Get snapshot list. cmd = ("rbd -m {0} {1} snap" " list {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text snap_names = [] if snaps_out: for line in snaps_out.rsplit("\n"): if line.startswith("SNAPID") or line == "": continue snap_line = line.rsplit() if len(snap_line) == 4: snap_names.append(snap_line[1]) logging.debug("Find snapshots: %s", snap_names) # Unprotect snapshot first,otherwise it will fail to purge volume for snap_name in snap_names: cmd = ("rbd -m {0} {1} snap" " unprotect {2}@{3}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name)) process.run(cmd, ignore_status=True, shell=True) # Purge volume,and then delete volume. cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap" " purge {2} && rbd -m {0} {1} rm {2}" "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name))) process.run(cmd, ignore_status=True, shell=True) def make_snapshot(): """ make external snapshots. :return external snapshot path list """ logging.info("Making snapshot...") first_disk_source = vm.get_first_disk_devices()['source'] snapshot_path_list = [] snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2") snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3") snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4") snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4") snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5") snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5") # Attempt to take different types of snapshots. 
snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata", "s2": "s2 --memspec %s --no-metadata" % snapshot2_file, "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file, "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file), "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)} for snapshot_name in sorted(snapshots_param_dict.keys()): ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name], **virsh_dargs) libvirt.check_exit_status(ret) if snapshot_name != 's4' and snapshot_name != 's5': snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name)) return snapshot_path_list def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = results_stdout_52lts(secret_list_result).strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result mon_host = params.get("mon_host") disk_src_name = params.get("disk_source_name") disk_src_config = params.get("disk_source_config") disk_src_host = params.get("disk_source_host") disk_src_port = params.get("disk_source_port") disk_src_pool = params.get("disk_source_pool") disk_format = params.get("disk_format", "raw") driver_iothread = params.get("driver_iothread") snap_name = params.get("disk_snap_name") attach_device = "yes" == params.get("attach_device", "no") attach_disk = "yes" == params.get("attach_disk", "no") test_save_restore = "yes" == params.get("test_save_restore", "no") test_snapshot = "yes" == params.get("test_snapshot", "no") test_blockcopy = "yes" == params.get("test_blockcopy", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") test_vm_parts = "yes" == params.get("test_vm_parts", "no") additional_guest = "yes" == params.get("additional_guest", "no") create_snapshot = "yes" == params.get("create_snapshot", "no") convert_image = "yes" == params.get("convert_image", "no") create_volume = "yes" == params.get("create_volume", "no") create_by_xml = "yes" == params.get("create_by_xml", "no") client_key = params.get("client_key") client_name = params.get("client_name") auth_key = params.get("auth_key") auth_user = params.get("auth_user") auth_type = params.get("auth_type") auth_usage = params.get("secret_usage") pool_name = params.get("pool_name") pool_type = params.get("pool_type") vol_name = params.get("vol_name") cloned_vol_name = params.get("cloned_volume", "cloned_test_volume") create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume") vol_cap = params.get("vol_cap") vol_cap_unit = params.get("vol_cap_unit") start_vm = "yes" == params.get("start_vm", "no") test_disk_readonly = "yes" == params.get("test_disk_readonly", "no") test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no") test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no") disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no") # Create /etc/ceph/ceph.conf file to suppress false 
warning error message. process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True) cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf" .format(mon_host)) process.run(cmd, ignore_status=True, shell=True) # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) if additional_guest: guest_name = "%s_%s" % (vm_name, '1') timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout, ignore_status=False) additional_vm = vm.clone(guest_name) if start_vm: virsh.start(guest_name) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) key_opt = "" secret_uuid = None snapshot_path = None key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name) front_end_img_file = os.path.join(data_dir.get_tmp_dir(), "%s_frontend_test.img" % vm_name) # Construct a unsupported error message list to skip these kind of tests unsupported_err = [] if driver_iothread: unsupported_err.append('IOThreads not supported') if test_snapshot: unsupported_err.append('live disk snapshot not supported') if test_disk_readonly: if not libvirt_version.version_compare(5, 0, 0): unsupported_err.append('Could not create file: Permission denied') unsupported_err.append('Permission denied') else: unsupported_err.append('unsupported configuration: external snapshot ' + 'for readonly disk vdb is not supported') if test_disk_internal_snapshot: unsupported_err.append('unsupported configuration: internal snapshot for disk ' + 'vdb unsupported for storage type raw') if test_blockcopy: unsupported_err.append('block copy is not supported') if attach_disk: unsupported_err.append('No such file or directory') if create_volume: unsupported_err.append("backing 'volume' disks isn't yet supported") unsupported_err.append('this function is not supported') try: # Clean up dirty secrets in test environments if there have. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Prepare test environment. qemu_config = LibvirtQemuConfig() if disk_snapshot_with_sanlock: # Install necessary package:sanlock,libvirt-lock-sanlock if not utils_package.package_install(["sanlock"]): test.error("fail to install sanlock") if not utils_package.package_install(["libvirt-lock-sanlock"]): test.error("fail to install libvirt-lock-sanlock") # Set virt_use_sanlock result = process.run("setsebool -P virt_use_sanlock 1", shell=True) if result.exit_status: test.error("Failed to set virt_use_sanlock value") # Update lock_manager in qemu.conf qemu_config.lock_manager = 'sanlock' # Update qemu-sanlock.conf. san_lock_config = LibvirtSanLockConfig() san_lock_config.user = '******' san_lock_config.group = 'sanlock' san_lock_config.host_id = 1 san_lock_config.auto_disk_leases = True process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True) san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock" san_lock_config.require_lease_for_disks = False # Start sanlock service and restart libvirtd to enforce changes. 
result = process.run("systemctl start wdmd", shell=True) if result.exit_status: test.error("Failed to start wdmd service") result = process.run("systemctl start sanlock", shell=True) if result.exit_status: test.error("Failed to start sanlock service") utils_libvirtd.Libvirtd().restart() # Prepare lockspace and lease file for sanlock in order. sanlock_cmd_dict = OrderedDict() sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS" sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0" sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS" sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock" sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock" sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" + "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock" sanlock_cmd_dict["chown sanlock:sanlock " + "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc" sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" + "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0" for sanlock_cmd in sanlock_cmd_dict.keys(): result = process.run(sanlock_cmd, shell=True) if result.exit_status: test.error(sanlock_cmd_dict[sanlock_cmd]) # Create one lease device and add it to VM. san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) lease_device = Lease() lease_device.lockspace = 'TEST_LS' lease_device.key = 'test-disk-resource-lock' lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'} san_lock_vmxml.add_device(lease_device) san_lock_vmxml.sync() # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.error("Failed to get secret uuid") # Set secret value auth_key = params.get("auth_key") ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Delete the disk if it exists cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) else: test.error("Failed to install ceph-common") if disk_src_config: config_ceph() disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) targetdev = params.get("disk_target", "vdb") # To be compatible with create_disk_xml function, # some parameters need to be updated. 
params.update({ "type_name": params.get("disk_type", "network"), "target_bus": params.get("disk_target_bus"), "target_dev": targetdev, "secret_uuid": secret_uuid, "source_protocol": params.get("disk_source_protocol"), "source_name": disk_src_name, "source_host_name": disk_src_host, "source_host_port": disk_src_port}) # Prepare disk image if convert_image: first_disk = vm.get_first_disk_devices() blk_source = first_disk['source'] # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) elif create_volume: vol_params = {"name": vol_name, "capacity": int(vol_cap), "capacity_unit": vol_cap_unit, "format": disk_format} create_pool() create_vol(vol_params) check_vol(vol_params) else: # Create an local image and make FS on it. disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" % (disk_format, img_file, img_file)) process.run(disk_cmd, ignore_status=False, shell=True) # Convert the image to remote storage disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O" " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, img_file, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) # Create disk snapshot if needed. if create_snapshot: snap_cmd = ("rbd -m %s %s snap create %s@%s" % (mon_host, key_opt, disk_src_name, snap_name)) process.run(snap_cmd, ignore_status=False, shell=True) if test_json_pseudo_protocol: # Create one frontend image with the rbd backing file. json_str = ('json:{"file.driver":"rbd",' '"file.filename":"rbd:%s:mon_host=%s"}' % (disk_src_name, mon_host)) # pass different json string according to the auth config if auth_user and auth_key: json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key)) disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" % (json_str, front_end_img_file)) disk_path = front_end_img_file process.run(disk_cmd, ignore_status=False, shell=True) # If hot plug, start VM first, and then wait the OS boot. # Otherwise stop VM if running. if start_vm: if vm.is_dead(): vm.start() vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() if attach_device: if create_volume: params.update({"source_pool": pool_name}) params.update({"type_name": "volume"}) # No need auth options for volume if "auth_user" in params: params.pop("auth_user") if "auth_type" in params: params.pop("auth_type") if "secret_type" in params: params.pop("secret_type") if "secret_uuid" in params: params.pop("secret_uuid") if "secret_usage" in params: params.pop("secret_usage") xml_file = libvirt.create_disk_xml(params) if additional_guest: # Copy xml_file for additional guest VM. 
shutil.copyfile(xml_file, additional_xml_file) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) if additional_guest: # Make sure the additional VM is running if additional_vm.is_dead(): additional_vm.start() additional_vm.wait_for_login().close() ret = virsh.attach_device(guest_name, additional_xml_file, "", debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif attach_disk: opts = params.get("attach_option", "") ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_readonly: params.update({'readonly': "yes"}) xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif test_disk_internal_snapshot: xml_file = libvirt.create_disk_xml(params) opts = params.get("attach_option", "") ret = virsh.attach_device(vm_name, xml_file, flagstr=opts, debug=True) libvirt.check_result(ret, skip_if=unsupported_err) elif disk_snapshot_with_sanlock: if vm.is_dead(): vm.start() snapshot_path = make_snapshot() if vm.is_alive(): vm.destroy() elif not create_volume: libvirt.set_vm_disk(vm, params) if test_blockcopy: logging.info("Creating %s...", vm_name) vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) vm.undefine() if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status: vmxml_backup.define() test.fail("Can't create the domain") elif vm.is_dead(): vm.start() # Wait for vm is running vm.wait_for_login(timeout=600).close() if additional_guest: if additional_vm.is_dead(): additional_vm.start() # Check qemu command line if test_qemu_cmd: check_qemu_cmd() # Check partitions in vm if test_vm_parts: if not check_in_vm(vm, targetdev, old_parts, read_only=create_snapshot): test.fail("Failed to check vm partitions") if additional_guest: if not check_in_vm(additional_vm, targetdev, old_parts): test.fail("Failed to check vm partitions") # Save and restore operation if test_save_restore: check_save_restore() if test_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option) if test_blockcopy: check_blockcopy(targetdev) if test_disk_readonly: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, 'vdb') if test_disk_internal_snapshot: snap_option = params.get("snapshot_option", "") check_snapshot(snap_option, targetdev) # Detach the device. if attach_device: xml_file = libvirt.create_disk_xml(params) ret = virsh.detach_device(vm_name, xml_file) libvirt.check_exit_status(ret) if additional_guest: ret = virsh.detach_device(guest_name, xml_file) libvirt.check_exit_status(ret) elif attach_disk: ret = virsh.detach_disk(vm_name, targetdev) libvirt.check_exit_status(ret) # Check disk in vm after detachment. if attach_device or attach_disk: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) if len(new_parts) != len(old_parts): test.fail("Disk still exists in vm" " after detachment") session.close() except virt_vm.VMStartError as details: for msg in unsupported_err: if msg in str(details): test.cancel(str(details)) else: test.fail("VM failed to start." "Error: %s" % str(details)) finally: # Remove /etc/ceph/ceph.conf file if exists. if os.path.exists('/etc/ceph/ceph.conf'): os.remove('/etc/ceph/ceph.conf') # Delete snapshots. 
snapshot_lists = virsh.snapshot_list(vm_name)
if len(snapshot_lists) > 0:
    libvirt.clean_up_snapshots(vm_name, snapshot_lists)
    for snap in snapshot_lists:
        virsh.snapshot_delete(vm_name, snap, "--metadata")
# Recover VM.
if vm.is_alive():
    vm.destroy(gracefully=False)
if additional_guest:
    virsh.remove_domain(guest_name, "--remove-all-storage",
                        ignore_status=True)
# Remove the snapshot.
if create_snapshot:
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
           " purge {2} && rbd -m {0} {1} rm {2}"
           "".format(mon_host, key_opt, disk_src_name))
    process.run(cmd, ignore_status=True, shell=True)
elif create_volume:
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
           "".format(mon_host, key_opt,
                     os.path.join(disk_src_pool, cloned_vol_name)))
    process.run(cmd, ignore_status=True, shell=True)
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
           "".format(mon_host, key_opt,
                     os.path.join(disk_src_pool, create_from_cloned_volume)))
    process.run(cmd, ignore_status=True, shell=True)
    clean_up_volume_snapshots()
else:
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
           "".format(mon_host, key_opt, disk_src_name))
    process.run(cmd, ignore_status=True, shell=True)
# Delete tmp files.
if os.path.exists(key_file):
    os.remove(key_file)
if os.path.exists(img_file):
    os.remove(img_file)
# Clean up volume, pool.
if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
    virsh.vol_delete(vol_name, pool_name)
if pool_name and pool_name in virsh.pool_state_dict():
    virsh.pool_destroy(pool_name, **virsh_dargs)
    virsh.pool_undefine(pool_name, **virsh_dargs)
# Clean up secret.
secret_list = get_secret_list()
if secret_list:
    for secret_uuid in secret_list:
        virsh.secret_undefine(secret_uuid)
logging.info("Restoring vm...")
vmxml_backup.sync()
if disk_snapshot_with_sanlock:
    # Restore virt_use_sanlock setting.
    process.run("setsebool -P virt_use_sanlock 0", shell=True)
    # Restore qemu config.
    qemu_config.restore()
    utils_libvirtd.Libvirtd().restart()
    # Force shutdown sanlock service.
    process.run("sanlock client shutdown -f 1", shell=True)
    # Clean up lockspace folder.
    process.run("rm -rf /var/lib/libvirt/sanlock/*", shell=True)
if snapshot_path is not None:
    for snapshot in snapshot_path:
        if os.path.exists(snapshot):
            os.remove(snapshot)
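# --- illustrative sketch, not part of the test suite -----------------------
# The finally block above repeatedly chains "rbd info && rbd snap purge &&
# rbd rm" so that snapshots and images are only removed when the image really
# exists on the cluster.  A minimal standalone version of that cleanup
# pattern is sketched below; the monitor address, keyring path and image name
# are placeholder values, not taken from the test configuration.  Note that
# protected snapshots must be unprotected first, which is what
# clean_up_volume_snapshots() handles in the test itself.
import subprocess


def remove_rbd_image(mon_host, keyring, image):
    """Purge all snapshots of an rbd image and delete it, if it exists."""
    base = ["rbd", "-m", mon_host, "--keyring", keyring]
    # "rbd info" returns non-zero when the image does not exist.
    if subprocess.run(base + ["info", image],
                      stdout=subprocess.DEVNULL,
                      stderr=subprocess.DEVNULL).returncode != 0:
        return
    # Snapshots have to be purged first, otherwise "rbd rm" refuses to run.
    subprocess.run(base + ["snap", "purge", image], check=True)
    subprocess.run(base + ["rm", image], check=True)


# Example with placeholder values:
# remove_rbd_image("10.0.0.1:6789", "/tmp/ceph.key", "rbd/vm_test.img")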
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage deivce to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") cmd = ( "qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage deivce to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = { "sec_usage": "iscsi", "sec_target": auth_sec_usage } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } elif use_auth_usage: disk_auth_dict = { "auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target } else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % ( iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = { "attrs": { "protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num) }, "hosts": [{ "name": iscsi_host, "port": iscsi_port }] } elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = 
libvirt.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % ( gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = { "attrs": { "protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name) }, "hosts": [{ "name": gluster_host_ip, "port": "24007" }] } elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = { "sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret" } auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = { "auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid } cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % ( ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) disk_src_dict = { "attrs": { "protocol": "rbd", "name": ceph_disk_name }, "hosts": [{ "name": ceph_host_ip, "port": ceph_host_port }] } elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = { 'attrs': { 'file': device_source, 'type_name': 'file' } } else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) encrypt_dev(device_source, params) libvirt.check_exit_status(ret) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_sec_uuid } } disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) vmxml.sync() try: vm.start() vm.wait_for_login() except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": libvirt.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
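# --- illustrative sketch, not part of the test suite -----------------------
# encrypt_dev() and check_dev_format() above drive qemu-img directly: the
# backend storage is recreated with "-f luks" plus an inline base64 secret
# object, and the resulting format is verified by grepping "file format" out
# of "qemu-img info".  A standalone version of those two steps follows; the
# path, password and size are placeholder values.
import base64
import subprocess


def make_luks_image(path, password, size="100M"):
    """Create a LUKS-formatted image whose key is passed as a base64 secret."""
    secret = base64.b64encode(password.encode()).decode()
    subprocess.run(
        ["qemu-img", "create", "-f", "luks",
         "--object", "secret,id=sec0,data=%s,format=base64" % secret,
         "-o", "key-secret=sec0", path, size],
        check=True)


def image_format(path):
    """Return the 'file format' reported by qemu-img info (e.g. 'luks')."""
    out = subprocess.run(["qemu-img", "info", path],
                         capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        if line.startswith("file format:"):
            return line.split(":", 1)[1].strip()
    return None


# Example with placeholder values:
# make_luks_image("/tmp/enc.img", "password")
# assert image_format("/tmp/enc.img") == "luks"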
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocal = params.get("disk_source_protocal", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" + " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocal secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.hostname = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_host poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML 
disk_params = {'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly} disk_params_src = {} if disk_type == "network": disk_params_src = {'source_protocol': disk_src_protocal, 'source_name': iscsi_target + "/1", 'source_host_name': disk_src_host, 'source_host_port': disk_src_port} elif disk_type == "volume": disk_params_src = {'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode} else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = {'auth_user': chap_user, 'secret_type': disk_src_protocal, 'secret_usage': secret_xml.target} disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") # Attach the iscsi network disk to domain logging.debug("Attach disk by XML: %s", open(disk_xml).read()) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstrs=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): vm.start() cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) pass else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError), e: 
logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
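# --- illustrative sketch, not part of the test suite -----------------------
# The CHAP branch above defines an iscsi-usage secret, stores the
# base64-encoded password in it, and then points the disk <auth> element at
# the same usage target.  A standalone virsh-CLI version of those steps is
# sketched below; the usage target name and password are placeholder values,
# and parsing the UUID from "Secret <uuid> created" assumes the default virsh
# output format.
import base64
import subprocess
import tempfile

SECRET_XML = """<secret ephemeral='no' private='yes'>
  <usage type='iscsi'>
    <target>libvirtiscsi</target>
  </usage>
</secret>
"""

# The disk XML attached by the test then carries a matching auth element:
DISK_AUTH_XML = """<auth username='chap_user'>
  <secret type='iscsi' usage='libvirtiscsi'/>
</auth>
"""


def define_chap_secret(password):
    """Define the iSCSI secret and load the base64-encoded password into it."""
    with tempfile.NamedTemporaryFile("w", suffix=".xml") as xml:
        xml.write(SECRET_XML)
        xml.flush()
        out = subprocess.run(["virsh", "secret-define", xml.name],
                             capture_output=True, text=True,
                             check=True).stdout
    uuid = out.split()[1]
    encoded = base64.b64encode(password.encode()).decode()
    subprocess.run(["virsh", "secret-set-value", uuid, encoded], check=True)
    return uuid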
def run(test, params, env): """ Test nbd disk option. 1.Prepare backend storage 2.Use nbd to export the backend storage with or without TLS 3.Prepare a disk xml indicating to the backend storage 4.Start VM with disk hotplug/coldplug 5.Start snapshot or save/restore operations on ndb disk 6.Check some behaviours on VM 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} def check_disk_save_restore(save_file): """ Check domain save and restore operation. :param save_file: the path to saved file """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Check domain snapshot operations. """ # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) device_target = 'vda' snapshot_name1_option = "--diskspec %s,file=%s,snapshot=external --disk-only --atomic" % ( device_target, snapshot_name1_file) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name1, snapshot_name1_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name1 not in snap_lists: test.fail("Snapshot %s doesn't exist" % snapshot_name1) # Check file can be created after snapshot def _check_file_create(filename): """ Check whether file with specified filename exists or not. :param filename: finename """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) cmd = ("echo" " teststring > /tmp/{0}".format(filename)) status, output = session.cmd_status_output(cmd) if status != 0: test.fail("Failed to touch one file on VM internal") except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) raise finally: if session: session.close() _check_file_create("disk.txt") # Create memory snapshot. snapshot_name2_mem_option = "--memspec file=%s,snapshot=external" % ( snapshot_name2_mem_file) snapshot_name2_disk_option = "--diskspec %s,file=%s,snapshot=external --atomic" % ( device_target, snapshot_name2_disk_file) snapshot_name2_option = "%s %s" % (snapshot_name2_mem_option, snapshot_name2_disk_option) ret = virsh.snapshot_create_as(vm_name, "%s %s" % (snapshot_name2, snapshot_name2_option), debug=True) libvirt.check_exit_status(ret) snap_lists = virsh.snapshot_list(vm_name, debug=True) if snapshot_name2 not in snap_lists: test.fail("Snapshot: %s doesn't exist" % snapshot_name2) _check_file_create("mem.txt") def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. 
""" try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdb") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") image_path = params.get("emulated_image") # Get config parameters status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") tls_enabled = "yes" == params.get("enable_tls", "no") enable_private_key_encryption = "yes" == params.get( "enable_private_key_encryption", "no") private_key_encrypt_passphrase = params.get("private_key_password") domain_operation = params.get("domain_operation") secret_uuid = None # Get snapshot attributes. snapshot_name1 = params.get("snapshot_name1") snapshot_name1_file = params.get("snapshot_name1_file") snapshot_name2 = params.get("snapshot_name2") snapshot_name2_mem_file = params.get("snapshot_name2_mem_file") snapshot_name2_disk_file = params.get("snapshot_name2_disk_file") # Initialize one NbdExport object nbd = None # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) # Get server hostname. 
hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip() # Setup backend storage nbd_server_host = hostname nbd_server_port = params.get("nbd_server_port") image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img") export_name = params.get("export_name", None) deleteExisted = "yes" == params.get("deleteExisted", "yes") tls_bit = "no" if tls_enabled: tls_bit = "yes" # Create secret if enable_private_key_encryption: # this feature is enabled after libvirt 6.6.0 if not libvirt_version.version_compare(6, 6, 0): test.cancel( "current libvirt version doesn't support client private key encryption" ) utils_secret.clean_up_secrets() private_key_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", private_key_sec_uuid) private_key_sec_passwd = params.get("private_key_password", "redhat") ret = virsh.secret_set_value(private_key_sec_uuid, private_key_sec_passwd, encode=True, use_file=True, debug=True) libvirt.check_exit_status(ret) secret_uuid = private_key_sec_uuid # Initialize special test environment config for snapshot operations. if domain_operation == "snap_shot": first_disk = vm.get_first_disk_devices() image_path = first_disk['source'] device_target = 'vda' # Remove previous xml disks = vmxml.get_devices(device_type="disk") for disk_ in disks: if disk_.target['dev'] == device_target: vmxml.del_device(disk_) break # Create NbdExport object nbd = NbdExport( image_path, image_format=device_format, port=nbd_server_port, export_name=export_name, tls=tls_enabled, deleteExisted=deleteExisted, private_key_encrypt_passphrase=private_key_encrypt_passphrase, secret_uuid=secret_uuid) nbd.start_nbd_server() # Prepare disk source xml source_attrs_dict = {"protocol": "nbd", "tls": "%s" % tls_bit} if export_name: source_attrs_dict.update({"name": "%s" % export_name}) disk_src_dict = {} disk_src_dict.update({"attrs": source_attrs_dict}) disk_src_dict.update( {"hosts": [{ "name": nbd_server_host, "port": nbd_server_port }]}) # Add disk xml. disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": 'raw'} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) # Hotplug disk. 
if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) # Check save and restore operation and its result if domain_operation == 'save_restore': save_file = "/tmp/%s.save" % vm_name check_disk_save_restore(save_file) # Check attached nbd disk if check_partitions and not status_error: logging.debug("wait seconds for starting in checking vm part") time.sleep(2) if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") # Check snapshot operation and its result if domain_operation == 'snap_shot': check_snapshot() # Unplug disk. if hotplug_disk: result = virsh.detach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True, wait_for_event=True) libvirt.check_exit_status(result, status_error) finally: if enable_private_key_encryption: utils_secret.clean_up_secrets() # Clean up backend storage and TLS try: if nbd: nbd.cleanup() # Clean up snapshots if exist if domain_operation == 'snap_shot': snap_lists = virsh.snapshot_list(vm_name, debug=True) for snap_name in snap_lists: virsh.snapshot_delete(vm_name, snap_name, "--metadata", debug=True, ignore_status=True) # Cleaup dirty data if exists if os.path.exists(snapshot_name1_file): os.remove(snapshot_name1_file) if os.path.exists(snapshot_name2_mem_file): os.remove(snapshot_name2_mem_file) if os.path.exists(snapshot_name2_disk_file): os.remove(snapshot_name2_disk_file) except Exception as ndbEx: logging.info("Clean Up nbd failed: %s" % str(ndbEx)) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata")
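# --- illustrative sketch, not part of the test suite -----------------------
# The NbdExport object used above boils down to two things: serving the
# backing image over NBD and generating a <source protocol='nbd'> disk
# element that names the export plus the server host and port (with tls='yes'
# added when TLS is enabled).  A minimal non-TLS version using qemu-nbd
# directly is sketched below; the image path, export name, host and port are
# placeholder values.
import subprocess

IMAGE = "/var/lib/libvirt/images/nbdtest.img"

# Serve the raw image on TCP port 10809 under the export name "nbdtest".
server = subprocess.Popen(["qemu-nbd", "-f", "raw", "-p", "10809",
                           "-x", "nbdtest", IMAGE])

# The disk XML the test attaches then looks roughly like this:
NBD_DISK_XML = """<disk type='network' device='disk'>
  <driver name='qemu' type='raw'/>
  <source protocol='nbd' name='nbdtest'>
    <host name='nbd-server.example.com' port='10809'/>
  </source>
  <target dev='vdb' bus='virtio'/>
</disk>
"""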
def run(test, params, env): """ Test the pull-mode backup function Steps: 1. craete a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. create some data on vdb 5. start a pull mode incremental backup 6. repeat step 5 to 7 7. check the full/incremental backup file data """ # Basic case config hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") local_hostname = params.get("loal_hostname", "localhost") local_ip = params.get("local_ip", "127.0.0.1") local_user_name = params.get("local_user_name", "root") local_user_password = params.get("local_user_password", "redhat") tmp_dir = data_dir.get_tmp_dir() # Backup config scratch_type = params.get("scratch_type", "file") reuse_scratch_file = "yes" == params.get("reuse_scratch_file") prepare_scratch_file = "yes" == params.get("prepare_scratch_file") scratch_blkdev_path = params.get("scratch_blkdev_path") scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size) prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev") backup_rounds = int(params.get("backup_rounds", 3)) backup_error = "yes" == params.get("backup_error") expect_backup_canceled = "yes" == params.get("expect_backup_canceled") # NBD service config nbd_protocol = params.get("nbd_protocol", "unix") nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket") nbd_tcp_port = params.get("nbd_tcp_port", "10809") nbd_hostname = local_hostname set_exportname = "yes" == params.get("set_exportname") set_exportbitmap = "yes" == params.get("set_exportbitmap") # TLS service config tls_enabled = "yes" == params.get("tls_enabled") tls_x509_verify = "yes" == params.get("tls_x509_verify") custom_pki_path = "yes" == params.get("custom_pki_path") tls_client_ip = tls_server_ip = local_ip tls_client_cn = tls_server_cn = local_hostname tls_client_user = tls_server_user = local_user_name tls_client_pwd = tls_server_pwd = local_user_password tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert") tls_error = "yes" == params.get("tls_error") # LUKS config scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted") luks_passphrase = params.get("luks_passphrase", "password") # Cancel the test if libvirt support related functions if not libvirt_version.version_compare(6, 0, 0): test.cancel("Current libvirt version doesn't support " "incremental backup.") if tls_enabled and not libvirt_version.version_compare(6, 6, 0): test.cancel("Current libvirt version doesn't support pull mode " "backup with tls nbd.") try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() utils_backup.enable_inc_backup_for_vm(vm) # Prepare tls env if tls_enabled: # Prepare pki tls_config = { "qemu_tls": "yes", "auto_recover": "yes", "client_ip": tls_client_ip, "server_ip": tls_server_ip, "client_cn": tls_client_cn, "server_cn": tls_server_cn, "client_user": tls_client_user, "server_user": tls_server_user, "client_pwd": tls_client_pwd, "server_pwd": tls_server_pwd, } if custom_pki_path: pki_path = os.path.join(tmp_dir, "inc_bkup_pki") else: pki_path = "/etc/pki/libvirt-backup/" if tls_x509_verify: tls_config["client_ip"] = tls_client_ip 
tls_config["custom_pki_path"] = pki_path tls_obj = TLSConnection(tls_config) tls_obj.conn_setup(True, tls_provide_client_cert) logging.debug("TLS certs in: %s" % pki_path) # Set qemu.conf qemu_config = LibvirtQemuConfig() if tls_x509_verify: qemu_config.backup_tls_x509_verify = True else: qemu_config.backup_tls_x509_verify = False if custom_pki_path: qemu_config.backup_tls_x509_cert_dir = pki_path utils_libvirtd.Libvirtd().restart() # Prepare libvirt secret if scratch_luks_encrypted: utils_secret.clean_up_secrets() luks_secret_uuid = libvirt.create_secret(params) virsh.secret_set_value(luks_secret_uuid, luks_passphrase, encode=True, debug=True) # Prepare the disk to be backuped. disk_params = {} disk_path = "" if original_disk_type == "local": image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } if original_disk_target: disk_params["target_dev"] = original_disk_target elif original_disk_type == "iscsi": iscsi_host = '127.0.0.1' iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=original_disk_size, portal_ip=iscsi_host) disk_path = ("iscsi://[%s]/%s/%s" % (iscsi_host, iscsi_target, lun_num)) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'iscsi', 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': iscsi_host, 'source_host_port': '3260' } disk_params.update(disk_params_src) elif original_disk_type == "gluster": gluster_vol_name = "gluster_vol" gluster_pool_name = "gluster_pool" gluster_img_name = "gluster.qcow2" gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) disk_path = 'gluster://%s/%s/%s' % ( gluster_host_ip, gluster_vol_name, gluster_img_name) process.run("qemu-img create -f qcow2 %s %s" % (disk_path, original_disk_size), shell=True, verbose=True) disk_params = { 'device_type': 'disk', 'type_name': 'network', "driver_type": "qcow2", 'target_dev': original_disk_target } disk_params_src = { 'source_protocol': 'gluster', 'source_name': gluster_vol_name + "/%s" % gluster_img_name, 'source_host_name': gluster_host_ip, 'source_host_port': '24007' } disk_params.update(disk_params_src) else: test.error("The disk type '%s' not supported in this script.", original_disk_type) if hotplug_disk: vm.start() session = vm.wait_for_login().close() disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm_name, disk_xml, debug=True) else: disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] is_incremental = False backup_file_list = [] for backup_index in range(backup_rounds): # Prepare backup xml 
backup_params = {"backup_mode": "pull"} if backup_index > 0: is_incremental = True backup_params["backup_incremental"] = "checkpoint_" + str( backup_index - 1) # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {} if nbd_protocol == "unix": backup_server_dict["transport"] = "unix" backup_server_dict["socket"] = nbd_socket else: backup_server_dict["name"] = nbd_hostname backup_server_dict["port"] = nbd_tcp_port if tls_enabled: backup_server_dict["tls"] = "yes" backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Custom nbd export name and bitmap name if required if set_exportname: nbd_export_name = original_disk_target + "_custom_exp" backup_disk_params["exportname"] = nbd_export_name if set_exportbitmap: nbd_bitmap_name = original_disk_target + "_custom_bitmap" backup_disk_params["exportbitmap"] = nbd_bitmap_name # Prepare nbd scratch file/dev params scratch_params = {"attrs": {}} scratch_path = None if scratch_type == "file": scratch_file_name = "scratch_file_%s" % backup_index scratch_path = os.path.join(tmp_dir, scratch_file_name) if prepare_scratch_file: libvirt.create_local_disk("file", scratch_path, original_disk_size, "qcow2") scratch_params["attrs"]["file"] = scratch_path elif scratch_type == "block": if prepare_scratch_blkdev: scratch_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=scratch_blkdev_size) scratch_params["attrs"]["dev"] = scratch_path else: test.fail( "We do not support backup scratch type: '%s'" % scratch_type) if scratch_luks_encrypted: encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_secret_uuid } } scratch_params["encryption"] = encryption_dict logging.debug("scratch params: %s", scratch_params) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml( backup_params, backup_disk_xmls) logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint_%s" % backup_index checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get( "checkpoint_desc", "desc of cp_%s" % backup_index) disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap + str( backup_index) disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index, checkpoint_xml) # Create some data in vdb dd_count = "1" dd_seek = str(backup_index * 10 + 10) dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml if reuse_scratch_file: 
backup_options += " --reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, ignore_status=True, debug=True) if backup_result.exit_status: raise utils_backup.BackupBeginError( backup_result.stderr.strip()) # If required, do some error operations during backup job error_operation = params.get("error_operation") if error_operation: if "destroy_vm" in error_operation: vm.destroy(gracefully=False) if "kill_qemu" in error_operation: utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL) if utils_misc.wait_for( lambda: utils_backup.is_backup_canceled(vm_name), timeout=5): raise utils_backup.BackupCanceledError() elif expect_backup_canceled: test.fail("Backup job should be canceled but not.") backup_file_path = os.path.join( tmp_dir, "backup_file_%s.qcow2" % str(backup_index)) backup_file_list.append(backup_file_path) nbd_params = { "nbd_protocol": nbd_protocol, "nbd_export": nbd_export_name } if nbd_protocol == "unix": nbd_params["nbd_socket"] = nbd_socket elif nbd_protocol == "tcp": nbd_params["nbd_hostname"] = nbd_hostname nbd_params["nbd_tcp_port"] = nbd_tcp_port if tls_enabled: nbd_params["tls_dir"] = pki_path nbd_params["tls_server_ip"] = tls_server_ip if not is_incremental: # Do full backup try: utils_backup.pull_full_backup_to_file( nbd_params, backup_file_path) except Exception as details: if tls_enabled and tls_error: raise utils_backup.BackupTLSError(details) else: test.fail("Fail to get full backup data: %s" % details) logging.debug("Full backup to: %s", backup_file_path) else: # Do incremental backup utils_backup.pull_incremental_backup_to_file( nbd_params, backup_file_path, nbd_bitmap_name, original_disk_size) # Check if scratch file encrypted if scratch_luks_encrypted and scratch_path: cmd = "qemu-img info -U %s" % scratch_path result = process.run(cmd, shell=True, verbose=True).stdout_text.strip() if (not re.search("format.*luks", result, re.IGNORECASE) or not re.search("encrypted.*yes", result, re.IGNORECASE)): test.fail("scratch file/dev is not encrypted by LUKS") virsh.domjobabort(vm_name, debug=True) for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True) if vm.is_alive(): vm.destroy(gracefully=False) # Compare the backup data and original data original_data_file = os.path.join(tmp_dir, "original_data.qcow2") cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (disk_path, original_data_file) process.run(cmd, shell=True, verbose=True) for backup_file in backup_file_list: if not utils_backup.cmp_backup_data(original_data_file, backup_file): test.fail("Backup and original data are not identical for" "'%s' and '%s'" % (disk_path, backup_file)) else: logging.debug("'%s' contains correct backup data", backup_file) except utils_backup.BackupBeginError as detail: if backup_error: logging.debug("Backup failed as expected.") else: test.fail("Backup failed to start: %s" % detail) except utils_backup.BackupTLSError as detail: if tls_error: logging.debug("Failed to get backup data as expected.") else: test.fail("Failed to get tls backup data: %s" % detail) except utils_backup.BackupCanceledError as detail: if expect_backup_canceled: logging.debug("Backup canceled as expected.") if not vm.is_alive(): logging.debug("Check if vm can be started again when backup " "canceled.") vm.start() vm.wait_for_login().close() else: test.fail("Backup job canceled: %s" % detail) finally: # Remove checkpoints clean_checkpoint_metadata = not vm.is_alive() if "error_operation" in locals() and error_operation is not None: if "kill_qemu" in 
error_operation: clean_checkpoint_metadata = True utils_backup.clean_checkpoints( vm_name, clean_metadata=clean_checkpoint_metadata) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove iscsi devices if original_disk_type == "iscsi" or scratch_type == "block": libvirt.setup_or_cleanup_iscsi(False) # Remove gluster devices if original_disk_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) # Recover qemu.conf if "qemu_config" in locals(): qemu_config.restore() # Remove tls object if "tls_obj" in locals(): del tls_obj # Remove libvirt secret if "luks_secret_uuid" in locals(): virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
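# The backup test above converts the original disk to a temporary qcow2 image and compares
# it with every backup file via utils_backup.cmp_backup_data. A rough standalone sketch of
# that final data check is shown below using "qemu-img compare", which exits 0 when the
# guest-visible contents of two images are identical. The paths are placeholders and the
# sketch assumes qemu-img is installed on the host.
import subprocess


def images_have_same_content(image_a, image_b):
    """Return True if the two images carry identical guest-visible data."""
    result = subprocess.run(["qemu-img", "compare", image_a, image_b],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return result.returncode == 0


# Example (hypothetical paths):
#   if not images_have_same_content("/tmp/original_data.qcow2",
#                                   "/tmp/backup_file_0.qcow2"):
#       print("backup does not match the original disk")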
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage device to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") preallocation = params.get("preallocation") cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) # Add preallocation if it is given in params if preallocation: cmd = cmd.replace("key-secret=sec0", "key-secret=sec0,preallocation=%s" % preallocation) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage device to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = utils_disk.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def create_vol(p_name, target_encrypt_params, vol_params): """ Create volume. :param p_name: Pool name. :param target_encrypt_params: encrypt parameters in dict. :param vol_params: Volume parameters dict. """ # Clean up dirty volumes if pool has. 
pv = libvirt_storage.PoolVolume(p_name) vol_name_list = pv.list_volumes() for vol_name in vol_name_list: pv.delete_volume(vol_name) volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.encryption = volxml.new_encryption(**target_encrypt_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def get_secret_list(): """ Get secret list. :return secret list """ logging.info("Get secret list ...") secret_list_result = virsh.secret_list() secret_list = secret_list_result.stdout.strip().splitlines() # First two lines contain table header followed by entries # for each secret, such as: # # UUID Usage # -------------------------------------------------------------------------------- # b4e8f6d3-100c-4e71-9f91-069f89742273 ceph client.libvirt secret secret_list = secret_list[2:] result = [] # If secret list is empty. if secret_list: for line in secret_list: # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) return result def check_top_image_in_xml(expected_top_image): """ check top image in src file :param expected_top_image: expect top image """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.devices.by_device_tag('disk') disk_xml = None for disk in disks: if disk.target['dev'] != device_target: continue else: disk_xml = disk.xmltreefile break logging.debug("disk xml in top: %s\n", disk_xml) src_file = disk_xml.find('source').get('file') if src_file is None: src_file = disk_xml.find('source').get('name') if src_file != expected_top_image: test.fail("Current top img %s is not the same with %s" % (src_file, expected_top_image)) # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_type = params.get("virt_disk_device_type", "file") device_format = params.get("virt_disk_device_format", "raw") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") volume_target_format = params.get("target_format", "raw") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None duplicated_encryption = "yes" == params.get("duplicated_encryption", "no") slice_support_enable = "yes" == params.get("slice_support_enable", "no") block_copy_test = "yes" == params.get("block_copy_test", "no") if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Clean up dirty secrets in test environments if there are. 
dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Create secret luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) libvirt.check_exit_status(ret) # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} elif use_auth_usage: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = gluster.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = {"attrs": {"protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(TMP_DATA_DIR, "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key 
if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) else: device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("pre clean up rbd disk if exists: %s", cmd_result) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = TMP_DATA_DIR pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} # Create dir based pool,and then create one volume on it. elif backend_storage_type == "dir": pool_name = params.get("pool_name", "dir_pool") pool_target = params.get("pool_target") pool_type = params.get("pool_type") emulated_image = params.get("emulated_image") image_name = params.get("dir_image_name", "luks_1.img") # Create and start dir_based pool. pvt = libvirt.PoolVolumeTest(test, params) if not os.path.exists(pool_target): os.mkdir(pool_target) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) sp = libvirt_storage.StoragePool() if not sp.is_pool_active(pool_name): sp.set_pool_autostart(pool_name) sp.start_pool(pool_name) # Create one volume on the pool. volume_name = params.get("vol_name") volume_alloc = params.get("vol_alloc") volume_cap_unit = params.get("vol_cap_unit") volume_cap = params.get("vol_cap") volume_target_path = params.get("sec_volume") volume_target_format = params.get("target_format") device_format = volume_target_format volume_target_encypt = params.get("target_encypt", "") volume_target_label = params.get("target_label") vol_params = {"name": volume_name, "capacity": int(volume_cap), "allocation": int(volume_alloc), "format": volume_target_format, "path": volume_target_path, "label": volume_target_label, "capacity_unit": volume_cap_unit} vol_encryption_params = {} vol_encryption_params.update({"format": "luks"}) vol_encryption_params.update({"secret": {"type": "passphrase", "uuid": luks_sec_uuid}}) try: # If target format is qcow2,need to create test image with "qemu-img create" if volume_target_format == "qcow2": option = params.get("luks_extra_elements") libvirt.create_local_disk("file", path=volume_target_path, extra=option, disk_format="qcow2", size="1") else: # If Libvirt version is lower than 2.5.0 # Creating luks encryption volume is not supported,so skip it. 
create_vol(pool_name, vol_encryption_params, vol_params) except AssertionError as info: err_msgs = ("create: invalid option") if str(info).count(err_msgs): test.cancel("Creating luks encryption volume " "is not supported on this libvirt version") else: test.error("Failed to create volume." "Error: %s" % str(info)) disk_src_dict = {'attrs': {'file': volume_target_path}} device_source = volume_target_path elif backend_storage_type == "file": tmp_dir = TMP_DATA_DIR image_name = params.get("file_image_name", "slice.img") device_source = os.path.join(tmp_dir, image_name) disk_src_dict = {'attrs': {'file': device_source}} else: test.cancel("Only iscsi/gluster/rbd/nfs/file can be tested for now.") logging.debug("device source is: %s", device_source) if backend_storage_type != "dir": encrypt_dev(device_source, params) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} print() driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption if duplicated_encryption: disk_xml.encryption = disk_encryption if slice_support_enable: if not libvirt_version.version_compare(6, 0, 0): test.cancel("Cannot put <slice> inside disk <source> " "in this libvirt version.") else: check_du_output = process.run("du -b %s" % device_source, shell=True).stdout_text slice_size = re.findall(r'[0-9]+', check_du_output) disk_source.slices = disk_xml.new_slices( **{"slice_type": "storage", "slice_offset": "0", "slice_size": slice_size[0]}) disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) try: vmxml.sync() vm.start() vm.wait_for_login() except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % str(xml_error)) except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") if volume_target_format == "qcow2": check_dev_format(device_source, fmt="qcow2") else: check_dev_format(device_source) if block_copy_test: # Create a transient VM transient_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if vm.is_alive(): vm.destroy(gracefully=False) virsh.undefine(vm_name, debug=True, ignore_status=False) virsh.create(transient_vmxml.xml, ignore_status=False, debug=True) expected_top_image = vm.get_blk_devices()[device_target].get('source') options = params.get("blockcopy_options") tmp_dir = TMP_DATA_DIR tmp_file 
= time.strftime("%Y-%m-%d-%H.%M.%S.img") dest_path = os.path.join(tmp_dir, tmp_file) # Need cover a few scenarios:single blockcopy, blockcopy and abort combined virsh.blockcopy(vm_name, device_target, dest_path, options, ignore_status=False, debug=True) if encryption_in_source: virsh.blockjob(vm_name, device_target, " --pivot", ignore_status=False) expected_top_image = dest_path else: virsh.blockjob(vm_name, device_target, " --abort", ignore_status=False) check_top_image_in_xml(expected_top_image) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": gluster.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name, **params) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
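# The test above encrypts the backend storage with encrypt_dev() and later verifies the
# format with check_dev_format(). Below is a standalone sketch of those two steps using
# the same qemu-img invocation pattern; the path, password and size are placeholder
# assumptions and qemu-img must be installed.
import subprocess


def make_luks_image(path, password, size="500M"):
    """Create a LUKS image whose key is passed in as a base64 qemu secret."""
    cmd = ("qemu-img create -f luks "
           "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
           "-o key-secret=sec0 %s %s" % (password, path, size))
    subprocess.run(cmd, shell=True, check=True)


def image_format(path):
    """Return the 'file format' value reported by qemu-img info."""
    out = subprocess.run(["qemu-img", "info", path],
                         stdout=subprocess.PIPE, check=True).stdout.decode()
    for line in out.splitlines():
        if line.startswith("file format:"):
            return line.split(":", 1)[1].strip()
    return ""


# Example (hypothetical path/password):
#   make_luks_image("/var/lib/libvirt/images/luks_demo.img", "password")
#   assert image_format("/var/lib/libvirt/images/luks_demo.img") == "luks"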
def run(test, params, env): """ Test disk encryption option. 1.Prepare test environment, destroy or suspend a VM. 2.Prepare tgtd and secret config. 3.Edit disks xml and start the domain. 4.Perform test operation. 5.Recover test environment. 6.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def check_save_restore(save_file): """ Test domain save and restore. """ # Save the domain. ret = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(ret) # Restore the domain. ret = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(ret) def check_snapshot(): """ Test domain snapshot operation. """ snapshot1 = "s1" snapshot2 = "s2" ret = virsh.snapshot_create_as(vm_name, snapshot1) libvirt.check_exit_status(ret) ret = virsh.snapshot_create_as(vm_name, "%s --disk-only --diskspec vda," "file=/tmp/testvm-snap1" % snapshot2) libvirt.check_exit_status(ret, True) ret = virsh.snapshot_create_as(vm_name, "%s --memspec file=%s,snapshot=external" " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2)) libvirt.check_exit_status(ret, True) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param vm. VM guest. :param target. Disk dev in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = None if target.startswith("vd"): if added_parts[0].startswith("vd"): added_part = added_parts[0] elif target.startswith("hd"): if added_parts[0].startswith("sd"): added_part = added_parts[0] elif target.startswith("sd"): added_part = added_parts[0] if not added_part: logging.error("Cann't see added partition in VM") return False cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) s, o = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", o) if s != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_qemu_cmd(): """ Check qemu-kvm command line options """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) if driver_iothread: cmd += " | grep iothread=iothread%s" % driver_iothread if process.system(cmd, ignore_status=True, shell=True): test.fail("Can't see disk option '%s' " "in command line" % cmd) def check_auth_plaintext(vm_name, password): """ Check if libvirt passed the plaintext of the chap authentication password to qemu. :param vm_name: The name of vm to be checked. :param password: The plaintext of password used for chap authentication. :return: True if using plaintext, False if not. """ cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s" % (vm_name, password)) return process.system(cmd, ignore_status=True, shell=True) == 0 # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") # Controller specific attributes. 
cntlr_type = params.get('controller_type', None) cntlr_model = params.get('controller_model', None) cntlr_index = params.get('controller_index', None) controller_addr_options = params.get('controller_addr_options', None) driver_iothread = params.get("driver_iothread") # iscsi options. iscsi_target = params.get("iscsi_target") iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") emulated_size = params.get("iscsi_image_size", "1") uuid = params.get("uuid", "") auth_uuid = "yes" == params.get("auth_uuid", "") auth_usage = "yes" == params.get("auth_usage", "") status_error = "yes" == params.get("status_error") define_error = "yes" == params.get("define_error", "no") test_save_snapshot = "yes" == params.get("test_save_snapshot", "no") test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") secret_uuid = "" # Start vm and get all partions in vm. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: chap_user = "" chap_passwd = "" if auth_uuid or auth_usage: auth_place_in_location = params.get("auth_place_in_location") if 'source' in auth_place_in_location and not libvirt_version.version_compare(3, 9, 0): test.cancel("place auth in source is not supported in current libvirt version") auth_type = params.get("auth_type") secret_usage_target = params.get("secret_usage_target") secret_usage_type = params.get("secret_usage_type") chap_user = params.get("iscsi_user") chap_passwd = params.get("iscsi_password") sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "iSCSI secret" sec_xml.auth_type = auth_type sec_xml.auth_username = chap_user sec_xml.usage = secret_usage_type sec_xml.target = secret_usage_target sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid == "": test.error("Failed to get secret uuid") # Set secret value encoding = locale.getpreferredencoding() secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding) ret = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size=emulated_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # If we use qcow2 disk format, should format iscsi disk first. if device_format == "qcow2": cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size)) process.run(cmd, shell=True) # Add disk xml. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} # For lun type device, iothread attribute need to be set in controller. if driver_iothread and device != "lun": driver_dict.update({"iothread": driver_iothread}) vmxml.iothreads = int(driver_iothread) elif driver_iothread: vmxml.iothreads = int(driver_iothread) disk_xml.driver = driver_dict # Check if we want to use a faked uuid. 
if not uuid: uuid = secret_uuid auth_dict = {} if auth_uuid: auth_dict = {"auth_user": chap_user, "secret_type": secret_usage_type, "secret_uuid": uuid} elif auth_usage: auth_dict = {"auth_user": chap_user, "secret_type": secret_usage_type, "secret_usage": secret_usage_target} disk_source = disk_xml.new_disk_source( **{"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]}) if auth_dict: disk_auth = disk_xml.new_auth(**auth_dict) if 'source' in auth_place_in_location: disk_source.auth = disk_auth if 'disk' in auth_place_in_location: disk_xml.auth = disk_auth disk_xml.source = disk_source # Sync VM xml. vmxml.add_device(disk_xml) # After virtio 1.0 is enabled, a lun type device needs to use virtio-scsi # instead of virtio, so an additional controller is needed. # Add controller. if device == "lun": ctrl = Controller(type_name=cntlr_type) if cntlr_model is not None: ctrl.model = cntlr_model if cntlr_index is not None: ctrl.index = cntlr_index ctrl_addr_dict = {} for addr_option in controller_addr_options.split(','): if addr_option != "": addr_part = addr_option.split('=') ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()}) ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict) # If driver_iothread is true, the iothread attribute needs to be added to the controller. if driver_iothread: ctrl_driver_dict = {} ctrl_driver_dict.update({"iothread": driver_iothread}) ctrl.driver = ctrl_driver_dict logging.debug("Controller XML is:%s", ctrl) if cntlr_type: vmxml.del_controller(cntlr_type) else: vmxml.del_controller("scsi") vmxml.add_device(ctrl) try: # Start the VM and check status. vmxml.sync() vm.start() if status_error: test.fail("VM started unexpectedly.") # Check Qemu command line if test_qemu_cmd: check_qemu_cmd() except virt_vm.VMStartError as e: if status_error: if re.search(uuid, str(e)): pass else: test.fail("VM failed to start. " "Error: %s" % str(e)) except xcepts.LibvirtXMLError as xml_error: if not define_error: test.fail("Failed to define VM:\n%s" % xml_error) else: # Check partitions in VM. if check_partitions: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") # Test domain save/restore/snapshot. if test_save_snapshot: save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name) check_save_restore(save_file) check_snapshot() if os.path.exists(save_file): os.remove(save_file) # Test libvirt doesn't pass the plaintext of chap password to qemu, # this function is implemented in libvirt 4.3.0-1. if (libvirt_version.version_compare(4, 3, 0) and (auth_uuid or auth_usage) and chap_passwd): if check_auth_plaintext(vm_name, chap_passwd): test.fail("Libvirt should not pass plaintext of chap " "password to qemu-kvm.") finally: # Delete snapshots. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Delete the tmp files. libvirt.setup_or_cleanup_iscsi(is_setup=False) # Clean up secret if secret_uuid: virsh.secret_undefine(secret_uuid)
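# The test above drives secret creation through secret_xml.SecretXML and the virsh module
# wrappers. Below is a rough equivalent of that flow using only the plain virsh CLI for an
# iSCSI usage secret: define the secret from XML, pull the UUID out of the output, then set
# its base64-encoded value. The usage target, password and output-parsing regex are
# placeholder assumptions, not values taken from the test configuration.
import base64
import re
import subprocess
import tempfile

SECRET_TEMPLATE = """<secret ephemeral='no' private='yes'>
  <description>iSCSI CHAP secret</description>
  <usage type='iscsi'>
    <target>{target}</target>
  </usage>
</secret>
"""


def define_iscsi_secret(usage_target, password):
    """Define an iSCSI usage secret, set its value, and return the UUID."""
    with tempfile.NamedTemporaryFile("w", suffix=".xml") as xml_file:
        xml_file.write(SECRET_TEMPLATE.format(target=usage_target))
        xml_file.flush()
        out = subprocess.run(["virsh", "secret-define", xml_file.name],
                             stdout=subprocess.PIPE, check=True).stdout.decode()
    # virsh prints "Secret <uuid> created"; pull the UUID out of that line.
    uuid = re.search(r"Secret (\S+) created", out).group(1)
    value = base64.b64encode(password.encode()).decode()
    subprocess.run(["virsh", "secret-set-value", uuid, "--base64", value],
                   check=True)
    return uuid


# Example (hypothetical values):
#   define_iscsi_secret("libvirtiscsi", "redhat")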
def run(test, params, env): """ Test the pull-mode backup function Steps: 1. create a vm with extra disk vdb 2. create some data on vdb 3. start a pull mode full backup on vdb 4. monitor block-threshold event on scratch file/dev 5. create some data on vdb's same position as step 2 to trigger event 6. check the block-threshold event captured """ # Basic case config hotplug_disk = "yes" == params.get("hotplug_disk", "no") original_disk_size = params.get("original_disk_size", "100M") original_disk_type = params.get("original_disk_type", "local") original_disk_target = params.get("original_disk_target", "vdb") event_type = params.get("event_type") usage_threshold = params.get("usage_threshold", "100") tmp_dir = data_dir.get_tmp_dir() local_hostname = params.get("loal_hostname", "localhost") # Backup config scratch_type = params.get("scratch_type", "file") reuse_scratch_file = "yes" == params.get("reuse_scratch_file") scratch_blkdev_size = params.get("scratch_blkdev_size", original_disk_size) # NBD service config nbd_protocol = params.get("nbd_protocol", "unix") nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket") nbd_tcp_port = params.get("nbd_tcp_port", "10809") nbd_hostname = local_hostname # LUKS config scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted") luks_passphrase = params.get("luks_passphrase", "password") # Open a new virsh session for event monitor virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC, auto_close=True) # Cancel the test if libvirt doesn't support related functions if not libvirt_version.version_compare(7, 0, 0): test.cancel("Current libvirt version doesn't support " "event monitor for incremental backup.") def get_backup_disk_index(vm_name, disk_name): """ Get the index of the backup disk to be monitored by the virsh event :param vm_name: vm name :param disk_name: virtual disk name, such as 'vdb' :return: the index of the virtual disk in backup xml """ backup_xml = virsh.backup_dumpxml(vm_name).stdout.strip() logging.debug("%s's current backup xml is: %s" % (vm_name, backup_xml)) backup_xml_dom = xml_utils.XMLTreeFile(backup_xml) index_xpath = "/disks/disk" for disk_element in backup_xml_dom.findall(index_xpath): if disk_element.get("name") == disk_name: return disk_element.get("index") def is_event_captured(virsh_session, re_pattern): """ Check if event captured :param virsh_session: the virsh session of the event monitor :param re_pattern: the re pattern used to represent the event :return: True means event captured, False means not """ ret_output = virsh_session.get_stripped_output() if (not re.search(re_pattern, ret_output, re.IGNORECASE)): return False logging.debug("event monitor output: %s", ret_output) return True try: vm_name = params.get("main_vm") vm = env.get_vm(vm_name) # Make sure there is no checkpoint metadata before test utils_backup.clean_checkpoints(vm_name) # Backup vm xml vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() utils_backup.enable_inc_backup_for_vm(vm) # Prepare libvirt secret if scratch_luks_encrypted: utils_secret.clean_up_secrets() luks_secret_uuid = libvirt.create_secret(params) virsh.secret_set_value(luks_secret_uuid, luks_passphrase, encode=True, debug=True) # Prepare the disk to be backed up. 
disk_params = {} disk_path = "" image_name = "{}_image.qcow2".format(original_disk_target) disk_path = os.path.join(tmp_dir, image_name) libvirt.create_local_disk("file", disk_path, original_disk_size, "qcow2") disk_params = { "device_type": "disk", "type_name": "file", "driver_type": "qcow2", "target_dev": original_disk_target, "source_file": disk_path } disk_params["target_dev"] = original_disk_target disk_xml = libvirt.create_disk_xml(disk_params) virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True) vm.start() session = vm.wait_for_login() new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys()) session.close() if len(new_disks_in_vm) != 1: test.fail("Test disk not prepared in vm") # Use the newly added disk as the test disk test_disk_in_vm = "/dev/" + new_disks_in_vm[0] vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vm_disks = list(vmxml.get_disk_all().keys()) checkpoint_list = [] backup_file_list = [] # Prepare backup xml backup_params = {"backup_mode": "pull"} # Set libvirt default nbd export name and bitmap name nbd_export_name = original_disk_target nbd_bitmap_name = "backup-" + original_disk_target backup_server_dict = {} if nbd_protocol == "unix": backup_server_dict["transport"] = "unix" backup_server_dict["socket"] = nbd_socket else: backup_server_dict["name"] = nbd_hostname backup_server_dict["port"] = nbd_tcp_port backup_params["backup_server"] = backup_server_dict backup_disk_xmls = [] for vm_disk in vm_disks: backup_disk_params = {"disk_name": vm_disk} if vm_disk != original_disk_target: backup_disk_params["enable_backup"] = "no" else: backup_disk_params["enable_backup"] = "yes" backup_disk_params["disk_type"] = scratch_type # Prepare nbd scratch file/dev params scratch_params = {"attrs": {}} scratch_path = None if scratch_type == "file": scratch_file_name = "scratch_file" scratch_path = os.path.join(tmp_dir, scratch_file_name) if reuse_scratch_file: libvirt.create_local_disk("file", scratch_path, original_disk_size, "qcow2") scratch_params["attrs"]["file"] = scratch_path elif scratch_type == "block": scratch_path = libvirt.setup_or_cleanup_iscsi( is_setup=True, image_size=scratch_blkdev_size) scratch_params["attrs"]["dev"] = scratch_path else: test.fail("We do not support backup scratch type: '%s'" % scratch_type) if scratch_luks_encrypted: encryption_dict = { "encryption": "luks", "secret": { "type": "passphrase", "uuid": luks_secret_uuid } } scratch_params["encryption"] = encryption_dict logging.debug("scratch params: %s", scratch_params) backup_disk_params["backup_scratch"] = scratch_params backup_disk_xml = utils_backup.create_backup_disk_xml( backup_disk_params) backup_disk_xmls.append(backup_disk_xml) logging.debug("disk list %s", backup_disk_xmls) backup_xml = utils_backup.create_backup_xml(backup_params, backup_disk_xmls) logging.debug("Backup Xml: %s", backup_xml) # Prepare checkpoint xml checkpoint_name = "checkpoint" checkpoint_list.append(checkpoint_name) cp_params = {"checkpoint_name": checkpoint_name} cp_params["checkpoint_desc"] = params.get("checkpoint_desc", "desc of cp") disk_param_list = [] for vm_disk in vm_disks: cp_disk_param = {"name": vm_disk} if vm_disk != original_disk_target: cp_disk_param["checkpoint"] = "no" else: cp_disk_param["checkpoint"] = "bitmap" cp_disk_bitmap = params.get("cp_disk_bitmap") if cp_disk_bitmap: cp_disk_param["bitmap"] = cp_disk_bitmap disk_param_list.append(cp_disk_param) checkpoint_xml = utils_backup.create_checkpoint_xml( cp_params, disk_param_list) logging.debug("Checkpoint Xml: %s", 
checkpoint_xml) # Generate some random data in vm's test disk def dd_data_to_testdisk(): """ Generate some data to vm's test disk """ dd_count = "1" dd_seek = "10" dd_bs = "1M" session = vm.wait_for_login() utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs, dd_seek, dd_count) session.close() dd_data_to_testdisk() # Start backup backup_options = backup_xml.xml + " " + checkpoint_xml.xml if reuse_scratch_file: backup_options += " --reuse-external" backup_result = virsh.backup_begin(vm_name, backup_options, debug=True) # Start to monitor block-threshold of backup disk's scratch file/dev backup_disk_index = get_backup_disk_index(vm_name, original_disk_target) if not backup_disk_index: test.fail("Backup xml has no index for disks.") backup_disk_obj = original_disk_target + "[%s]" % backup_disk_index virsh.domblkthreshold( vm_name, original_disk_target + "[%s]" % backup_disk_index, usage_threshold) event_cmd = "event %s %s --loop" % (vm_name, event_type) virsh_session.sendline(event_cmd) # Generate some random data to same position of vm's test disk dd_data_to_testdisk() # Check if the block-threshold event captured by monitor if event_type == "block-threshold": event_pattern = (".*block-threshold.*%s.*%s\[%s\].* %s .*" % (vm_name, original_disk_target, backup_disk_index, usage_threshold)) if not utils_misc.wait_for( lambda: is_event_captured(virsh_session, event_pattern), 10): test.fail("Event not captured by event monitor") # Abort backup job virsh.domjobabort(vm_name, debug=True) finally: # Remove checkpoints if "checkpoint_list" in locals() and checkpoint_list: for checkpoint_name in checkpoint_list: virsh.checkpoint_delete(vm_name, checkpoint_name) if vm.is_alive(): vm.destroy(gracefully=False) # Restoring vm vmxml_backup.sync() # Remove libvirt secret if "luks_secret_uuid" in locals(): virsh.secret_undefine(luks_secret_uuid, ignore_status=True) # Remove iscsi devices if scratch_type == "block": libvirt.setup_or_cleanup_iscsi(False) # Remove scratch file if "scratch_path" in locals(): if scratch_type == "file" and os.path.exists(scratch_path): os.remove(scratch_path)
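# The test above registers a write threshold with virsh.domblkthreshold and watches for the
# block-threshold event in a second virsh session. The sketch below shows the same idea with
# the plain virsh CLI: set the threshold, then wait for the event with "virsh event". The
# domain name, device spec, threshold and timeout are placeholder assumptions.
import subprocess


def watch_block_threshold(domain, device, threshold_bytes, timeout=30):
    """Set a write threshold and wait for the matching block-threshold event."""
    subprocess.run(["virsh", "domblkthreshold", domain, device,
                    str(threshold_bytes)], check=True)
    monitor = subprocess.Popen(
        ["virsh", "event", domain, "--event", "block-threshold",
         "--timeout", str(timeout)],
        stdout=subprocess.PIPE)
    out, _ = monitor.communicate()
    return b"block-threshold" in out


# Example (hypothetical values):
#   if watch_block_threshold("avocado-vt-vm1", "vdb[1]", 100):
#       print("threshold event received")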
def run(test, params, env): """ Test disk encryption option. 1. Prepare test environment, destroy or suspend a VM. 2. Prepare pool, volume. 3. Edit disks xml and start the domain. 4. Perform test operation. 5. Recover test environment. 6. Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def create_pool(p_name, p_type, p_target): """ Define and start a pool. :param p_name. Pool name. :param p_type. Pool type. :param p_target. Pool target path. """ p_xml = pool_xml.PoolXML(pool_type=p_type) p_xml.name = p_name p_xml.target_path = p_target if not os.path.exists(p_target): os.mkdir(p_target) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(p_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(p_name, **virsh_dargs) libvirt.check_exit_status(ret) def create_vol(p_name, target_encrypt_params, vol_params): """ Create volume. :param p_name. Pool name. :param target_encrypt_params. Encryption parameters in dict. :param vol_params. Volume parameters dict. :return: True if created successfully. """ volxml = vol_xml.VolXML() v_xml = volxml.new_vol(**vol_params) v_xml.encryption = volxml.new_encryption(**target_encrypt_params) v_xml.xmltreefile.write() ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) def create_secret(vol_path): """ Create secret. :param vol_path. Volume path. :return: secret id if created successfully. """ sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "volume secret" sec_xml.usage = 'volume' sec_xml.volume = vol_path sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) # Get secret uuid. try: encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() except IndexError: test.error("Failed to get newly created secret uuid") logging.debug("Secret uuid %s", encryption_uuid) # Set secret value. encoding = locale.getpreferredencoding() secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding) ret = virsh.secret_set_value(encryption_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(ret) return encryption_uuid
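# The helper above base64-encodes the passphrase before calling virsh.secret_set_value;
# under Python 3, base64.b64encode() requires bytes, which is what the encode()/decode()
# pair is for. A tiny standalone sketch of that round-trip is below; the passphrase is a
# placeholder value.
import base64
import locale


def encode_secret_value(passphrase):
    """Return the base64 text form of a secret passphrase."""
    encoding = locale.getpreferredencoding()
    return base64.b64encode(passphrase.encode(encoding)).decode(encoding)


# Example:
#   encode_secret_value("redhat")  # -> 'cmVkaGF0'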