def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ # Delete existed disks first. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks_dev = vmxml.get_devices(device_type="disk") for disk in disks_dev: vmxml.del_device(disk) if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} if driver_iothread: driver_dict.update({"iothread": driver_iothread}) disk_xml.driver = driver_dict disk_xml.target = {"dev": "vda", "bus": "virtio"} if default_pool: utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool, "glusterfs") utils.run("setsebool virt_use_fusefs on") virsh.pool_refresh("default") source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = { "protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img) } host_dict = {"name": host_ip, "port": "24007"} if transport: host_dict.update({"transport": transport}) disk_xml.source = disk_xml.new_disk_source(**{ "attrs": source_dict, "hosts": [host_dict] }) # set domain options if dom_iothreads: try: vmxml.iothreads = int(dom_iothreads) except ValueError: # 'iothreads' may not invalid number in negative tests logging.debug("Can't convert '%s' to integer type" % dom_iothreads) # Add the new disk xml. vmxml.add_device(disk_xml) vmxml.sync()
def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ # Delete existed disks first. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks_dev = vmxml.get_devices(device_type="disk") for disk in disks_dev: vmxml.del_device(disk) if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} if driver_iothread: driver_dict.update({"iothread": driver_iothread}) disk_xml.driver = driver_dict disk_xml.target = {"dev": "vda", "bus": "virtio"} if default_pool: utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool, "glusterfs") process.run("setsebool virt_use_fusefs on", shell=True) virsh.pool_refresh("default") source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = {"protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img)} host_dict = {"name": host_ip, "port": "24007"} if transport: host_dict.update({"transport": transport}) disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict, "hosts": [host_dict]}) # set domain options if dom_iothreads: try: vmxml.iothreads = int(dom_iothreads) except ValueError: # 'iothreads' may not invalid number in negative tests logging.debug("Can't convert '%s' to integer type" % dom_iothreads) # Add the new disk xml. vmxml.add_device(disk_xml) vmxml.sync()
def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ # Delete existed disks first. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks_dev = vmxml.get_devices(device_type="disk") for disk in disks_dev: vmxml.del_device(disk) if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} disk_xml.driver = driver_dict disk_xml.target = {"dev": "vda", "bus": "virtio"} if default_pool: utils.run( "mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" % (host_ip, vol_name, default_pool)) virsh.pool_refresh("default") source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = { "protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img) } host_dict = {"name": host_ip, "port": "24007"} if transport: host_dict.update({"transport": transport}) disk_xml.source = disk_xml.new_disk_source(**{ "attrs": source_dict, "hosts": [host_dict] }) # Add the new disk xml. vmxml.add_device(disk_xml) vmxml.sync()
def build_disk_xml(disk_img, disk_format, host_ip): """ Try to rebuild disk xml """ # Delete existed disks first. vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks_dev = vmxml.get_devices(device_type="disk") for disk in disks_dev: vmxml.del_device(disk) if default_pool: disk_xml = Disk(type_name="file") else: disk_xml = Disk(type_name="network") disk_xml.device = "disk" driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"} disk_xml.driver = driver_dict disk_xml.target = {"dev": "vda", "bus": "virtio"} if default_pool: utils.run("mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" % (host_ip, vol_name, default_pool)) virsh.pool_refresh("default") source_dict = {"file": "%s/%s" % (default_pool, disk_img)} disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict}) else: source_dict = {"protocol": "gluster", "name": "%s/%s" % (vol_name, disk_img)} host_dict = {"name": host_ip, "port": "24007"} if transport: host_dict.update({"transport": transport}) disk_xml.source = disk_xml.new_disk_source( **{"attrs": source_dict, "hosts": [host_dict]}) # Add the new disk xml. vmxml.add_device(disk_xml) vmxml.sync()
def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(pool_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) try: return vol_list[1] except IndexError: return None
def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) if len(vol_list) > 1: return vol_list[1] else: return None
def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout)) if len(vol_list) > 1: return vol_list[1] else: return None
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm cleanup_env = [False, False, False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" + " libvirt version.") acl_dargs = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True } def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) # Run Testcase try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utils_test.libvirt.define_pool(pool_name, pool_type, pool_target, cleanup_env) check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep case continue result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) check_exit_status(result, define_error) if define_error: # Redo under negative case to keep case continue result = virsh.pool_define(pool_xml) check_exit_status(result) # Step (3) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result, build_error) if build_error: # Redo under negative case to keep case continue result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, start_error) if start_error: # Redo under negative case to keep case continue result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expect fail, but run successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under negative case to keep case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = utils.system_output(cmd) lv_utils.vg_remove(vg_name) utils.run("pvremove %s" % pv_name) if cleanup_env[1]: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utils_test.libvirt.setup_or_cleanup_nfs(False)
def run(test, params, env): """ 1. Create a pool 2. Create n number of volumes(vol-create-as) 3. Check the volume details from the following commands vol-info vol-key vol-list vol-name vol-path vol-pool qemu-img info 4. Delete the volume and check in vol-list 5. Repeat the steps for number of volumes given 6. Delete the pool and target TODO: Handle negative testcases """ def delete_volume(expected_vol): """ Deletes Volume """ pool_name = expected_vol['pool_name'] vol_name = expected_vol['name'] pv = libvirt_storage.PoolVolume(pool_name) if not pv.delete_volume(vol_name): test.fail("Delete volume failed." % vol_name) else: logging.debug("Volume: %s successfully deleted on pool: %s", vol_name, pool_name) def get_vol_list(pool_name, vol_name): """ Parse the volume list """ output = virsh.vol_list(pool_name, "--details") rg = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)') vol = {} vols = [] volume_detail = None for line in output.stdout.splitlines(): match = re.search(rg, line.lstrip()) if match is not None: vol['name'] = match.group(1) vol['path'] = match.group(2) vol['type'] = match.group(3) vol['capacity'] = match.group(4) vol['allocation'] = match.group(5) vols.append(vol) vol = {} for volume in vols: if volume['name'] == vol_name: volume_detail = volume return volume_detail def norm_capacity(capacity): """ Normalize the capacity values to bytes """ # Normaize all values to bytes norm_capacity = {} des = { 'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K', 'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K', 'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M', 'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G', 'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T', 'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T' } val = { 'B': 1, 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } reg_list = re.compile(r'(\S+)\s(\S+)') match_list = re.search(reg_list, capacity['list']) if match_list is not None: mem_value = float(match_list.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['list'] = int(mem_value * norm) else: test.fail("Error in parsing capacity value in" " virsh vol-list") match_info = re.search(reg_list, capacity['info']) if match_info is not None: mem_value = float(match_info.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['info'] = int(mem_value * norm) else: test.fail("Error in parsing capacity value " "in virsh vol-info") norm_capacity['qemu_img'] = capacity['qemu_img'] norm_capacity['xml'] = int(capacity['xml']) return norm_capacity def check_vol(expected, avail=True): """ Checks the expected volume details with actual volume details from vol-dumpxml vol-list vol-info vol-key vol-path qemu-img info """ error_count = 0 pv = libvirt_storage.PoolVolume(expected['pool_name']) vol_exists = pv.volume_exists(expected['name']) if vol_exists: if not avail: error_count += 1 logging.error("Expect volume %s not exists but find it", expected['name']) return error_count else: if avail: error_count += 1 logging.error("Expect volume %s exists but not find it", expected['name']) return error_count else: logging.info("Volume %s checked successfully for deletion", expected['name']) return error_count actual_list = get_vol_list(expected['pool_name'], expected['name']) actual_info = pv.volume_info(expected['name']) # Get values from vol-dumpxml volume_xml = vol_xml.VolXML.new_from_vol_dumpxml( expected['name'], expected['pool_name']) # Check against virsh vol-key vol_key = virsh.vol_key(expected['name'], expected['pool_name']) if vol_key.stdout.strip() != volume_xml.key: logging.error( "Volume key is mismatch \n%s" "Key from xml: %s\nKey from command: %s", expected['name'], volume_xml.key, vol_key) error_count += 1 else: logging.debug( "virsh vol-key for volume: %s successfully" " checked against vol-dumpxml", expected['name']) # Check against virsh vol-name get_vol_name = virsh.vol_name(expected['path']) if get_vol_name.stdout.strip() != expected['name']: logging.error( "Volume name mismatch\n" "Expected name: %s\nOutput of vol-name: %s", expected['name'], get_vol_name) # Check against virsh vol-path vol_path = virsh.vol_path(expected['name'], expected['pool_name']) if expected['path'] != vol_path.stdout.strip(): logging.error( "Volume path mismatch for volume: %s\n" "Expected path: %s\nOutput of vol-path: %s\n", expected['name'], expected['path'], vol_path) error_count += 1 else: logging.debug( "virsh vol-path for volume: %s successfully checked" " against created volume path", expected['name']) # Check path against virsh vol-list if expected['path'] != actual_list['path']: logging.error( "Volume path mismatch for volume:%s\n" "Expected Path: %s\nPath from virsh vol-list: %s", expected['name'], expected['path'], actual_list['path']) error_count += 1 else: logging.debug( "Path of volume: %s from virsh vol-list " "successfully checked against created " "volume path", expected['name']) # Check path against virsh vol-dumpxml if expected['path'] != volume_xml.path: logging.error( "Volume path mismatch for volume: %s\n" "Expected Path: %s\nPath from virsh vol-dumpxml: %s", expected['name'], expected['path'], volume_xml.path) error_count += 1 else: logging.debug( "Path of volume: %s from virsh vol-dumpxml " "successfully checked against created volume path", expected['name']) # Check type against virsh vol-list if expected['type'] != actual_list['type']: logging.error( "Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-list: %s", expected['name'], expected['type'], actual_list['type']) error_count += 1 else: logging.debug( "Type of volume: %s from virsh vol-list " "successfully checked against the created " "volume type", expected['name']) # Check type against virsh vol-info if expected['type'] != actual_info['Type']: logging.error( "Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-info: %s", expected['name'], expected['type'], actual_info['Type']) error_count += 1 else: logging.debug( "Type of volume: %s from virsh vol-info successfully" " checked against the created volume type", expected['name']) # Check name against virsh vol-info if expected['name'] != actual_info['Name']: logging.error( "Volume name mismatch for volume: %s\n" "Expected name: %s\n Name from vol-info: %s", expected['name'], expected['name'], actual_info['Name']) error_count += 1 else: logging.debug( "Name of volume: %s from virsh vol-info successfully" " checked against the created volume name", expected['name']) # Check format from against qemu-img info img_info = utils_misc.get_image_info(expected['path']) if expected['format']: if expected['format'] != img_info['format']: logging.error( "Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from qemu-img info: %s", expected['name'], expected['format'], img_info['format']) error_count += 1 else: logging.debug( "Format of volume: %s from qemu-img info " "checked successfully against the created " "volume format", expected['name']) # Check format against vol-dumpxml if expected['format']: if expected['format'] != volume_xml.format: logging.error( "Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from vol-dumpxml: %s", expected['name'], expected['format'], volume_xml.format) error_count += 1 else: logging.debug( "Format of volume: %s from virsh vol-dumpxml " "checked successfully against the created" " volume format", expected['name']) logging.info(expected['encrypt_format']) # Check encrypt against vol-dumpxml if expected['encrypt_format']: # As the 'default' format will change to specific valut(qcow), so # just output it here logging.debug("Encryption format of volume '%s' is: %s", expected['name'], volume_xml.encryption.format) # And also output encryption secret uuid secret_uuid = volume_xml.encryption.secret['uuid'] logging.debug("Encryption secret of volume '%s' is: %s", expected['name'], secret_uuid) if expected['encrypt_secret']: if expected['encrypt_secret'] != secret_uuid: logging.error( "Encryption secret mismatch for volume: %s\n" "Expected secret uuid: %s\n" "Secret uuid from vol-dumpxml: %s", expected['name'], expected['encrypt_secret'], secret_uuid) error_count += 1 else: # If no set encryption secret value, automatically # generate a secret value at the time of volume creation logging.debug("Volume encryption secret is %s", secret_uuid) # Check pool name against vol-pool vol_pool = virsh.vol_pool(expected['path']) if expected['pool_name'] != vol_pool.stdout.strip(): logging.error( "Pool name mismatch for volume: %s against" "virsh vol-pool", expected['name']) error_count += 1 else: logging.debug( "Pool name of volume: %s checked successfully" " against the virsh vol-pool", expected['name']) norm_cap = {} capacity = {} capacity['list'] = actual_list['capacity'] capacity['info'] = actual_info['Capacity'] capacity['xml'] = volume_xml.capacity capacity['qemu_img'] = img_info['vsize'] norm_cap = norm_capacity(capacity) delta_size = int(params.get('delta_size', "1024")) if abs(expected['capacity'] - norm_cap['list']) > delta_size: logging.error( "Capacity mismatch for volume: %s against virsh" " vol-list\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['list']) error_count += 1 else: logging.debug( "Capacity value checked successfully against" " virsh vol-list for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['info']) > delta_size: logging.error( "Capacity mismatch for volume: %s against virsh" " vol-info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['info']) error_count += 1 else: logging.debug( "Capacity value checked successfully against" " virsh vol-info for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['xml']) > delta_size: logging.error( "Capacity mismatch for volume: %s against virsh" " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['xml']) error_count += 1 else: logging.debug( "Capacity value checked successfully against" " virsh vol-dumpxml for volume: %s", expected['name']) if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size: logging.error( "Capacity mismatch for volume: %s against " "qemu-img info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['qemu_img']) error_count += 1 else: logging.debug( "Capacity value checked successfully against" " qemu-img info for volume: %s", expected['name']) return error_count def get_all_secrets(): """ Return all exist libvirt secrets uuid in a list """ secret_list = [] secrets = virsh.secret_list().stdout.strip() for secret in secrets.splitlines()[2:]: secret_list.append(secret.strip().split()[0]) return secret_list # Initialize the variables pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("volume_name") vol_number = int(params.get("number_of_volumes", "2")) capacity = params.get("volume_size", "1048576") allocation = params.get("volume_allocation", "1048576") vol_format = params.get("volume_format") source_name = params.get("gluster_source_name", "gluster-vol1") source_path = params.get("gluster_source_path", "/") encrypt_format = params.get("vol_encrypt_format") encrypt_secret = params.get("encrypt_secret") emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") try: str_capa = utils_misc.normalize_data_size(capacity, "B") int_capa = int(str(str_capa).split('.')[0]) except ValueError: test.error("Translate size %s to 'B' failed" % capacity) try: str_capa = utils_misc.normalize_data_size(allocation, "B") int_allo = int(str(str_capa).split('.')[0]) except ValueError: test.error("Translate size %s to 'B' failed" % allocation) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Get exists libvirt secrets before test ori_secrets = get_all_secrets() expected_vol = {} vol_type = 'file' if pool_type in ['disk', 'logical']: vol_type = 'block' if pool_type == 'gluster': vol_type = 'network' logging.debug( "Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n" "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n" "vol_format:%s", pool_name, pool_type, pool_target, vol_name, vol_number, capacity, allocation, vol_format) libv_pvt = utlv.PoolVolumeTest(test, params) # Run Testcase total_err_count = 0 try: # Create a new pool libv_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, source_name=source_name, source_path=source_path) for i in range(vol_number): volume_name = "%s_%d" % (vol_name, i) expected_vol['pool_name'] = pool_name expected_vol['pool_type'] = pool_type expected_vol['pool_target'] = pool_target expected_vol['capacity'] = int_capa expected_vol['allocation'] = int_allo expected_vol['format'] = vol_format expected_vol['name'] = volume_name expected_vol['type'] = vol_type expected_vol['encrypt_format'] = encrypt_format expected_vol['encrypt_secret'] = encrypt_secret # Creates volume if pool_type != "gluster": expected_vol['path'] = pool_target + '/' + volume_name new_volxml = vol_xml.VolXML() new_volxml.name = volume_name new_volxml.capacity = int_capa new_volxml.allocation = int_allo if vol_format: new_volxml.format = vol_format encrypt_dict = {} if encrypt_format: encrypt_dict.update({"format": encrypt_format}) if encrypt_secret: encrypt_dict.update({"secret": {'uuid': encrypt_secret}}) if encrypt_dict: new_volxml.encryption = new_volxml.new_encryption( **encrypt_dict) logging.debug("Volume XML for creation:\n%s", str(new_volxml)) virsh.vol_create(pool_name, new_volxml.xml, debug=True) else: ip_addr = utlv.get_host_ipv4_addr() expected_vol['path'] = "gluster://%s/%s/%s" % ( ip_addr, source_name, volume_name) process.run("qemu-img create -f %s %s %s" % (vol_format, expected_vol['path'], capacity), shell=True) virsh.pool_refresh(pool_name) # Check volumes total_err_count += check_vol(expected_vol) # Delete volume and check for results delete_volume(expected_vol) total_err_count += check_vol(expected_vol, False) if total_err_count > 0: test.fail("Get %s errors when checking volume" % total_err_count) finally: # Clean up for sec in get_all_secrets(): if sec not in ori_secrets: virsh.secret_undefine(sec) try: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name=source_name) except test.fail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start()
def run(test, params, env): """ 1. Create a pool 2. Create n number of volumes(vol-create-as) 3. Check the volume details from the following commands vol-info vol-key vol-list vol-name vol-path vol-pool qemu-img info 4. Delete the volume and check in vol-list 5. Repeat the steps for number of volumes given 6. Delete the pool and target TODO: Handle negative testcases """ def delete_volume(expected_vol): """ Deletes Volume """ pool_name = expected_vol['pool_name'] vol_name = expected_vol['name'] pv = libvirt_storage.PoolVolume(pool_name) if not pv.delete_volume(vol_name): raise error.TestFail("Delete volume failed." % vol_name) else: logging.debug("Volume: %s successfully deleted on pool: %s", vol_name, pool_name) def get_vol_list(pool_name, vol_name): """ Parse the volume list """ output = virsh.vol_list(pool_name, "--details") rg = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)') vol = {} vols = [] volume_detail = None for line in output.stdout.splitlines(): match = re.search(rg, line.lstrip()) if match is not None: vol['name'] = match.group(1) vol['path'] = match.group(2) vol['type'] = match.group(3) vol['capacity'] = match.group(4) vol['allocation'] = match.group(5) vols.append(vol) vol = {} for volume in vols: if volume['name'] == vol_name: volume_detail = volume return volume_detail def norm_capacity(capacity): """ Normalize the capacity values to bytes """ # Normaize all values to bytes norm_capacity = {} des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K', 'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K', 'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M', 'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G', 'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T', 'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T' } val = {'B': 1, 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } reg_list = re.compile(r'(\S+)\s(\S+)') match_list = re.search(reg_list, capacity['list']) if match_list is not None: mem_value = float(match_list.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['list'] = int(mem_value * norm) else: raise error.TestFail("Error in parsing capacity value in" " virsh vol-list") match_info = re.search(reg_list, capacity['info']) if match_info is not None: mem_value = float(match_info.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['info'] = int(mem_value * norm) else: raise error.TestFail("Error in parsing capacity value " "in virsh vol-info") norm_capacity['qemu_img'] = capacity['qemu_img'] norm_capacity['xml'] = int(capacity['xml']) return norm_capacity def check_vol(expected, avail=True): """ Checks the expected volume details with actual volume details from vol-dumpxml vol-list vol-info vol-key vol-path qemu-img info """ error_count = 0 pv = libvirt_storage.PoolVolume(expected['pool_name']) vol_exists = pv.volume_exists(expected['name']) if vol_exists: if not avail: error_count += 1 logging.error("Expect volume %s not exists but find it", expected['name']) return error_count else: if avail: error_count += 1 logging.error("Expect volume %s exists but not find it", expected['name']) return error_count else: logging.info("Volume %s checked successfully for deletion", expected['name']) return error_count actual_list = get_vol_list(expected['pool_name'], expected['name']) actual_info = pv.volume_info(expected['name']) # Get values from vol-dumpxml volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'], expected['pool_name']) # Check against virsh vol-key vol_key = virsh.vol_key(expected['name'], expected['pool_name']) if vol_key.stdout.strip() != volume_xml.key: logging.error("Volume key is mismatch \n%s" "Key from xml: %s\nKey from command: %s", expected['name'], volume_xml.key, vol_key) error_count += 1 else: logging.debug("virsh vol-key for volume: %s successfully" " checked against vol-dumpxml", expected['name']) # Check against virsh vol-name get_vol_name = virsh.vol_name(expected['path']) if get_vol_name.stdout.strip() != expected['name']: logging.error("Volume name mismatch\n" "Expected name: %s\nOutput of vol-name: %s", expected['name'], get_vol_name) # Check against virsh vol-path vol_path = virsh.vol_path(expected['name'], expected['pool_name']) if expected['path'] != vol_path.stdout.strip(): logging.error("Volume path mismatch for volume: %s\n" "Expected path: %s\nOutput of vol-path: %s\n", expected['name'], expected['path'], vol_path) error_count += 1 else: logging.debug("virsh vol-path for volume: %s successfully checked" " against created volume path", expected['name']) # Check path against virsh vol-list if expected['path'] != actual_list['path']: logging.error("Volume path mismatch for volume:%s\n" "Expected Path: %s\nPath from virsh vol-list: %s", expected['name'], expected['path'], actual_list['path']) error_count += 1 else: logging.debug("Path of volume: %s from virsh vol-list " "successfully checked against created " "volume path", expected['name']) # Check path against virsh vol-dumpxml if expected['path'] != volume_xml.path: logging.error("Volume path mismatch for volume: %s\n" "Expected Path: %s\nPath from virsh vol-dumpxml: %s", expected['name'], expected['path'], volume_xml.path) error_count += 1 else: logging.debug("Path of volume: %s from virsh vol-dumpxml " "successfully checked against created volume path", expected['name']) # Check type against virsh vol-list if expected['type'] != actual_list['type']: logging.error("Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-list: %s", expected['name'], expected['type'], actual_list['type']) error_count += 1 else: logging.debug("Type of volume: %s from virsh vol-list " "successfully checked against the created " "volume type", expected['name']) # Check type against virsh vol-info if expected['type'] != actual_info['Type']: logging.error("Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-info: %s", expected['name'], expected['type'], actual_info['Type']) error_count += 1 else: logging.debug("Type of volume: %s from virsh vol-info successfully" " checked against the created volume type", expected['name']) # Check name against virsh vol-info if expected['name'] != actual_info['Name']: logging.error("Volume name mismatch for volume: %s\n" "Expected name: %s\n Name from vol-info: %s", expected['name'], expected['name'], actual_info['Name']) error_count += 1 else: logging.debug("Name of volume: %s from virsh vol-info successfully" " checked against the created volume name", expected['name']) # Check format from against qemu-img info img_info = utils_misc.get_image_info(expected['path']) if expected['format']: if expected['format'] != img_info['format']: logging.error("Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from qemu-img info: %s", expected['name'], expected['format'], img_info['format']) error_count += 1 else: logging.debug("Format of volume: %s from qemu-img info " "checked successfully against the created " "volume format", expected['name']) # Check format against vol-dumpxml if expected['format']: if expected['format'] != volume_xml.format: logging.error("Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from vol-dumpxml: %s", expected['name'], expected['format'], volume_xml.format) error_count += 1 else: logging.debug("Format of volume: %s from virsh vol-dumpxml " "checked successfully against the created" " volume format", expected['name']) logging.info(expected['encrypt_format']) # Check encrypt against vol-dumpxml if expected['encrypt_format']: # As the 'default' format will change to specific valut(qcow), so # just output it here logging.debug("Encryption format of volume '%s' is: %s", expected['name'], volume_xml.encryption.format) # And also output encryption secret uuid secret_uuid = volume_xml.encryption.secret['uuid'] logging.debug("Encryption secret of volume '%s' is: %s", expected['name'], secret_uuid) if expected['encrypt_secret']: if expected['encrypt_secret'] != secret_uuid: logging.error("Encryption secret mismatch for volume: %s\n" "Expected secret uuid: %s\n" "Secret uuid from vol-dumpxml: %s", expected['name'], expected['encrypt_secret'], secret_uuid) error_count += 1 else: # If no set encryption secret value, automatically # generate a secret value at the time of volume creation logging.debug("Volume encryption secret is %s", secret_uuid) # Check pool name against vol-pool vol_pool = virsh.vol_pool(expected['path']) if expected['pool_name'] != vol_pool.stdout.strip(): logging.error("Pool name mismatch for volume: %s against" "virsh vol-pool", expected['name']) error_count += 1 else: logging.debug("Pool name of volume: %s checked successfully" " against the virsh vol-pool", expected['name']) norm_cap = {} capacity = {} capacity['list'] = actual_list['capacity'] capacity['info'] = actual_info['Capacity'] capacity['xml'] = volume_xml.capacity capacity['qemu_img'] = img_info['vsize'] norm_cap = norm_capacity(capacity) delta_size = params.get('delta_size', "1024") if abs(expected['capacity'] - norm_cap['list']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-list\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['list']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-list for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['info']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['info']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-info for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['xml']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['xml']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-dumpxml for volume: %s", expected['name']) if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size: logging.error("Capacity mismatch for volume: %s against " "qemu-img info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['qemu_img']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " qemu-img info for volume: %s", expected['name']) return error_count def get_all_secrets(): """ Return all exist libvirt secrets uuid in a list """ secret_list = [] secrets = virsh.secret_list().stdout.strip() for secret in secrets.splitlines()[2:]: secret_list.append(secret.strip().split()[0]) return secret_list # Initialize the variables pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("volume_name") vol_number = int(params.get("number_of_volumes", "2")) capacity = params.get("volume_size", "1048576") allocation = params.get("volume_allocation", "1048576") vol_format = params.get("volume_format") source_name = params.get("gluster_source_name", "gluster-vol1") source_path = params.get("gluster_source_path", "/") encrypt_format = params.get("vol_encrypt_format") encrypt_secret = params.get("encrypt_secret") emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") try: str_capa = utils_misc.normalize_data_size(capacity, "B") int_capa = int(str(str_capa).split('.')[0]) except ValueError: raise error.TestError("Translate size %s to 'B' failed" % capacity) try: str_capa = utils_misc.normalize_data_size(allocation, "B") int_allo = int(str(str_capa).split('.')[0]) except ValueError: raise error.TestError("Translate size %s to 'B' failed" % allocation) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Get exists libvirt secrets before test ori_secrets = get_all_secrets() expected_vol = {} vol_type = 'file' if pool_type in ['disk', 'logical']: vol_type = 'block' if pool_type == 'gluster': vol_type = 'network' logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n" "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n" "vol_format:%s", pool_name, pool_type, pool_target, vol_name, vol_number, capacity, allocation, vol_format) libv_pvt = utlv.PoolVolumeTest(test, params) # Run Testcase total_err_count = 0 try: # Create a new pool libv_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, source_name=source_name, source_path=source_path) for i in range(vol_number): volume_name = "%s_%d" % (vol_name, i) expected_vol['pool_name'] = pool_name expected_vol['pool_type'] = pool_type expected_vol['pool_target'] = pool_target expected_vol['capacity'] = int_capa expected_vol['allocation'] = int_allo expected_vol['format'] = vol_format expected_vol['name'] = volume_name expected_vol['type'] = vol_type expected_vol['encrypt_format'] = encrypt_format expected_vol['encrypt_secret'] = encrypt_secret # Creates volume if pool_type != "gluster": expected_vol['path'] = pool_target + '/' + volume_name new_volxml = vol_xml.VolXML() new_volxml.name = volume_name new_volxml.capacity = int_capa new_volxml.allocation = int_allo if vol_format: new_volxml.format = vol_format encrypt_dict = {} if encrypt_format: encrypt_dict.update({"format": encrypt_format}) if encrypt_secret: encrypt_dict.update({"secret": {'uuid': encrypt_secret}}) if encrypt_dict: new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict) logging.debug("Volume XML for creation:\n%s", str(new_volxml)) virsh.vol_create(pool_name, new_volxml.xml, debug=True) else: ip_addr = utlv.get_host_ipv4_addr() expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr, source_name, volume_name) utils.run("qemu-img create -f %s %s %s" % (vol_format, expected_vol['path'], capacity)) virsh.pool_refresh(pool_name) # Check volumes total_err_count += check_vol(expected_vol) # Delete volume and check for results delete_volume(expected_vol) total_err_count += check_vol(expected_vol, False) if total_err_count > 0: raise error.TestFail("Get %s errors when checking volume" % total_err_count) finally: # Clean up for sec in get_all_secrets(): if sec not in ori_secrets: virsh.secret_undefine(sec) try: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name=source_name) except error.TestFail, detail: logging.error(str(detail)) if multipathd_status: multipathd.start()
def run(test, params, env): """ Do test for vol-download and vol-upload Basic steps are 1. Create pool with type defined in cfg 2. Create image with writing data in it 3. Get md5 value before operation 4. Do vol-download/upload with options(offset, length) 5. Check md5 value after operation """ pool_type = params.get("vol_download_upload_pool_type") pool_name = params.get("vol_download_upload_pool_name") pool_target = params.get("vol_download_upload_pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_download_upload_vol_name") file_name = params.get("vol_download_upload_file_name") file_path = os.path.join(data_dir.get_tmp_dir(), file_name) offset = params.get("vol_download_upload_offset") length = params.get("vol_download_upload_length") capacity = params.get("vol_download_upload_capacity") allocation = params.get("vol_download_upload_allocation") frmt = params.get("vol_download_upload_format") operation = params.get("vol_download_upload_operation") create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes")) setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") b_luks_encrypt = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} sparse_option_support = "yes" == params.get("sparse_option_support", "yes") with_clusterSize = "yes" == params.get("with_clusterSize") vol_clusterSize = params.get("vol_clusterSize", "64") vol_clusterSize_unit = params.get("vol_clusterSize_unit") vol_format = params.get("vol_format", "qcow2") libvirt_version.is_libvirt_feature_supported(params) # libvirt acl polkit related params uri = params.get("virsh_uri") if uri and not utils_split_daemons.is_modular_daemon(): uri = "qemu:///system" unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.error("API acl test not supported in current" " libvirt version.") # Destroy VM. if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest", pre_disk_vol=["50M"]) # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if pool_type == "disk": vol_name = utlv.new_disk_vol_name(pool_name) if vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % vol_name utlv.update_polkit_rule(params, vol_pat, new_value) if create_vol: if b_luks_encrypt: if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS format not supported in " "current libvirt version") params['sec_volume'] = os.path.join(pool_target, vol_name) luks_sec_uuid = utlv.create_secret(params) ret = virsh.secret_set_value(luks_sec_uuid, encryption_password, encode=True) utlv.check_exit_status(ret) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(capacity) vol_arg['allocation'] = int(allocation) if with_clusterSize: vol_arg['format'] = vol_format vol_arg['clusterSize'] = int(vol_clusterSize) vol_arg['clusterSize_unit'] = vol_clusterSize_unit create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name) virsh.pool_refresh(pool_name, debug=True) vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip() # iscsi volume name is different from others if pool_type == "iscsi": # Due to BZ 1843791, the volume cannot be obtained sometimes. if len(vol_list.splitlines()) < 3: test.fail("Failed to get iscsi type volume.") vol_name = vol_list.split('\n')[2].split()[0] vol_path = virsh.vol_path(vol_name, pool_name, ignore_status=False).stdout.strip() logging.debug("vol_path is %s", vol_path) # Add command options if pool_type is not None: options = " --pool %s" % pool_name if offset is not None: options += " --offset %s" % offset offset = int(offset) else: offset = 0 if length is not None: options += " --length %s" % length length = int(length) else: length = 0 logging.debug("%s options are %s", operation, options) if operation == "upload": # write data to file write_file(file_path) # Set length for calculate the offset + length in the following # func get_pre_post_digest() and digest() if length == 0: length = 1048576 def get_pre_post_digest(): """ Get pre region and post region digest if have offset and length :return: pre digest and post digest """ # Get digest of pre region before offset if offset != 0: digest_pre = digest(vol_path, 0, offset) else: digest_pre = 0 logging.debug("pre region digest read from %s 0-%s is %s", vol_path, offset, digest_pre) # Get digest of post region after offset+length digest_post = digest(vol_path, offset + length, 0) logging.debug("post region digest read from %s %s-0 is %s", vol_path, offset + length, digest_post) return (digest_pre, digest_post) # Get pre and post digest before operation for compare (ori_pre_digest, ori_post_digest) = get_pre_post_digest() ori_digest = digest(file_path, 0, 0) logging.debug("ori digest read from %s is %s", file_path, ori_digest) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume upload result = virsh.vol_upload(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation (aft_pre_digest, aft_post_digest) = get_pre_post_digest() aft_digest = digest(vol_path, offset, length) logging.debug("aft digest read from %s is %s", vol_path, aft_digest) # Compare the pre and post part before and after if ori_pre_digest == aft_pre_digest and \ ori_post_digest == aft_post_digest: logging.info("file pre and aft digest match") else: test.fail("file pre or post digests do not" "match, in %s", operation) if operation == "download": # Write data to volume write_file(vol_path) # Record the digest value before operation ori_digest = digest(vol_path, offset, length) logging.debug("original digest read from %s is %s", vol_path, ori_digest) process.run("touch %s" % file_path, ignore_status=True, shell=True) if setup_libvirt_polkit: process.run("chmod 666 %s" % file_path, ignore_status=True, shell=True) # Do volume download result = virsh.vol_download(vol_name, file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) if result.exit_status == 0: # Get digest after operation aft_digest = digest(file_path, 0, 0) logging.debug("new digest read from %s is %s", file_path, aft_digest) if operation != "mix": if result.exit_status != 0: test.fail("Fail to %s volume: %s" % (operation, result.stderr)) # Compare the change part on volume and file if ori_digest == aft_digest: logging.info("file digests match, volume %s succeed", operation) else: test.fail("file digests do not match, volume %s failed" % operation) if operation == "mix": target = params.get("virt_disk_device_target", "vdb") disk_file_path = os.path.join(pool_target, file_name) # Create one disk xml and attach it to VM. custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file', 'disk', target, 'virtio') ret = virsh.attach_device(vm_name, custom_disk_xml.xml, flagstr="--config", debug=True) libvirt.check_exit_status(ret) if vm.is_dead(): vm.start() # Write 100M data into disk. data_size = 100 write_disk(test, vm, target, data_size) data_size_in_bytes = data_size * 1024 * 1024 # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Download volume to local with sparse option. download_spare_file = "download-sparse.raw" download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file) options += " --sparse" result = virsh.vol_download(file_name, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) libvirt.check_exit_status(result) #Check download image size. one_g_in_bytes = 1073741824 download_img_info = utils_misc.get_image_info(download_file_path) download_disk_size = int(download_img_info['dsize']) if (download_disk_size < data_size_in_bytes or download_disk_size >= one_g_in_bytes): test.fail("download image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (download_disk_size, data_size_in_bytes)) # Create one upload sparse image file. upload_sparse_file = "upload-sparse.raw" upload_file_path = os.path.join(pool_target, upload_sparse_file) libvirt.create_local_disk('file', upload_file_path, '1', 'raw') # Refresh directory pool. virsh.pool_refresh(pool_name, debug=True) # Do volume upload, upload sparse file which download last time. result = virsh.vol_upload(upload_sparse_file, download_file_path, options, unprivileged_user=unpri_user, uri=uri, debug=True) upload_img_info = utils_misc.get_image_info(upload_file_path) upload_disk_size = int(upload_img_info['dsize']) if (upload_disk_size < data_size_in_bytes or upload_disk_size >= one_g_in_bytes): test.fail("upload image size:%d is less than the generated " "data size:%d or greater than or equal to 1G." % (upload_disk_size, data_size_in_bytes)) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest") for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) if os.path.isfile(file_path): os.remove(file_path)
def run(test, params, env): """ Test startupPolicy for CD-ROM/floppy/Volume disks. Steps: 1. Prepare disk media image. 2. Setup startupPolicy for a disk. 3. Start the domain. 4. Save the domain. 5. Remove the disk source file and restore the domain. 6. Update startupPolicy for a disk. 7. Destroy the domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) startup_policy = params.get("policy") def create_iscsi_pool(): """ Setup iSCSI target,and create one iSCSI pool. """ libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size='1G', chap_user="", chap_passwd="", portal_ip=disk_src_host) # Define an iSCSI pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = pool_name poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iSCSI pool. virsh.pool_destroy(pool_name, **virsh_dargs) cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) def create_volume(pvt, created_vol_name=None): """ Create iSCSI volume. :param pvt: PoolVolumeTest object :param created_vol_name: Created volume name """ try: if pool_type == "iscsi": create_iscsi_pool() else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format, capacity=capacity, allocation=None, pool_name=pool_name) except Exception as pool_exception: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) test.error("Error occurred when prepare" + "pool xml with message %s:\n" % str(pool_exception)) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(pool_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) try: return vol_list[1] except IndexError: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: tmp_vol_name, tmp_vol_path = vol_info else: test.error("Failed to get volume info") process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'), shell=True) return vol_info def check_disk_source(vm_name, target_dev, expect_value): """ Check the disk source: file and startupPolicy. :param vm_name: Domain name :param target_dev: Disk's target device :param expect_value: Expect value of source file and source startupPolicy """ logging.debug("Expect source file is '%s'", expect_value[0]) logging.debug("Expect source startupPolicy is '%s'", expect_value[1]) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() source_value = [] try: disk_source = disks[target_dev].find('source') source_value.append(disk_source.get('file')) source_value.append(disk_source.get('startupPolicy')) except KeyError: test.error("No %s in domain %s" % (target_dev, vm_name)) logging.debug("Actual source file is '%s'", source_value[0]) logging.debug("Actual source startupPolicy is '%s'", source_value[1]) if source_value == expect_value: logging.debug("Domain disk XML check pass") else: test.error("Domain disk XML check fail") def create_disk_xml(): """ Create a disk xml file for attaching to a domain. """ if disk_type == "file": process.run("qemu-img create %s %s" % (media_file, image_size), shell=True) disk_params = { 'device_type': device_type, 'type_name': disk_type, 'target_dev': target_dev, 'target_bus': target_bus } if disk_type == "file": disk_params_src = { 'source_protocol': "file", 'source_file': media_file, 'source_startupPolicy': startup_policy } elif disk_type == "volume": disk_params_src = { 'source_pool': pool_name, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_startupPolicy': startup_policy } if pool_type == "iscsi": disk_params_src.update({'source_mode': "host"}) disk_params.update(disk_params_src) disk_xml = libvirt.create_disk_xml(disk_params) shutil.copyfile(disk_xml, disk_xml_file) return disk_xml def check_in_vm(old_parts): """ Check mount/read/write disk in VM. :param old_parts: pre-operated partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = utils_disk.get_parts_list(session) logging.debug("new parted:%s", new_parts) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False if 'sr' not in added_part and 'fd' not in added_part: cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test".format( added_part)) status, output = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", output) if status != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str): """ Check updated policy after executing virsh update-device. :param origin_policy: the inherit startup policy value. :param policy_list: updated policy list. :param xml_policy_file: xml file for startupPolicy. :param device_type: device type,cdrom or disk.,etc :param flag_str: it can be --config,--live and --persistent. """ for policy in policy_list: xmltreefile = XMLTreeFile(xml_policy_file) try: policy_item = xmltreefile.find('/source') policy_item.set('startupPolicy', policy) except AttributeError as elem_attr: test.error("Fail to find startupPolicy attribute.%s", str(elem_attr)) xmltreefile.write(xml_policy_file, encoding="UTF-8") ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True) if all([device_type == "disk", policy == "requisite"]): libvirt.check_exit_status(ret, True) return else: libvirt.check_exit_status(ret) def check_policy_value(active_policy, inactive_policy): """ Check policy value in dumpxml with active or inactive option :param active_policy: active policy attribute value :param inactive_policy: inactive policy attribute value """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list) - 1] if not active_policy == disk.source.attrs["startupPolicy"]: test.error( "Actual policy:%s in active state is not equal to expected:%s" % (active_policy, disk.source.attrs["startupPolicy"])) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list) - 1] if not inactive_policy == disk.source.attrs["startupPolicy"]: test.error( "Actual policy:%s in inactive state is not equal to expected: %s" % (inactive_policy, disk.source.attrs["startupPolicy"])) if flag_str == "--live": check_policy_value(policy, origin_policy) elif flag_str == "--config": check_policy_value(origin_policy, policy) elif flag_str == "--persistent": check_policy_value(policy, policy) def check_source_update(xml_policy_file): """ Update source and policy at the same time,then check those changes. :param xml_policy_file: VM xml policy file """ xmltreefile = XMLTreeFile(xml_policy_file) policy_item = xmltreefile.find('/source') def configure_startup_policy(update=False, policy='optional'): """ Configure startupPolicy attribute value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ if update: del policy_item.attrib["startupPolicy"] else: policy_item.set("startupPolicy", policy) flag_option = "--live" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False # Update source and startUpPolicy attribute value. def update_source_policy(update=True, policy='optional'): """ Update startupPolicy source value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ source_file = policy_item.get('file') if update: new_source_file = source_file + ".empty" else: new_source_file = source_file + ".new" shutil.copyfile(source_file, new_source_file) policy_item.set("file", new_source_file) policy_item.set("startupPolicy", policy) flag_option = "--persistent" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False function_list = [ configure_startup_policy, update_source_policy, configure_startup_policy, update_source_policy ] function_parameter = [False, False, True, True] # Loop all above scenarios to update device. for index in list(range(len(function_list))): try: func = function_list[index] para = function_parameter[index] flag_option, update_error = func(para) ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True) libvirt.check_exit_status(ret, expect_error=update_error) except AttributeError as elem_attr: test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr)) except Exception as update_device_exception: test.error("Fail to update device:%s" % str(update_device_exception)) finally: source_file = policy_item.get('file') new_source_file = source_file + ".new" if os.path.exists(new_source_file): os.remove(new_source_file) def rename_file(source_file, target_file, revert=False): """ Rename a file or revert it. :param source_file: The source file name. :param target_file: The target file name. :param revert: It can be True or False. """ try: if not revert: os.rename(source_file, target_file) logging.debug("Rename %s to %s", source_file, target_file) else: os.rename(target_file, source_file) logging.debug("Rename %s to %s", target_file, source_file) except OSError as err: test.fail("Rename image failed: %s" % str(err)) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = utils_disk.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Get start,restore configuration parameters. start_error = "yes" == params.get("start_error", "no") restore_error = "yes" == params.get("restore_error", "no") virsh_dargs = {'debug': True, 'ignore_status': True} attach_option = params.get("attach_option") # Create disk xml and attach it. device_type = params.get("device_type") disk_type = params.get("disk_type", "network") disk_src_host = params.get("disk_source_host", "127.0.0.1") target_dev = params.get("target_dev") target_bus = params.get("disk_target_bus", "virtio") image_size = params.get("image_size", "1.44M") emulated_image = "emulated-iscsi" # Storage pool and volume related paramters. pool_name = params.get("pool_name", "iscsi_pool") pool_type = params.get("pool_type") pool_target = params.get("pool_target", "/dev/disk/by-path") pool_src_host = params.get("pool_source_host", "127.0.0.1") vol_name = params.get("volume_name") capacity = params.get("volume_size", "1048576") vol_format = params.get("volume_format") # Source file parameters. media_name = params.get("media_name") media_file = os.path.join(data_dir.get_tmp_dir(), media_name) media_file_new = media_file + ".new" save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") snapshot_name = "s1" # Policy related paramters. disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml") disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(), "attach_policy_disk.xml") update_policy = "yes" == params.get("update_policy", "no") policy_only = "yes" == params.get("policy_only", "no") update_policy_list = params.get("update_policy_list").split() expect_value = [None, startup_policy] try: if disk_type == "volume": pvt = libvirt.PoolVolumeTest(test, params) vol_name, vol_path = create_volume(pvt, vol_name) vol_path_new = vol_path + ".new" # Create disk xml. create_disk_xml() if vm.is_alive(): vm.destroy() try: # Backup disk xml file for policy update if update_policy=True. if update_policy: shutil.copyfile(disk_xml_file, disk_xml_policy_file) result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file, flagstr="--config", **virsh_dargs) # For iSCSI pool volume,startupPolicy attribute is not valid for it. # Moreover,setting disk 'requisite' is allowed only for cdrom or floppy. if pool_type == "iscsi" or all( [device_type == "disk", startup_policy == "requisite"]): libvirt.check_exit_status(result, expect_error=True) return else: libvirt.check_exit_status(result, expect_error=False) except Exception as attach_device_exception: logging.debug("Attach device throws exception:%s", str(attach_device_exception)) os.remove(media_file) test.error("Attach %s fail" % device_type) # Check update policy operations. if disk_type == "file" and update_policy: vm.start() if policy_only: check_policy_update(startup_policy, update_policy_list, disk_xml_policy_file, device_type, attach_option) else: check_source_update(disk_xml_policy_file) elif disk_type == "file": # Step 1. Start domain and destroy it normally vm.start() vm.destroy() # Step 2. Remove the source_file then start the domain rename_file(media_file, media_file_new) result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # For libvirt version >=2.0.0, feature is updated and startup policy attribute # can not exist alone without source protocol. if not start_error and not libvirt_version.version_compare( 2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 3. Move back the source file and start the domain(if needed). rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): vm.start() # Step 4. Save the domain normally, then remove the source file # and restore it back vm.save_to_file(save_file) rename_file(media_file, media_file_new) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) if not restore_error and not libvirt_version.version_compare( 2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 5. Move back the source file and restore the domain(if needed) rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=False) elif disk_type == "volume": # Step 1. Start domain and destroy it normally. vm.start() # Step 1 Start VM successfully. if not check_in_vm(old_parts): test.fail("Check disk partitions in VM failed") # Step 2 Destroy VM, move the volume to other place, refresh the pool, then start the guest. vm.destroy() rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # Step 3 Move back the source file and start. rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if not vm.is_alive(): vm.start() # Step 4 Save the domain normally, then remove the source file,then restore domain. vm.save_to_file(save_file) rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) # Step 5, Create snapshot,move the source to other place,then revert snapshot. if device_type == "disk": rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if restore_error: result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result) ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs) libvirt.check_exit_status(ret) rename_file(vol_path, vol_path_new) ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs) # Clean up snapshot. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() if disk_type == "volume": pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) if os.path.exists(save_file): os.remove(save_file) if os.path.exists(disk_xml_file): os.remove(disk_xml_file) if os.path.exists(media_file): os.remove(media_file) if os.path.exists(disk_xml_policy_file): os.remove(disk_xml_policy_file)
def run(test, params, env): """ Test startupPolicy for CD-ROM/floppy/Volume disks. Steps: 1. Prepare disk media image. 2. Setup startupPolicy for a disk. 3. Start the domain. 4. Save the domain. 5. Remove the disk source file and restore the domain. 6. Update startupPolicy for a disk. 7. Destroy the domain. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) startup_policy = params.get("policy") def create_iscsi_pool(): """ Setup iSCSI target,and create one iSCSI pool. """ libvirt.setup_or_cleanup_iscsi(is_setup=False) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, image_size='1G', chap_user="", chap_passwd="", portal_ip=disk_src_host) # Define an iSCSI pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = pool_name poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iSCSI pool. virsh.pool_destroy(pool_name, **virsh_dargs) cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) def create_volume(pvt, created_vol_name=None): """ Create iSCSI volume. :param pvt: PoolVolumeTest object :param created_vol_name: Created volume name """ try: if pool_type == "iscsi": create_iscsi_pool() else: pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) pvt.pre_vol(vol_name=created_vol_name, vol_format=vol_format, capacity=capacity, allocation=None, pool_name=pool_name) except Exception as pool_exception: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) test.error("Error occurred when prepare" + "pool xml with message %s:\n" % str(pool_exception)) def get_vol(): """Get the volume info""" # Refresh the pool cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(pool_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) vol_list = [] vol_list = re.findall(r"(\S+)\ +(\S+)", str(cmd_result.stdout.strip())) try: return vol_list[1] except IndexError: return None # Wait for a while so that we can get the volume info vol_info = utils_misc.wait_for(get_vol, 10) if vol_info: tmp_vol_name, tmp_vol_path = vol_info else: test.error("Failed to get volume info") process.run('qemu-img create -f qcow2 %s %s' % (tmp_vol_path, '100M'), shell=True) return vol_info def check_disk_source(vm_name, target_dev, expect_value): """ Check the disk source: file and startupPolicy. :param vm_name: Domain name :param target_dev: Disk's target device :param expect_value: Expect value of source file and source startupPolicy """ logging.debug("Expect source file is '%s'", expect_value[0]) logging.debug("Expect source startupPolicy is '%s'", expect_value[1]) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disks = vmxml.get_disk_all() source_value = [] try: disk_source = disks[target_dev].find('source') source_value.append(disk_source.get('file')) source_value.append(disk_source.get('startupPolicy')) except KeyError: test.error("No %s in domain %s" % (target_dev, vm_name)) logging.debug("Actual source file is '%s'", source_value[0]) logging.debug("Actual source startupPolicy is '%s'", source_value[1]) if source_value == expect_value: logging.debug("Domain disk XML check pass") else: test.error("Domain disk XML check fail") def create_disk_xml(): """ Create a disk xml file for attaching to a domain. """ if disk_type == "file": process.run("qemu-img create %s %s" % (media_file, image_size), shell=True) disk_params = {'device_type': device_type, 'type_name': disk_type, 'target_dev': target_dev, 'target_bus': target_bus} if disk_type == "file": disk_params_src = {'source_protocol': "file", 'source_file': media_file, 'source_startupPolicy': startup_policy} elif disk_type == "volume": disk_params_src = {'source_pool': pool_name, 'source_volume': vol_name, 'driver_type': 'qcow2', 'source_startupPolicy': startup_policy} if pool_type == "iscsi": disk_params_src.update({'source_mode': "host"}) disk_params.update(disk_params_src) disk_xml = libvirt.create_disk_xml(disk_params) shutil.copyfile(disk_xml, disk_xml_file) return disk_xml def check_in_vm(old_parts): """ Check mount/read/write disk in VM. :param old_parts: pre-operated partitions in VM. :return: True if check successfully. """ try: session = vm.wait_for_login() new_parts = libvirt.get_parts_list(session) logging.debug("new parted:%s", new_parts) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False added_part = added_parts[0] if not added_part: logging.error("Can't see added partition in VM") return False if 'sr' not in added_part and 'fd' not in added_part: cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.info("Check disk operation in VM:\n%s", output) if status != 0: return False return True except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False def check_policy_update(origin_policy, policy_list, xml_policy_file, device_type, flag_str): """ Check updated policy after executing virsh update-device. :param origin_policy: the inherit startup policy value. :param policy_list: updated policy list. :param xml_policy_file: xml file for startupPolicy. :param device_type: device type,cdrom or disk.,etc :param flag_str: it can be --config,--live and --persistent. """ for policy in policy_list: xmltreefile = XMLTreeFile(xml_policy_file) try: policy_item = xmltreefile.find('/source') policy_item.set('startupPolicy', policy) except AttributeError as elem_attr: test.error("Fail to find startupPolicy attribute.%s", str(elem_attr)) xmltreefile.write(xml_policy_file, encoding="UTF-8") ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_str, debug=True) if all([device_type == "disk", policy == "requisite"]): libvirt.check_exit_status(ret, True) return else: libvirt.check_exit_status(ret) def check_policy_value(active_policy, inactive_policy): """ Check policy value in dumpxml with active or inactive option :param active_policy: active policy attribute value :param inactive_policy: inactive policy attribute value """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not active_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in active state is not equal to expected:%s" % (active_policy, disk.source.attrs["startupPolicy"])) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) disk_list = vmxml.devices.by_device_tag("disk") disk = disk_list[len(disk_list)-1] if not inactive_policy == disk.source.attrs["startupPolicy"]: test.error("Actual policy:%s in inactive state is not equal to expected: %s" % (inactive_policy, disk.source.attrs["startupPolicy"])) if flag_str == "--live": check_policy_value(policy, origin_policy) elif flag_str == "--config": check_policy_value(origin_policy, policy) elif flag_str == "--persistent": check_policy_value(policy, policy) def check_source_update(xml_policy_file): """ Update source and policy at the same time,then check those changes. :param xml_policy_file: VM xml policy file """ xmltreefile = XMLTreeFile(xml_policy_file) policy_item = xmltreefile.find('/source') def configure_startup_policy(update=False, policy='optional'): """ Configure startupPolicy attribute value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ if update: del policy_item.attrib["startupPolicy"] else: policy_item.set("startupPolicy", policy) flag_option = "--live" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False # Update source and startUpPolicy attribute value. def update_source_policy(update=True, policy='optional'): """ Update startupPolicy source value. :param update: update value or not :param policy: policy value :return: flag_option and boolean value """ source_file = policy_item.get('file') if update: new_source_file = source_file+".empty" else: new_source_file = source_file+".new" shutil.copyfile(source_file, new_source_file) policy_item.set("file", new_source_file) policy_item.set("startupPolicy", policy) flag_option = "--persistent" xmltreefile.write(xml_policy_file, encoding="UTF-8") return flag_option, False function_list = [configure_startup_policy, update_source_policy, configure_startup_policy, update_source_policy] function_parameter = [False, False, True, True] # Loop all above scenarios to update device. for index in list(range(len(function_list))): try: func = function_list[index] para = function_parameter[index] flag_option, update_error = func(para) ret = virsh.update_device(vm_name, xml_policy_file, flagstr=flag_option, debug=True) libvirt.check_exit_status(ret, expect_error=update_error) except AttributeError as elem_attr: test.error("Fail to remove startupPolicy attribute:%s" % str(elem_attr)) except Exception as update_device_exception: test.error("Fail to update device:%s" % str(update_device_exception)) finally: source_file = policy_item.get('file') new_source_file = source_file+".new" if os.path.exists(new_source_file): os.remove(new_source_file) def rename_file(source_file, target_file, revert=False): """ Rename a file or revert it. :param source_file: The source file name. :param target_file: The target file name. :param revert: It can be True or False. """ try: if not revert: os.rename(source_file, target_file) logging.debug("Rename %s to %s", source_file, target_file) else: os.rename(target_file, source_file) logging.debug("Rename %s to %s", target_file, source_file) except OSError as err: test.fail("Rename image failed: %s" % str(err)) # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Start VM and get all partitions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Get start,restore configuration parameters. start_error = "yes" == params.get("start_error", "no") restore_error = "yes" == params.get("restore_error", "no") virsh_dargs = {'debug': True, 'ignore_status': True} attach_option = params.get("attach_option") # Create disk xml and attach it. device_type = params.get("device_type") disk_type = params.get("disk_type", "network") disk_src_host = params.get("disk_source_host", "127.0.0.1") target_dev = params.get("target_dev") target_bus = params.get("disk_target_bus", "virtio") image_size = params.get("image_size", "1.44M") emulated_image = "emulated-iscsi" # Storage pool and volume related paramters. pool_name = params.get("pool_name", "iscsi_pool") pool_type = params.get("pool_type") pool_target = params.get("pool_target", "/dev/disk/by-path") pool_src_host = params.get("pool_source_host", "127.0.0.1") vol_name = params.get("volume_name") capacity = params.get("volume_size", "1048576") vol_format = params.get("volume_format") # Source file parameters. media_name = params.get("media_name") media_file = os.path.join(data_dir.get_tmp_dir(), media_name) media_file_new = media_file + ".new" save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save") snapshot_name = "s1" # Policy related paramters. disk_xml_file = os.path.join(data_dir.get_tmp_dir(), "attach_disk.xml") disk_xml_policy_file = os.path.join(data_dir.get_tmp_dir(), "attach_policy_disk.xml") update_policy = "yes" == params.get("update_policy", "no") policy_only = "yes" == params.get("policy_only", "no") update_policy_list = params.get("update_policy_list").split() expect_value = [None, startup_policy] try: if disk_type == "volume": pvt = libvirt.PoolVolumeTest(test, params) vol_name, vol_path = create_volume(pvt, vol_name) vol_path_new = vol_path + ".new" # Create disk xml. create_disk_xml() if vm.is_alive(): vm.destroy() try: # Backup disk xml file for policy update if update_policy=True. if update_policy: shutil.copyfile(disk_xml_file, disk_xml_policy_file) result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml_file, flagstr="--config", **virsh_dargs) # For iSCSI pool volume,startupPolicy attribute is not valid for it. # Moreover,setting disk 'requisite' is allowed only for cdrom or floppy. if pool_type == "iscsi" or all([device_type == "disk", startup_policy == "requisite"]): libvirt.check_exit_status(result, expect_error=True) return else: libvirt.check_exit_status(result, expect_error=False) except Exception as attach_device_exception: logging.debug("Attach device throws exception:%s", str(attach_device_exception)) os.remove(media_file) test.error("Attach %s fail" % device_type) # Check update policy operations. if disk_type == "file" and update_policy: vm.start() if policy_only: check_policy_update(startup_policy, update_policy_list, disk_xml_policy_file, device_type, attach_option) else: check_source_update(disk_xml_policy_file) elif disk_type == "file": # Step 1. Start domain and destroy it normally vm.start() vm.destroy() # Step 2. Remove the source_file then start the domain rename_file(media_file, media_file_new) result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # For libvirt version >=2.0.0, feature is updated and startup policy attribute # can not exist alone without source protocol. if not start_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 3. Move back the source file and start the domain(if needed). rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): vm.start() # Step 4. Save the domain normally, then remove the source file # and restore it back vm.save_to_file(save_file) rename_file(media_file, media_file_new) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) if not restore_error and not libvirt_version.version_compare(2, 0, 0): check_disk_source(vm_name, target_dev, expect_value) # Step 5. Move back the source file and restore the domain(if needed) rename_file(media_file, media_file_new, revert=True) if not vm.is_alive(): result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=False) elif disk_type == "volume": # Step 1. Start domain and destroy it normally. vm.start() # Step 1 Start VM successfully. if not check_in_vm(old_parts): test.fail("Check disk partitions in VM failed") # Step 2 Move the volume to other place, refresh the pool, then reboot the guest. rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) vm.destroy() result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(result, expect_error=start_error) # Step 3 Move back the source file and start. rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if not vm.is_alive(): vm.start() # Step 4 Save the domain normally, then remove the source file,then restore domain. vm.save_to_file(save_file) rename_file(vol_path, vol_path_new) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result, expect_error=restore_error) # Step 5, Create snapshot,move the source to other place,then revert snapshot. if device_type == "disk": rename_file(vol_path, vol_path_new, revert=True) cmd_result = virsh.pool_refresh(pool_name) libvirt.check_exit_status(cmd_result) if restore_error: result = virsh.restore(save_file, **virsh_dargs) libvirt.check_exit_status(result) ret = virsh.snapshot_create_as(vm_name, snapshot_name, **virsh_dargs) libvirt.check_exit_status(ret) rename_file(vol_path, vol_path_new) ret = virsh.snapshot_revert(vm_name, snapshot_name, **virsh_dargs) # Clean up snapshot. libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync() if disk_type == "volume": pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **virsh_dargs) if os.path.exists(save_file): os.remove(save_file) if os.path.exists(disk_xml_file): os.remove(disk_xml_file) if os.path.exists(media_file): os.remove(media_file) if os.path.exists(disk_xml_policy_file): os.remove(disk_xml_policy_file)
if not pool_ins.is_pool_active(pool_name): pool_start_status = virsh.pool_start(pool_name) utlv.check_exit_status(pool_start_status) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) # create vol if required if need_vol_create == "yes": vol_create_as_status = virsh.vol_create_as( volume_name, pool_name, volume_capacity, allocation, vol_format, "", debug=True ) utlv.check_exit_status(vol_create_as_status) virsh.pool_refresh(pool_name) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_DELAY_TIME*3) logging.debug('Volume list is: %s' % vol_list) # use test_unit to save the first vol in pool if pool_type == "mpath": cmd = "virsh vol-list %s | grep \"%s\" |\ awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path) cmd_result = process.run(cmd, shell=True) status = cmd_result.exit_status output = cmd_result.stdout.strip() if cmd_result.exit_status: raise exceptions.TestFail("vol-list pool %s failed", pool_name) if not output: raise exceptions.TestFail("Newly added mpath dev not in pool.")
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: test.cancel("%s value '%s' is not a number." % (i, params.get(i))) # False positive - img_val was filled in the for loop above. # pylint: disable=E0632 img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vm_os_xml = vmxml.os # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in list(disks.values()): disk_path = disk['source'] label = check_ownership(disk_path) if label: backup_labels_of_disks[disk_path] = label try: if vm_os_xml.nvram: nvram_path = vm_os_xml.nvram if not os.path.exists(nvram_path): # Need libvirt automatically generate the path vm.start() vm.destroy(gracefully=False) label = check_ownership(nvram_path) if label: backup_labels_of_disks[nvram_path] = label except xcepts.LibvirtXMLNotFoundError: logging.debug("vm xml don't have nvram element") # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for file_path in list(backup_labels_of_disks.keys()): if qemu_user == "root": os.chown(file_path, 0, 0) elif qemu_user == "qemu": os.chown(file_path, 107, 107) else: process.run('chown %s %s' % (qemu_user, file_path), shell=True) # Set selinux of host. if backup_sestatus == "disabled": test.cancel("SELinux is in Disabled mode." "It must be Enabled to" "run this test") utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s", qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s", export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.cancel("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.fail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug( "attached image ownership on nfs server before " "start: %s", img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug( "attached image ownership on nfs server after" " start: %s", img_label_after) if status_error: test.fail('Test succeeded in negative case.') except virt_vm.VMStartError as e: # Starting VM failed. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: test.fail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search("\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in list(disks_snap.values()): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except process.CmdError: test.fail("Detach disk 'vdf' from VM %s failed." % vm.name) finally: # clean up vm.destroy() qemu_conf.restore() for path, label in list(backup_labels_of_disks.items()): label_list = label.split(":") os.chown(path, int(label_list[0]), int(label_list[1])) if snapshot_name: backup_xml.sync("--snapshots-metadata") else: backup_xml.sync() for i in disk_snap_path: if i and os.path.exists(i): os.unlink(i) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except exceptions.TestFail as detail: logging.error(str(detail)) utils_selinux.set_status(backup_sestatus) libvirtd.restart()
def run(test, params, env): """ This test cover two volume commands: vol-clone and vol-wipe. 1. Create a given type pool. 2. Create a given format volume in the pool. 3. Clone the new create volume. 4. Wipe the new clone volume. 5. Delete the volume and pool. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if not os.path.dirname(pool_target): pool_target = os.path.join(test.tmpdir, pool_target) emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") new_vol_name = params.get("new_vol_name") vol_capability = params.get("vol_capability") vol_format = params.get("vol_format") clone_option = params.get("clone_option", "") wipe_algorithms = params.get("wipe_algorithms") if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None: if "prealloc-metadata" in clone_option: raise error.TestNAError("Option --prealloc-metadata " "is not supported.") # Using algorithms other than zero need scrub installed. try: utils_misc.find_command("scrub") except ValueError: logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.") valid_algorithms = ["zero"] else: valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random"] # Choose an algorithms randomly if wipe_algorithms: alg = random.choice(wipe_algorithms.split()) else: alg = random.choice(valid_algorithms) clone_status_error = "yes" == params.get("clone_status_error", "no") wipe_status_error = "yes" == params.get("wipe_status_error", "no") setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get("unprivileged_user") if unpri_user: if unpri_user.count("EXAMPLE"): unpri_user = "******" if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: raise error.TestNAError("API acl test not supported in current" " libvirt version.") del_pool = True libv_pvt = libvirt.PoolVolumeTest(test, params) try: libv_pool = libvirt_storage.StoragePool() while libv_pool.pool_exists(pool_name): logging.debug("Use exist pool '%s'", pool_name) del_pool = False else: # Create a new pool disk_vol = [] if pool_type == "disk": disk_vol.append(params.get("pre_vol", "10M")) libv_pvt.pre_pool( pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, pre_disk_vol=disk_vol, ) libv_vol = libvirt_storage.PoolVolume(pool_name) if libv_vol.volume_exists(vol_name): logging.debug("Use exist volume '%s'", vol_name) elif vol_format in ["raw", "qcow2", "qed", "vmdk"]: # Create a new volume libv_pvt.pre_vol( vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name ) elif vol_format == "partition": vol_name = libv_vol.list_volumes().keys()[0] logging.debug("Partition %s in disk pool is volume" % vol_name) elif vol_format == "sparse": # Create a sparse file in pool sparse_file = pool_target + "/" + vol_name cmd = "dd if=/dev/zero of=" + sparse_file cmd += " bs=1 count=0 seek=" + vol_capability utils.run(cmd) else: raise error.TestError("Unknown volume format %s" % vol_format) # Refresh the pool virsh.pool_refresh(pool_name) vol_info = libv_vol.volume_info(vol_name) for key in vol_info: logging.debug("Original volume info: %s = %s", key, vol_info[key]) # Metadata preallocation is not support for block volume if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"): clone_status_error = True if pool_type == "disk": new_vol_name = libvirt.new_disk_vol_name(pool_name) if new_vol_name is None: raise error.TestError("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % new_vol_name libvirt.update_polkit_rule(params, vol_pat, new_value) # Clone volume clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True) if not clone_status_error: if clone_result.exit_status != 0: raise error.TestFail("Clone volume fail:\n%s" % clone_result.stderr.strip()) else: vol_info = libv_vol.volume_info(new_vol_name) for key in vol_info: logging.debug("Cloned volume info: %s = %s", key, vol_info[key]) logging.debug("Clone volume successfully.") # Wipe the new clone volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe( new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True ) unsupported_err = ["Unsupported algorithm", "no such pattern sequence"] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): raise error.TestNAError(wipe_result.stderr) raise error.TestFail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libv_vol.volume_info(new_vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info["format"] != "raw": raise error.TestFail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: raise error.TestFail("Expect wipe volume fail, but run" " successfully.") elif clone_status_error and clone_result.exit_status == 0: raise error.TestFail("Expect clone volume fail, but run" " successfully.") finally: # Clean up try: if del_pool: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) else: # Only delete the volumes libv_vol = libvirt_storage.PoolVolume(pool_name) for vol in [vol_name, new_vol_name]: libv_vol.delete_volume(vol) except error.TestFail, detail: logging.error(str(detail))
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=disk_src_host) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML disk_params = {'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly} disk_params_src = {} if disk_type == "network": disk_params_src = {'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': disk_src_host, 'source_host_port': disk_src_port} elif disk_type == "volume": disk_params_src = {'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode} else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = {'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target} disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") disk_xml_f = open(disk_xml) disk_xml_content = disk_xml_f.read() disk_xml_f.close() logging.debug("Attach disk by XML: %s", disk_xml_content) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Wait for domain is stable vm.wait_for_login().close() domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: virsh.snapshot_list(vm_name, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots list for %s", vm_name) try: virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots info for %s", vm_name) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) pass else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError), e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (process.CmdError, error.TestFail), e: if process_vol_type == "thin": logging.error(e) raise error.TestNAError("You may encounter bug BZ#1060287") else: raise e else: raise error.TestFail("Post process volume failed") finally: # Cleanup # For old version lvm2(2.02.106 or early), deactivate volume group # (destroy libvirt logical pool) will fail if which has deactivated # lv snapshot, so before destroy the pool, we need activate it manually if src_pool_type == 'logical' and vol_path_list:
utlv.check_exit_status(pool_start_status) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) # create vol if required if need_vol_create == "yes": vol_create_as_status = virsh.vol_create_as(volume_name, pool_name, volume_capacity, allocation, vol_format, "", debug=True) utlv.check_exit_status(vol_create_as_status) virsh.pool_refresh(pool_name) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_DELAY_TIME * 3) logging.debug('Volume list is: %s' % vol_list) # use test_unit to save the first vol in pool if pool_type == "mpath": cmd = "virsh vol-list %s | grep \"%s\" |\ awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path) cmd_result = process.run(cmd, shell=True) status = cmd_result.exit_status output = cmd_result.stdout.strip() if cmd_result.exit_status: raise exceptions.TestFail("vol-list pool %s failed", pool_name) if not output:
def run(test, params, env): """ Test virsh vol-create command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: raise error.TestNAError("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: raise error.TestNAError("It does not support generate target " "path in thi libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: raise error.TestNAError("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def post_process_vol(ori_vol_path): """ Create or disactive a volume without libvirt :param ori_vol_path: Full path of an original volume :retur: Volume name for checking """ process_vol_name = params.get("process_vol_name", "process_vol") process_vol_options = params.get("process_vol_options", "") process_vol_capacity = params.get("process_vol_capacity", vol_capacity) process_vol_cmd = "" unsupport_err = "Unsupport do '%s %s' in this test" % ( process_vol_by, process_vol_type) if process_vol_by == "lvcreate": process_vol_cmd = "lvcreate -L %s " % process_vol_capacity if process_vol_type == "thin": if not process_vol_options: process_vol_options = "-T " process_vol_cmd += "%s " % process_vol_options processthin_pool_name = params.get("processthin_pool_name", "thinpool") processthin_vol_name = params.get("processthin_vol_name", "thinvol") process_vol_capacity = params.get("process_vol_capacity", "1G") os.path.dirname(ori_vol_path) process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path), processthin_pool_name) process_vol_cmd += "-V %s " % process_vol_capacity process_vol_cmd += "-n %s " % processthin_vol_name process_vol_name = processthin_vol_name elif process_vol_type == "snapshot": if not process_vol_options: process_vol_options = "-s " process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "-n %s " % process_vol_name process_vol_cmd += "%s " % (ori_vol_path) else: logging.error(unsupport_err) return elif process_vol_by == "qemu-img" and process_vol_type == "create": process_vol_cmd = "qemu-img create " process_vol_path = os.path.dirname(ori_vol_path) + "/" process_vol_path += process_vol_name process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "%s " % process_vol_path process_vol_cmd += "%s " % process_vol_capacity elif process_vol_by == "lvchange" and process_vol_type == "deactivate": process_vol_cmd = "lvchange %s " % ori_vol_path if not process_vol_options: process_vol_options = "-an" process_vol_cmd += process_vol_options else: logging.error(unsupport_err) return rst = utils.run(process_vol_cmd, ignore_status=True) if rst.exit_status: if "Snapshots of snapshots are not supported" in rst.stderr: logging.debug("%s is already a snapshot volume", ori_vol_path) process_vol_name = os.path.basename(ori_vol_path) else: logging.error(rst.stderr) return return process_vol_name def check_vol(pool_name, vol_name, expect_exist=True): """ Check volume vol_name in pool pool_name """ src_volumes = src_pv.list_volumes().keys() logging.debug("Current volumes in %s: %s", pool_name, src_volumes) if expect_exist: if vol_name not in src_volumes: raise error.TestFail("Can't find volume %s in pool %s" % (vol_name, pool_name)) # check format in volume xml post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name) logging.debug("Volume %s XML: %s" % (vol_name, post_xml.xmltreefile)) if 'format' in post_xml.keys() and vol_format is not None: if post_xml.format != vol_format: raise error.TestFail("Volume format %s is not expected" % vol_format + " as defined.") else: if vol_name in src_volumes: raise error.TestFail( "Find volume %s in pool %s, but expect not" % (vol_name, pool_name)) fmt_err0 = "Unknown file format '%s'" % vol_format fmt_err1 = "Formatting or formatting option not " fmt_err1 += "supported for file format '%s'" % vol_format fmt_err2 = "Driver '%s' does not support " % vol_format fmt_err2 += "image creation" fmt_err_list = [fmt_err0, fmt_err1, fmt_err2] skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format vol_path_list = [] try: # Create the src pool src_pool_name = "virt-%s-pool" % src_pool_type pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image, image_size=image_size, source_format=src_pool_format) src_pv = libvirt_storage.PoolVolume(src_pool_name) # Print current pools for debugging logging.debug("Current pools:%s", libvirt_storage.StoragePool().list_pools()) # Create volumes by virsh in a loop while pool_vol_num > 0: # Set volume xml file vol_name = prefix_vol_name + "_%s" % pool_vol_num pool_vol_num -= 1 if by_xml: # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: raise error.TestError("Fail to generate volume name") vol_arg['name'] = vol_name volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] if params.get('setup_libvirt_polkit') == 'yes': utils.run("chmod 666 %s" % vol_xml, ignore_status=True) # Run virsh_vol_create to create vol logging.debug("Create volume from XML: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol cmd_result = virsh.vol_create_as( vol_name, src_pool_name, vol_capacity, vol_allocation, vol_format, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) # Check result try: utlv.check_exit_status(cmd_result, status_error) check_vol(src_pool_name, vol_name, not status_error) if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (error.CmdError, error.TestFail), e: if process_vol_type == "thin": logging.error(e) raise error.TestNAError( "You may encounter bug BZ#1060287") else: raise e else: raise error.TestFail("Post process volume failed")
def run(test, params, env): """ Test command: virsh pool-define; pool-define-as; pool-start; vol-list pool; attach-device LUN to guest; mount the device; dd to the mounted device; unmount; pool-destroy; pool-undefine; Pre-requiste: Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to SAN controller. """ pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML") pool_name = params.get("pool_create_name", "virt_test_pool_tmp") pre_def_pool = params.get("pre_def_pool", "no") define_pool = params.get("define_pool", "no") define_pool_as = params.get("define_pool_as", "no") pool_create_as = params.get("pool_create_as", "no") need_pool_build = params.get("need_pool_build", "no") need_vol_create = params.get("need_vol_create", "no") pool_type = params.get("pool_type", "dir") source_format = params.get("pool_src_format", "") source_name = params.get("pool_source_name", "") source_path = params.get("pool_source_path", "/") pool_target = params.get("pool_target", "pool_target") pool_adapter_type = params.get("pool_adapter_type", "") pool_adapter_parent = params.get("pool_adapter_parent", "") target_device = params.get("disk_target_dev", "sdc") pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE") pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE") vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE") vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE") volume_name = params.get("volume_name", "imagefrommapper.qcow2") volume_capacity = params.get("volume_capacity", '1G') allocation = params.get("allocation", '1G') vol_format = params.get("volume_format", 'raw') attach_method = params.get("attach_method", "hot") test_unit = None mount_disk = None pool_kwargs = {} pool_extra_args = "" emulated_image = "emulated-image" disk_xml = "" new_vhbas = [] source_dev = "" mpath_vol_path = "" old_mpath_conf = "" mpath_conf_path = "/etc/multipath.conf" original_mpath_conf_exist = os.path.exists(mpath_conf_path) if pool_type == "scsi": if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for npiv scsi pool.") if pool_type == "logical": if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for vhba.") online_hbas_list = utils_npiv.find_hbas("hba") logging.debug("The online hbas are: %s", online_hbas_list) old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, replace_existing=True) if not online_hbas_list: raise exceptions.TestSkipError( "Host doesn't have online hba cards") old_vhbas = utils_npiv.find_hbas("vhba") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() if not vm.is_alive(): vm.start() libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) if pool_type == "scsi": if define_pool == "yes": if pool_adapter_parent == "": pool_adapter_parent = online_hbas_list[0] pool_kwargs = {'source_path': source_path, 'source_name': source_name, 'source_format': source_format, 'pool_adapter_type': pool_adapter_type, 'pool_adapter_parent': pool_adapter_parent, 'pool_wwnn': pool_wwnn, 'pool_wwpn': pool_wwpn} elif pool_type == "logical": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME*5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) source_dev = "/dev/mapper/" + new_mpath_devs[0] logging.debug("We are going to use \"%s\" as our source device" " to create a logical pool", source_dev) try: cmd = "parted %s mklabel msdos -s" % source_dev cmd_result = process.run(cmd, shell=True) except Exception as e: raise exceptions.TestError("Error occurred when parted mklable") if define_pool_as == "yes": pool_extra_args = "" if source_dev: pool_extra_args = ' --source-dev %s' % source_dev elif pool_type == "mpath": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME*2) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0] try: cmd = "parted %s mklabel msdos -s" % mpath_vol_path cmd_result = process.run(cmd, shell=True) except Exception as e: raise exceptions.TestError("Error occurred when parted mklable") if pre_def_pool == "yes": try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **pool_kwargs) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), _DELAY_TIME*2) virsh.pool_dumpxml(pool_name, to_file=pool_xml_f) virsh.pool_destroy(pool_name) except Exception as e: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **pool_kwargs) raise exceptions.TestError( "Error occurred when prepare pool xml:\n %s" % e) if os.path.exists(pool_xml_f): with open(pool_xml_f, 'r') as f: logging.debug("Create pool from file: %s", f.read()) try: # define/create/start the pool if (pre_def_pool == "yes") and (define_pool == "yes"): pool_define_status = virsh.pool_define(pool_xml_f, ignore_status=True, debug=True) utlv.check_exit_status(pool_define_status) if define_pool_as == "yes": pool_define_as_status = virsh.pool_define_as( pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True, debug=True ) utlv.check_exit_status(pool_define_as_status) if pool_create_as == "yes": if pool_type != "scsi": raise exceptions.TestSkipError("pool-create-as only needs to " "be covered by scsi pool for " "NPIV test.") cmd = "virsh pool-create-as %s %s \ --adapter-wwnn %s --adapter-wwpn %s \ --adapter-parent %s --target %s"\ % (pool_name, pool_type, pool_wwnn, pool_wwpn, online_hbas_list[0], pool_target) cmd_status = process.system(cmd, verbose=True) if cmd_status: raise exceptions.TestFail("pool-create-as scsi pool failed.") if need_pool_build == "yes": pool_build_status = virsh.pool_build(pool_name, "--overwrite") utlv.check_exit_status(pool_build_status) pool_ins = libvirt_storage.StoragePool() if not pool_ins.pool_exists(pool_name): raise exceptions.TestFail("define or create pool failed.") else: if not pool_ins.is_pool_active(pool_name): pool_start_status = virsh.pool_start(pool_name) utlv.check_exit_status(pool_start_status) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) # create vol if required if need_vol_create == "yes": vol_create_as_status = virsh.vol_create_as( volume_name, pool_name, volume_capacity, allocation, vol_format, "", debug=True ) utlv.check_exit_status(vol_create_as_status) virsh.pool_refresh(pool_name) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_DELAY_TIME*3) logging.debug('Volume list is: %s' % vol_list) # use test_unit to save the first vol in pool if pool_type == "mpath": cmd = "virsh vol-list %s | grep \"%s\" |\ awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path) cmd_result = process.run(cmd, shell=True) status = cmd_result.exit_status output = cmd_result.stdout_text.strip() if cmd_result.exit_status: raise exceptions.TestFail("vol-list pool %s failed", pool_name) if not output: raise exceptions.TestFail("Newly added mpath dev not in pool.") test_unit = output logging.info( "Using %s to attach to a guest", test_unit) else: test_unit = list(vol_list.keys())[0] logging.info( "Using the first volume %s to attach to a guest", test_unit) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) session = vm.wait_for_login() output = session.cmd_status_output('lsblk') logging.debug("%s", output[1]) old_count = vmxml.get_disk_count(vm_name) bf_disks = libvirt_vm.get_disks() # prepare disk xml which will be hot/cold attached to vm disk_params = {'type_name': 'volume', 'target_dev': target_device, 'target_bus': 'virtio', 'source_pool': pool_name, 'source_volume': test_unit, 'driver_type': vol_format} disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml') lun_disk_xml = utlv.create_disk_xml(disk_params) copyfile(lun_disk_xml, disk_xml) disk_xml_str = open(lun_disk_xml).read() logging.debug("The disk xml is: %s", disk_xml_str) # hot attach disk xml to vm if attach_method == "hot": copyfile(lun_disk_xml, disk_xml) dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True) # Pool/vol virtual disk is not supported by mpath pool yet. if dev_attach_status.exit_status and pool_type == "mpath": raise exceptions.TestSkipError("mpath pool vol is not " "supported in virtual disk yet," "the error message is: %s", dev_attach_status.stderr) session.close() utlv.check_exit_status(dev_attach_status) # cold attach disk xml to vm elif attach_method == "cold": if vm.is_alive(): vm.destroy(gracefully=False) new_disk = disk.Disk() new_disk.xml = disk_xml_str vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml.devices = vmxml.devices.append(new_disk) vmxml.sync() logging.debug(vmxml) try: vm.start() except virt_vm.VMStartError as e: logging.debug(e) if pool_type == "mpath": raise exceptions.TestSkipError("'mpath' pools for backing " "'volume' disks isn't " "supported for now") else: raise exceptions.TestFail("Failed to start vm") session = vm.wait_for_login() else: pass # checking attached disk in vm logging.info("Checking disk availability in domain") if not vmxml.get_disk_count(vm_name): raise exceptions.TestFail("No disk in domain %s." % vm_name) new_count = vmxml.get_disk_count(vm_name) if new_count <= old_count: raise exceptions.TestFail( "Failed to attach disk %s" % lun_disk_xml) logging.debug("Disks before attach: %s", bf_disks) af_disks = libvirt_vm.get_disks() logging.debug("Disks after attach: %s", af_disks) mount_disk = "".join(list(set(bf_disks) ^ set(af_disks))) if not mount_disk: raise exceptions.TestFail("Can not get attached device in vm.") logging.debug("Attached device in vm:%s", mount_disk) logging.debug("Creating file system for %s", mount_disk) output = session.cmd_status_output( 'echo yes | mkfs.ext4 %s' % mount_disk) logging.debug("%s", output[1]) if mount_disk: mount_success = mount_and_dd(session, mount_disk) if not mount_success: raise exceptions.TestFail("Mount failed") else: raise exceptions.TestFail("Partition not available for disk") logging.debug("Unmounting disk") session.cmd_status_output('umount %s' % mount_disk) output = session.cmd_status_output('mount') logging.debug("%s", output[1]) mount_success = mount_and_dd(session, mount_disk) if not mount_success: raise exceptions.TestFail("Mount failed") logging.debug("Unmounting disk") session.cmd_status_output('umount %s' % mount_disk) session.close() # detach disk from vm dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True) utlv.check_exit_status(dev_detach_status) finally: vm.destroy(gracefully=False) vmxml_backup.sync() logging.debug('Destroying pool %s', pool_name) virsh.pool_destroy(pool_name) logging.debug('Undefining pool %s', pool_name) virsh.pool_undefine(pool_name) if os.path.exists(pool_xml_f): os.remove(pool_xml_f) if os.path.exists(disk_xml): data_dir.clean_tmp_files() logging.debug("Cleanup disk xml") if pre_def_pool == "yes": # Do not apply cleanup_pool for logical pool, logical pool will # be cleaned below pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **pool_kwargs) if (test_unit and (need_vol_create == "yes" and (pre_def_pool == "no")) and (pool_type == "logical")): process.system('lvremove -f %s/%s' % (pool_name, test_unit), verbose=True) process.system('vgremove -f %s' % pool_name, verbose=True) process.system('pvremove -f %s' % source_dev, verbose=True) if new_vhbas: utils_npiv.vhbas_cleanup(new_vhbas) # Restart multipathd, this is to avoid bz1399075 if source_dev: utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev), _DELAY_TIME*5, 0.0, 5.0) elif mpath_vol_path: utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path), _DELAY_TIME*5, 0.0, 5.0) else: utils_npiv.restart_multipathd() if old_mpath_conf: utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, conf_content=old_mpath_conf, replace_existing=True) if not original_mpath_conf_exist and os.path.exists(mpath_conf_path): os.remove(mpath_conf_path)
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True} def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) # Run Testcase kwargs = {'source_format': params.get('pool_source_format', 'ext4')} try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utlv.define_pool(pool_name, pool_type, pool_target, cleanup_env, **kwargs) utlv.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep case continue result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, define_error) if define_error: # Redo under negative case to keep case continue result = virsh.pool_define(pool_xml) utlv.check_exit_status(result) # Step (3) # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if pool_type != "logical": option = '' if pool_type in ['disk', 'fs']: option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result, build_error) if build_error: # Redo under negative case to keep case continue result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # For iSCSI pool, we need discover targets before start the pool if pool_type == 'iscsi': cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1' process.run(cmd, shell=True) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, start_error) if start_error: # Redo under negative case to keep case continue result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expect fail, but run successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under negative case to keep case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) utlv.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name, shell=True) if cleanup_env[1]: utlv.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utlv.setup_or_cleanup_nfs( False, restore_selinux=cleanup_env[3])
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Destroy the pool (21) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (22) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm cleanup_env = [False, False, False] def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug( "Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail( "Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Run Testcase try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define result = utils_test.libvirt.define_pool(pool_name, pool_type, pool_target, cleanup_env) check_exit_status(result, status_error) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # If the filesystem cntaining the directory is mounted, then the # directory will show as running, which means the local 'dir' pool # don't need start after restart libvirtd if pool_type != "dir": result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) check_exit_status(result) check_vol_list(vol_name, pool_name) # Step(20) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (21) # Pool delete for 'dir' type pool if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, True) # Step (22) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = utils.system_output(cmd) lv_utils.vg_remove(vg_name) utils.run("pvremove %s" % pv_name) if cleanup_env[1]: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utils_test.libvirt.setup_or_cleanup_nfs(False)
def run(test, params, env): """ This test cover two volume commands: vol-clone and vol-wipe. 1. Create a given type pool. 2. Create a given format volume in the pool. 3. Clone the new create volume. 4. Wipe the new clone volume. 5. Delete the volume and pool. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if not os.path.dirname(pool_target): pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") new_vol_name = params.get("new_vol_name") vol_capability = params.get("vol_capability") vol_allocation = params.get("vol_allocation") vol_format = params.get("vol_format") clone_option = params.get("clone_option", "") wipe_algorithms = params.get("wipe_algorithms") b_luks_encrypted = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] wipe_old_vol = False with_clusterSize = "yes" == params.get("with_clusterSize") vol_clusterSize = params.get("vol_clusterSize", "64") vol_clusterSize_unit = params.get("vol_clusterSize_unit") libvirt_version.is_libvirt_feature_supported(params) if virsh.has_command_help_match("vol-clone", "--prealloc-metadata") is None: if "prealloc-metadata" in clone_option: test.cancel("Option --prealloc-metadata " "is not supported.") clone_status_error = "yes" == params.get("clone_status_error", "no") wipe_status_error = "yes" == params.get("wipe_status_error", "no") setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.cancel("API acl test not supported in current" " libvirt version.") # Using algorithms other than zero need scrub installed. try: utils_path.find_command('scrub') except utils_path.CmdNotFoundError: logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.") valid_algorithms = ["zero"] else: valid_algorithms = [ "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random" ] # Choose an algorithm randomly if wipe_algorithms: alg = random.choice(wipe_algorithms.split()) else: alg = random.choice(valid_algorithms) libvirt_pvt = utlv.PoolVolumeTest(test, params) libvirt_pool = libvirt_storage.StoragePool() if libvirt_pool.pool_exists(pool_name): test.error("Pool '%s' already exist" % pool_name) try: # Create a new pool disk_vol = [] if pool_type == 'disk': disk_vol.append(params.get("pre_vol", '10M')) libvirt_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, pre_disk_vol=disk_vol) libvirt_vol = libvirt_storage.PoolVolume(pool_name) # Create a new volume if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']: if (b_luks_encrypted and vol_format in ['raw', 'qcow2']): if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS is not supported in current" " libvirt version") if vol_format == "qcow2" and not libvirt_version.version_compare( 6, 10, 0): test.cancel("Qcow2 format with luks encryption is not" " supported in current libvirt version") luks_sec_uuid = create_luks_secret( os.path.join(pool_target, vol_name), encryption_password, test) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(vol_capability) vol_arg['allocation'] = int(vol_allocation) vol_arg['format'] = vol_format if with_clusterSize: vol_arg['clusterSize'] = int(vol_clusterSize) vol_arg['clusterSize_unit'] = vol_clusterSize_unit create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: libvirt_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name) elif vol_format == 'partition': vol_name = list(utlv.get_vol_list(pool_name).keys())[0] logging.debug("Find partition %s in disk pool", vol_name) elif vol_format == 'sparse': # Create a sparse file in pool sparse_file = pool_target + '/' + vol_name cmd = "dd if=/dev/zero of=" + sparse_file cmd += " bs=1 count=0 seek=" + vol_capability process.run(cmd, ignore_status=True, shell=True) else: test.error("Unknown volume format %s" % vol_format) # Refresh the pool virsh.pool_refresh(pool_name, debug=True) vol_info = libvirt_vol.volume_info(vol_name) if not vol_info: test.error("Fail to get info of volume %s" % vol_name) for key in vol_info: logging.debug("Original volume info: %s = %s", key, vol_info[key]) # Metadata preallocation is not support for block volume if vol_info["Type"] == "block" and clone_option.count( "prealloc-metadata"): clone_status_error = True if b_luks_encrypted: wipe_old_vol = True if pool_type == "disk": new_vol_name = utlv.new_disk_vol_name(pool_name) if new_vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % new_vol_name utlv.update_polkit_rule(params, vol_pat, new_value) bad_cloned_vol_name = params.get("bad_cloned_vol_name", "") if bad_cloned_vol_name: new_vol_name = bad_cloned_vol_name # Clone volume clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True) if not clone_status_error: if clone_result.exit_status != 0: test.fail("Clone volume fail:\n%s" % clone_result.stderr.strip()) else: vol_info = libvirt_vol.volume_info(new_vol_name) for key in vol_info: logging.debug("Cloned volume info: %s = %s", key, vol_info[key]) logging.debug("Clone volume successfully.") # Wipe the new clone volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True) unsupported_err = [ "Unsupported algorithm", "no such pattern sequence" ] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): test.cancel(wipe_result.stderr) test.fail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libvirt_vol.volume_info(new_vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info['format'] != 'raw': test.fail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: test.fail("Expect wipe volume fail, but run" " successfully.") elif clone_status_error and clone_result.exit_status == 0: test.fail("Expect clone volume fail, but run" " successfully.") if wipe_old_vol: # Wipe the old volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe(vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True) unsupported_err = [ "Unsupported algorithm", "no such pattern sequence" ] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): test.cancel(wipe_result.stderr) test.fail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libvirt_vol.volume_info(vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info['format'] != 'raw': test.fail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: test.fail("Expect wipe volume fail, but run" " successfully.") if bad_cloned_vol_name: pattern = "volume name '%s' cannot contain '/'" % new_vol_name if re.search(pattern, clone_result.stderr) is None: test.fail("vol-clone failed with unexpected reason") finally: # Clean up try: libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) except exceptions.TestFail as detail: logging.error(str(detail))
def run(test, params, env): """ This test cover two volume commands: vol-clone and vol-wipe. 1. Create a given type pool. 2. Create a given format volume in the pool. 3. Clone the new create volume. 4. Wipe the new clone volume. 5. Delete the volume and pool. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if not os.path.dirname(pool_target): pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") new_vol_name = params.get("new_vol_name") vol_capability = params.get("vol_capability") vol_allocation = params.get("vol_allocation") vol_format = params.get("vol_format") clone_option = params.get("clone_option", "") wipe_algorithms = params.get("wipe_algorithms") b_luks_encrypted = "luks" == params.get("encryption_method") encryption_password = params.get("encryption_password", "redhat") secret_uuids = [] wipe_old_vol = False if virsh.has_command_help_match("vol-clone", "--prealloc-metadata") is None: if "prealloc-metadata" in clone_option: test.cancel("Option --prealloc-metadata " "is not supported.") clone_status_error = "yes" == params.get("clone_status_error", "no") wipe_status_error = "yes" == params.get("wipe_status_error", "no") setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: test.cancel("API acl test not supported in current" " libvirt version.") # Using algorithms other than zero need scrub installed. try: utils_path.find_command('scrub') except utils_path.CmdNotFoundError: logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.") valid_algorithms = ["zero"] else: valid_algorithms = ["zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random"] # Choose an algorithm randomly if wipe_algorithms: alg = random.choice(wipe_algorithms.split()) else: alg = random.choice(valid_algorithms) libvirt_pvt = utlv.PoolVolumeTest(test, params) libvirt_pool = libvirt_storage.StoragePool() if libvirt_pool.pool_exists(pool_name): test.error("Pool '%s' already exist" % pool_name) try: # Create a new pool disk_vol = [] if pool_type == 'disk': disk_vol.append(params.get("pre_vol", '10M')) libvirt_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, pre_disk_vol=disk_vol) libvirt_vol = libvirt_storage.PoolVolume(pool_name) # Create a new volume if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']: if (b_luks_encrypted and vol_format in ['raw']): if not libvirt_version.version_compare(2, 0, 0): test.cancel("LUKS is not supported in current" " libvirt version") luks_sec_uuid = create_luks_secret(os.path.join(pool_target, vol_name), encryption_password, test) secret_uuids.append(luks_sec_uuid) vol_arg = {} vol_arg['name'] = vol_name vol_arg['capacity'] = int(vol_capability) vol_arg['allocation'] = int(vol_allocation) create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg) else: libvirt_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name) elif vol_format == 'partition': vol_name = list(utlv.get_vol_list(pool_name).keys())[0] logging.debug("Find partition %s in disk pool", vol_name) elif vol_format == 'sparse': # Create a sparse file in pool sparse_file = pool_target + '/' + vol_name cmd = "dd if=/dev/zero of=" + sparse_file cmd += " bs=1 count=0 seek=" + vol_capability process.run(cmd, ignore_status=True, shell=True) else: test.error("Unknown volume format %s" % vol_format) # Refresh the pool virsh.pool_refresh(pool_name, debug=True) vol_info = libvirt_vol.volume_info(vol_name) if not vol_info: test.error("Fail to get info of volume %s" % vol_name) for key in vol_info: logging.debug("Original volume info: %s = %s", key, vol_info[key]) # Metadata preallocation is not support for block volume if vol_info["Type"] == "block" and clone_option.count("prealloc-metadata"): clone_status_error = True if b_luks_encrypted: wipe_old_vol = True if pool_type == "disk": new_vol_name = utlv.new_disk_vol_name(pool_name) if new_vol_name is None: test.error("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % new_vol_name utlv.update_polkit_rule(params, vol_pat, new_value) bad_cloned_vol_name = params.get("bad_cloned_vol_name", "") if bad_cloned_vol_name: new_vol_name = bad_cloned_vol_name # Clone volume clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True) if not clone_status_error: if clone_result.exit_status != 0: test.fail("Clone volume fail:\n%s" % clone_result.stderr.strip()) else: vol_info = libvirt_vol.volume_info(new_vol_name) for key in vol_info: logging.debug("Cloned volume info: %s = %s", key, vol_info[key]) logging.debug("Clone volume successfully.") # Wipe the new clone volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True) unsupported_err = ["Unsupported algorithm", "no such pattern sequence"] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): test.cancel(wipe_result.stderr) test.fail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libvirt_vol.volume_info(new_vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info['format'] != 'raw': test.fail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: test.fail("Expect wipe volume fail, but run" " successfully.") elif clone_status_error and clone_result.exit_status == 0: test.fail("Expect clone volume fail, but run" " successfully.") if wipe_old_vol: # Wipe the old volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe(vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True) unsupported_err = ["Unsupported algorithm", "no such pattern sequence"] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): test.cancel(wipe_result.stderr) test.fail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libvirt_vol.volume_info(vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info['format'] != 'raw': test.fail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: test.fail("Expect wipe volume fail, but run" " successfully.") if bad_cloned_vol_name: pattern = "volume name '%s' cannot contain '/'" % new_vol_name if re.search(pattern, clone_result.stderr) is None: test.fail("vol-clone failed with unexpected reason") finally: # Clean up try: libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) except exceptions.TestFail as detail: logging.error(str(detail))
def run(test, params, env): """ Test virsh vol-create command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: raise error.TestNAError("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: raise error.TestNAError("It does not support generate target " "path in thi libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: raise error.TestNAError("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def post_process_vol(ori_vol_path): """ Create or disactive a volume without libvirt :param ori_vol_path: Full path of an original volume :retur: Volume name for checking """ process_vol_name = params.get("process_vol_name", "process_vol") process_vol_options = params.get("process_vol_options", "") process_vol_capacity = params.get("process_vol_capacity", vol_capacity) process_vol_cmd = "" unsupport_err = "Unsupport do '%s %s' in this test" % (process_vol_by, process_vol_type) if process_vol_by == "lvcreate": process_vol_cmd = "lvcreate -L %s " % process_vol_capacity if process_vol_type == "thin": if not process_vol_options: process_vol_options = "-T " process_vol_cmd += "%s " % process_vol_options processthin_pool_name = params.get("processthin_pool_name", "thinpool") processthin_vol_name = params.get("processthin_vol_name", "thinvol") process_vol_capacity = params.get("process_vol_capacity", "1G") os.path.dirname(ori_vol_path) process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path), processthin_pool_name) process_vol_cmd += "-V %s " % process_vol_capacity process_vol_cmd += "-n %s " % processthin_vol_name process_vol_name = processthin_vol_name elif process_vol_type == "snapshot": if not process_vol_options: process_vol_options = "-s " process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "-n %s " % process_vol_name process_vol_cmd += "%s " % (ori_vol_path) else: logging.error(unsupport_err) return elif process_vol_by == "qemu-img" and process_vol_type == "create": process_vol_cmd = "qemu-img create " process_vol_path = os.path.dirname(ori_vol_path) + "/" process_vol_path += process_vol_name process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "%s " % process_vol_path process_vol_cmd += "%s " % process_vol_capacity elif process_vol_by == "lvchange" and process_vol_type == "deactivate": process_vol_cmd = "lvchange %s " % ori_vol_path if not process_vol_options: process_vol_options = "-an" process_vol_cmd += process_vol_options else: logging.error(unsupport_err) return rst = process.run(process_vol_cmd, ignore_status=True, shell=True) if rst.exit_status: if "Snapshots of snapshots are not supported" in rst.stderr: logging.debug("%s is already a snapshot volume", ori_vol_path) process_vol_name = os.path.basename(ori_vol_path) else: logging.error(rst.stderr) return return process_vol_name def check_vol(pool_name, vol_name, expect_exist=True): """ Check volume vol_name in pool pool_name """ src_volumes = src_pv.list_volumes().keys() logging.debug("Current volumes in %s: %s", pool_name, src_volumes) if expect_exist: if vol_name not in src_volumes: raise error.TestFail("Can't find volume %s in pool %s" % (vol_name, pool_name)) # check format in volume xml post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name) logging.debug("Volume %s XML: %s" % (vol_name, post_xml.xmltreefile)) if 'format' in post_xml.keys() and vol_format is not None: if post_xml.format != vol_format: raise error.TestFail("Volume format %s is not expected" % vol_format + " as defined.") else: if vol_name in src_volumes: raise error.TestFail("Find volume %s in pool %s, but expect not" % (vol_name, pool_name)) fmt_err0 = "Unknown file format '%s'" % vol_format fmt_err1 = "Formatting or formatting option not " fmt_err1 += "supported for file format '%s'" % vol_format fmt_err2 = "Driver '%s' does not support " % vol_format fmt_err2 += "image creation" fmt_err_list = [fmt_err0, fmt_err1, fmt_err2] skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format vol_path_list = [] try: # Create the src pool src_pool_name = "virt-%s-pool" % src_pool_type pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image, image_size=image_size, source_format=src_pool_format) src_pv = libvirt_storage.PoolVolume(src_pool_name) # Print current pools for debugging logging.debug("Current pools:%s", libvirt_storage.StoragePool().list_pools()) # Create volumes by virsh in a loop while pool_vol_num > 0: # Set volume xml file vol_name = prefix_vol_name + "_%s" % pool_vol_num pool_vol_num -= 1 if by_xml: # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: raise error.TestError("Fail to generate volume name") vol_arg['name'] = vol_name volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) vol_xml = newvol['xml'] if params.get('setup_libvirt_polkit') == 'yes': process.run("chmod 666 %s" % vol_xml, ignore_status=True, shell=True) # Run virsh_vol_create to create vol logging.debug("Create volume from XML: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol cmd_result = virsh.vol_create_as( vol_name, src_pool_name, vol_capacity, vol_allocation, vol_format, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) # Check result try: utlv.check_exit_status(cmd_result, status_error) check_vol(src_pool_name, vol_name, not status_error) if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except error.TestFail, e: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): raise error.TestNAError(skip_msg) else: raise e # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (process.CmdError, error.TestFail), e: if process_vol_type == "thin": logging.error(e) raise error.TestNAError("You may encounter bug BZ#1060287") else: raise e else: raise error.TestFail("Post process volume failed")
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm # cleanup_env[3] for selinux backup status, cleanup_env[4] for gluster cleanup_env = [False, False, False, "", False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") acl_dargs = { 'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True } def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable if list_dumpxml_acl: result = virsh.pool_list(option, **acl_dargs) else: result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) # Run Testcase kwargs = {'source_format': params.get('pool_source_format', 'ext4')} try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utlv.define_pool(pool_name, pool_type, pool_target, cleanup_env, **kwargs) utlv.check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if list_dumpxml_acl: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml, **acl_dargs) else: xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep case continue result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, define_error) if define_error: # Redo under negative case to keep case continue result = virsh.pool_define(pool_xml) utlv.check_exit_status(result) # Step (3) # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if pool_type != "logical": option = '' if pool_type in ['disk', 'fs']: option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result, build_error) if build_error: # Redo under negative case to keep case continue result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # For iSCSI pool, we need discover targets before start the pool if pool_type == 'iscsi': cmd = 'iscsiadm -m discovery -t sendtargets -p 127.0.0.1' process.run(cmd, shell=True) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, start_error) if start_error: # Redo under negative case to keep case continue result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expect fail, but run successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under negative case to keep case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) utlv.check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = process.system_output(cmd, shell=True) lv_utils.vg_remove(vg_name) process.run("pvremove %s" % pv_name, shell=True) if cleanup_env[1]: utlv.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utlv.setup_or_cleanup_nfs(False, restore_selinux=cleanup_env[3])
def run(test, params, env): """ 1. Create a pool 2. Create n number of volumes(vol-create-as) 3. Check the volume details from the following commands vol-info vol-key vol-list vol-name vol-path vol-pool qemu-img info 4. Delete the volume and check in vol-list 5. Repeat the steps for number of volumes given 6. Delete the pool and target TODO: Handle negative testcases """ def delete_volume(expected_vol): """ Deletes Volume """ pool_name = expected_vol['pool_name'] vol_name = expected_vol['name'] pv = libvirt_storage.PoolVolume(pool_name) if not pv.delete_volume(vol_name): raise error.TestFail("Delete volume failed." % vol_name) else: logging.debug("Volume: %s successfully deleted on pool: %s", vol_name, pool_name) def get_vol_list(pool_name, vol_name): """ Parse the volume list """ output = virsh.vol_list(pool_name, "--details") rg = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)') vol = {} vols = [] volume_detail = None for line in output.stdout.splitlines(): match = re.search(rg, line.lstrip()) if match is not None: vol['name'] = match.group(1) vol['path'] = match.group(2) vol['type'] = match.group(3) vol['capacity'] = match.group(4) vol['allocation'] = match.group(5) vols.append(vol) vol = {} for volume in vols: if volume['name'] == vol_name: volume_detail = volume return volume_detail def norm_capacity(capacity): """ Normalize the capacity values to bytes """ # Normaize all values to bytes norm_capacity = {} des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K', 'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K', 'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M', 'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G', 'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T', 'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T' } val = {'B': 1, 'K': 1024, 'M': 1048576, 'G': 1073741824, 'T': 1099511627776 } reg_list = re.compile(r'(\S+)\s(\S+)') match_list = re.search(reg_list, capacity['list']) if match_list is not None: mem_value = float(match_list.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['list'] = int(mem_value * norm) else: raise error.TestFail("Error in parsing capacity value in" " virsh vol-list") match_info = re.search(reg_list, capacity['info']) if match_info is not None: mem_value = float(match_info.group(1)) norm = val[des[match_list.group(2)]] norm_capacity['info'] = int(mem_value * norm) else: raise error.TestFail("Error in parsing capacity value " "in virsh vol-info") norm_capacity['qemu_img'] = capacity['qemu_img'] norm_capacity['xml'] = int(capacity['xml']) return norm_capacity def check_vol(expected, avail=True): """ Checks the expected volume details with actual volume details from vol-dumpxml vol-list vol-info vol-key vol-path qemu-img info """ error_count = 0 volume_xml = {} pv = libvirt_storage.PoolVolume(expected['pool_name']) pool_exists = pv.volume_exists(expected['name']) if pool_exists: if not avail: error_count += 1 logging.error("Expect volume %s not exists but find it", expected['name']) return error_count else: if avail: error_count += 1 logging.error("Expect volume %s exists but not find it", expected['name']) return error_count else: logging.info("Volume %s checked successfully for deletion", expected['name']) return error_count actual_list = get_vol_list(expected['pool_name'], expected['name']) actual_info = pv.volume_info(expected['name']) # Get values from vol-dumpxml volume_xml = vol_xml.VolXML.get_vol_details_by_name(expected['name'], expected['pool_name']) # Check against virsh vol-key vol_key = virsh.vol_key(expected['name'], expected['pool_name']) if vol_key.stdout.strip() != volume_xml['key']: logging.error("Volume key is mismatch \n%s" "Key from xml: %s\nKey from command: %s", expected['name'], volume_xml['key'], vol_key) error_count += 1 else: logging.debug("virsh vol-key for volume: %s successfully" " checked against vol-dumpxml", expected['name']) # Check against virsh vol-name get_vol_name = virsh.vol_name(expected['path']) if get_vol_name.stdout.strip() != expected['name']: logging.error("Volume name mismatch\n" "Expected name: %s\nOutput of vol-name: %s", expected['name'], get_vol_name) # Check against virsh vol-path vol_path = virsh.vol_path(expected['name'], expected['pool_name']) if expected['path'] != vol_path.stdout.strip(): logging.error("Volume path mismatch for volume: %s\n" "Expected path: %s\nOutput of vol-path: %s\n", expected['name'], expected['path'], vol_path) error_count += 1 else: logging.debug("virsh vol-path for volume: %s successfully checked" " against created volume path", expected['name']) # Check path against virsh vol-list if expected['path'] != actual_list['path']: logging.error("Volume path mismatch for volume:%s\n" "Expected Path: %s\nPath from virsh vol-list: %s", expected['name'], expected['path'], actual_list['path']) error_count += 1 else: logging.debug("Path of volume: %s from virsh vol-list " "successfully checked against created " "volume path", expected['name']) # Check path against virsh vol-dumpxml if expected['path'] != volume_xml['path']: logging.error("Volume path mismatch for volume: %s\n" "Expected Path: %s\nPath from virsh vol-dumpxml: %s", expected['name'], expected['path'], volume_xml['path']) error_count += 1 else: logging.debug("Path of volume: %s from virsh vol-dumpxml " "successfully checked against created volume path", expected['name']) # Check type against virsh vol-list if expected['type'] != actual_list['type']: logging.error("Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-list: %s", expected['name'], expected['type'], actual_list['type']) error_count += 1 else: logging.debug("Type of volume: %s from virsh vol-list " "successfully checked against the created " "volume type", expected['name']) # Check type against virsh vol-info if expected['type'] != actual_info['Type']: logging.error("Volume type mismatch for volume: %s\n" "Expected Type: %s\n Type from vol-info: %s", expected['name'], expected['type'], actual_info['Type']) error_count += 1 else: logging.debug("Type of volume: %s from virsh vol-info successfully" " checked against the created volume type", expected['name']) # Check name against virsh vol-info if expected['name'] != actual_info['Name']: logging.error("Volume name mismatch for volume: %s\n" "Expected name: %s\n Name from vol-info: %s", expected['name'], expected['name'], actual_info['Name']) error_count += 1 else: logging.debug("Name of volume: %s from virsh vol-info successfully" " checked against the created volume name", expected['name']) # Check format from against qemu-img info img_info = utils_misc.get_image_info(expected['path']) if expected['format']: if expected['format'] != img_info['format']: logging.error("Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from qemu-img info: %s", expected['name'], expected['format'], img_info['format']) error_count += 1 else: logging.debug("Format of volume: %s from qemu-img info " "checked successfully against the created " "volume format", expected['name']) # Check format against vol-dumpxml if expected['format']: if expected['format'] != volume_xml['format']: logging.error("Volume format mismatch for volume: %s\n" "Expected format: %s\n" "Format from vol-dumpxml: %s", expected['name'], expected['format'], volume_xml['format']) error_count += 1 else: logging.debug("Format of volume: %s from virsh vol-dumpxml " "checked successfully against the created" " volume format", expected['name']) # Check pool name against vol-pool vol_pool = virsh.vol_pool(expected['path']) if expected['pool_name'] != vol_pool.stdout.strip(): logging.error("Pool name mismatch for volume: %s against" "virsh vol-pool", expected['name']) error_count += 1 else: logging.debug("Pool name of volume: %s checked successfully" " against the virsh vol-pool", expected['name']) norm_cap = {} capacity = {} capacity['list'] = actual_list['capacity'] capacity['info'] = actual_info['Capacity'] capacity['xml'] = volume_xml['capacity'] capacity['qemu_img'] = img_info['vsize'] norm_cap = norm_capacity(capacity) delta_size = params.get('delta_size', "1024") if abs(expected['capacity'] - norm_cap['list']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-list\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['list']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-list for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['info']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['info']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-info for volume %s", expected['name']) if abs(expected['capacity'] - norm_cap['xml']) > delta_size: logging.error("Capacity mismatch for volume: %s against virsh" " vol-dumpxml\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['xml']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " virsh vol-dumpxml for volume: %s", expected['name']) if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size: logging.error("Capacity mismatch for volume: %s against " "qemu-img info\nExpected: %s\nActual: %s", expected['name'], expected['capacity'], norm_cap['qemu_img']) error_count += 1 else: logging.debug("Capacity value checked successfully against" " qemu-img info for volume: %s", expected['name']) return error_count # Initialize the variables pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("volume_name") vol_number = int(params.get("number_of_volumes", "2")) capacity = params.get("volume_size", "1048576") allocation = params.get("volume_allocation", "1048576") vol_format = params.get("volume_format") source_name = params.get("gluster_source_name", "gluster-vol1") source_path = params.get("gluster_source_path", "/") emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") try: str_capa = utils_misc.normalize_data_size(capacity, "B") int_capa = int(str(str_capa).split('.')[0]) except ValueError: raise error.TestError("Translate size %s to 'B' failed" % capacity) try: str_capa = utils_misc.normalize_data_size(allocation, "B") int_allo = int(str(str_capa).split('.')[0]) except ValueError: raise error.TestError("Translate size %s to 'B' failed" % allocation) expected_vol = {} vol_type = 'file' if pool_type in ['disk', 'logical']: vol_type = 'block' if pool_type == 'gluster': vol_type = 'network' logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n" "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n" "vol_format:%s", pool_name, pool_type, pool_target, vol_name, vol_number, capacity, allocation, vol_format) libv_pvt = utlv.PoolVolumeTest(test, params) # Run Testcase total_err_count = 0 try: # Create a new pool libv_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, source_name=source_name, source_path=source_path) for i in range(vol_number): volume_name = "%s_%d" % (vol_name, i) expected_vol['pool_name'] = pool_name expected_vol['pool_type'] = pool_type expected_vol['pool_target'] = pool_target expected_vol['capacity'] = int_capa expected_vol['allocation'] = int_allo expected_vol['format'] = vol_format expected_vol['name'] = volume_name expected_vol['type'] = vol_type # Creates volume if pool_type != "gluster": expected_vol['path'] = pool_target + '/' + volume_name libv_pvt.pre_vol(volume_name, vol_format, capacity, allocation, pool_name) else: ip_addr = utlv.get_host_ipv4_addr() expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr, source_name, volume_name) utils.run("qemu-img create -f %s %s %s" % (vol_format, expected_vol['path'], capacity)) virsh.pool_refresh(pool_name) # Check volumes total_err_count += check_vol(expected_vol) # Delete volume and check for results delete_volume(expected_vol) total_err_count += check_vol(expected_vol, False) if total_err_count > 0: raise error.TestFail("Test case failed due to previous errors.\n" "Check for error logs") finally: # Clean up try: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, source_name) except error.TestFail, detail: logging.error(str(detail))
def run(test, params, env): """ Attach/Detach an iscsi network/volume disk to domain 1. For secret usage testing: 1.1. Setup an iscsi target with CHAP authentication. 1.2. Define a secret for iscsi target usage 1.3. Set secret value 2. Create 4. Create an iscsi network disk XML 5. Attach disk with the XML file and check the disk inside the VM 6. Detach the disk """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "network") disk_src_protocol = params.get("disk_source_protocol", "iscsi") disk_src_host = params.get("disk_source_host", "127.0.0.1") disk_src_port = params.get("disk_source_port", "3260") disk_src_pool = params.get("disk_source_pool") disk_src_mode = params.get("disk_source_mode", "host") pool_type = params.get("pool_type", "iscsi") pool_src_host = params.get("pool_source_host", "127.0.0.1") disk_target = params.get("disk_target", "vdb") disk_target_bus = params.get("disk_target_bus", "virtio") disk_readonly = params.get("disk_readonly", "no") chap_auth = "yes" == params.get("chap_auth", "no") chap_user = params.get("chap_username", "") chap_passwd = params.get("chap_password", "") secret_usage_target = params.get("secret_usage_target") secret_ephemeral = params.get("secret_ephemeral", "no") secret_private = params.get("secret_private", "yes") status_error = "yes" == params.get("status_error", "no") if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if disk_type == "volume": if not libvirt_version.version_compare(1, 0, 5): raise error.TestNAError("'volume' type disk doesn't support in" " current libvirt version.") # Back VM XML vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} try: if chap_auth: # Create a secret xml to define it secret_xml = SecretXML(secret_ephemeral, secret_private) secret_xml.auth_type = "chap" secret_xml.auth_username = chap_user secret_xml.usage = disk_src_protocol secret_xml.target = secret_usage_target logging.debug("Define secret by XML: %s", open(secret_xml.xml).read()) # Define secret cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Get secret uuid try: secret_uuid = cmd_result.stdout.strip().split()[1] except IndexError: raise error.TestError("Fail to get new created secret uuid") # Set secret value secret_string = base64.b64encode(chap_passwd) cmd_result = virsh.secret_set_value(secret_uuid, secret_string, **virsh_dargs) libvirt.check_exit_status(cmd_result) else: # Set chap_user and chap_passwd to empty to avoid setup # CHAP authentication when export iscsi target chap_user = "" chap_passwd = "" # Setup iscsi target iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=disk_src_host) # Create iscsi pool if disk_type == "volume": # Create an iscsi pool xml to create it pool_src_xml = pool_xml.SourceXML() pool_src_xml.host_name = pool_src_host pool_src_xml.device_path = iscsi_target poolxml = pool_xml.PoolXML(pool_type=pool_type) poolxml.name = disk_src_pool poolxml.set_source(pool_src_xml) poolxml.target_path = "/dev/disk/by-path" # Create iscsi pool cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs) libvirt.check_exit_status(cmd_result) # refresh the pool cmd_result = virsh.pool_refresh(disk_src_pool) libvirt.check_exit_status(cmd_result) # Get volume name cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(cmd_result.stdout))[1][0] except IndexError: raise error.TestError("Fail to get volume name") # Create iscsi network disk XML disk_params = { 'device_type': disk_device, 'type_name': disk_type, 'target_dev': disk_target, 'target_bus': disk_target_bus, 'readonly': disk_readonly } disk_params_src = {} if disk_type == "network": disk_params_src = { 'source_protocol': disk_src_protocol, 'source_name': iscsi_target + "/%s" % lun_num, 'source_host_name': disk_src_host, 'source_host_port': disk_src_port } elif disk_type == "volume": disk_params_src = { 'source_pool': disk_src_pool, 'source_volume': vol_name, 'source_mode': disk_src_mode } else: error.TestNAError("Unsupport disk type in this test") disk_params.update(disk_params_src) if chap_auth: disk_params_auth = { 'auth_user': chap_user, 'secret_type': disk_src_protocol, 'secret_usage': secret_xml.target } disk_params.update(disk_params_auth) disk_xml = libvirt.create_disk_xml(disk_params) start_vm = "yes" == params.get("start_vm", "yes") if start_vm: if vm.is_dead(): vm.start() else: if not vm.is_dead(): vm.destroy() attach_option = params.get("attach_option", "") disk_xml_f = open(disk_xml) disk_xml_content = disk_xml_f.read() disk_xml_f.close() logging.debug("Attach disk by XML: %s", disk_xml_content) cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml, flagstr=attach_option, dargs=virsh_dargs) libvirt.check_exit_status(cmd_result, status_error) if vm.is_dead(): cmd_result = virsh.start(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) # Wait for domain is stable vm.wait_for_login().close() domain_operation = params.get("domain_operation", "") if domain_operation == "save": save_file = os.path.join(test.tmpdir, "vm.save") cmd_result = virsh.save(vm_name, save_file, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.restore(save_file) libvirt.check_exit_status(cmd_result) if os.path.exists(save_file): os.remove(save_file) elif domain_operation == "snapshot": # Run snapshot related commands: snapshot-create-as, snapshot-list # snapshot-info, snapshot-dumpxml, snapshot-create snapshot_name1 = "snap1" snapshot_name2 = "snap2" cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) try: virsh.snapshot_list(vm_name, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots list for %s", vm_name) try: virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs) except process.CmdError: error.TestFail("Failed getting snapshots info for %s", vm_name) cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target) cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1, **virsh_dargs) cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs) libvirt.check_exit_status(cmd_result) cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2, **virsh_dargs) libvirt.check_exit_status(cmd_result) pass else: logging.error("Unsupport operation %s in this case, so skip it", domain_operation) def find_attach_disk(expect=True): """ Find attached disk inside the VM """ found_disk = False if vm.is_dead(): raise error.TestError("Domain %s is not running" % vm_name) else: try: session = vm.wait_for_login() cmd = "grep %s /proc/partitions" % disk_target s, o = session.cmd_status_output(cmd) logging.info("%s output: %s", cmd, o) session.close() if s == 0: found_disk = True except (LoginError, VMError, ShellError), e: logging.error(str(e)) if found_disk == expect: logging.debug("Check disk inside the VM PASS as expected") else: raise error.TestError("Check disk inside the VM FAIL") # Check disk inside the VM, expect is False if status_error=True find_attach_disk(not status_error) # Detach disk cmd_result = virsh.detach_disk(vm_name, disk_target) libvirt.check_exit_status(cmd_result, status_error) # Check disk inside the VM find_attach_disk(False)
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = { 'image_size': '1G', 'pre_disk_vol': ['1M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal } try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) utlv.check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_pool_info(pool_info, "Capacity", new_info['Capacity']) check_pool_info(pool_info, "Allocation", new_info['Allocation']) check_pool_info(pool_info, "Available", new_info['Available']) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) except error.TestFail, detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(pool_xml): os.remove(pool_xml)
def run(test, params, env): """ Test virsh vol-create and vol-create-as command to cover the following matrix: pool_type = [dir, fs, netfs] volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed, vmdk, vpc] pool_type = [disk] volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm, linux-raid, extended] pool_type = [logical] volume_format = [none] pool_type = [iscsi, scsi] Not supported with format type TODO: pool_type = [rbd, glusterfs] Reference: http://www.libvirt.org/storage.html """ src_pool_type = params.get("src_pool_type") src_pool_target = params.get("src_pool_target") src_pool_format = params.get("src_pool_format", "") pool_vol_num = int(params.get("src_pool_vol_num", '1')) src_emulated_image = params.get("src_emulated_image") extra_option = params.get("extra_option", "") prefix_vol_name = params.get("vol_name", "vol_create_test") vol_format = params.get("vol_format", "raw") vol_capacity = params.get("vol_capacity", 1048576) vol_allocation = params.get("vol_allocation", 1048576) image_size = params.get("emulate_image_size", "1G") lazy_refcounts = "yes" == params.get("lazy_refcounts") status_error = "yes" == params.get("status_error", "no") by_xml = "yes" == params.get("create_vol_by_xml", "yes") incomplete_target = "yes" == params.get("incomplete_target", "no") luks_encrypted = "luks" == params.get("encryption_method") encryption_secret_type = params.get("encryption_secret_type", "passphrase") virsh_readonly_mode = 'yes' == params.get("virsh_readonly", "no") if not libvirt_version.version_compare(1, 0, 0): if "--prealloc-metadata" in extra_option: test.cancel("metadata preallocation not supported in" " current libvirt version.") if incomplete_target: test.cancel("It does not support generate target path" "in current libvirt version.") pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi'] if src_pool_type not in pool_type: test.cancel("pool type %s not in supported type list: %s" % (src_pool_type, pool_type)) # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Set volume xml attribute dictionary, extract all params start with 'vol_' # which are for setting volume xml, except 'lazy_refcounts'. vol_arg = {} for key in params.keys(): if key.startswith('vol_'): if key[4:] in ['capacity', 'allocation', 'owner', 'group']: vol_arg[key[4:]] = int(params[key]) else: vol_arg[key[4:]] = params[key] vol_arg['lazy_refcounts'] = lazy_refcounts def create_luks_secret(vol_path): """ Create secret for luks encryption :param vol_path. volume path. :return: secret id if create successfully. """ sec_xml = secret_xml.SecretXML("no", "yes") sec_xml.description = "volume secret" sec_xml.usage = 'volume' sec_xml.volume = vol_path sec_xml.xmltreefile.write() ret = virsh.secret_define(sec_xml.xml) utlv.check_exit_status(ret) # Get secret uuid. try: encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() except IndexError as detail: test.error("Fail to get newly created secret uuid") logging.debug("Secret uuid %s", encryption_uuid) # Set secret value. encoding = locale.getpreferredencoding() secret_string = base64.b64encode( 'redhat'.encode(encoding)).decode(encoding) ret = virsh.secret_set_value(encryption_uuid, secret_string) utlv.check_exit_status(ret) return encryption_uuid def post_process_vol(ori_vol_path): """ Create or disactive a volume without libvirt :param ori_vol_path: Full path of an original volume :retur: Volume name for checking """ process_vol_name = params.get("process_vol_name", "process_vol") process_vol_options = params.get("process_vol_options", "") process_vol_capacity = params.get("process_vol_capacity", vol_capacity) process_vol_cmd = "" unsupport_err = "Unsupport do '%s %s' in this test" % ( process_vol_by, process_vol_type) if process_vol_by == "lvcreate": process_vol_cmd = "lvcreate -L %s " % process_vol_capacity if process_vol_type == "thin": if not process_vol_options: process_vol_options = "-T " process_vol_cmd += "%s " % process_vol_options processthin_pool_name = params.get("processthin_pool_name", "thinpool") processthin_vol_name = params.get("processthin_vol_name", "thinvol") process_vol_capacity = params.get("process_vol_capacity", "1G") os.path.dirname(ori_vol_path) process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path), processthin_pool_name) process_vol_cmd += "-V %s " % process_vol_capacity process_vol_cmd += "-n %s " % processthin_vol_name process_vol_name = processthin_vol_name elif process_vol_type == "snapshot": if not process_vol_options: process_vol_options = "-s " process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "-n %s " % process_vol_name process_vol_cmd += "%s " % (ori_vol_path) else: logging.error(unsupport_err) return elif process_vol_by == "qemu-img" and process_vol_type == "create": process_vol_cmd = "qemu-img create " process_vol_path = os.path.dirname(ori_vol_path) + "/" process_vol_path += process_vol_name process_vol_cmd += "%s " % process_vol_options process_vol_cmd += "%s " % process_vol_path process_vol_cmd += "%s " % process_vol_capacity elif process_vol_by == "lvchange" and process_vol_type == "deactivate": process_vol_cmd = "lvchange %s " % ori_vol_path if not process_vol_options: process_vol_options = "-an" process_vol_cmd += process_vol_options else: logging.error(unsupport_err) return rst = process.run(process_vol_cmd, ignore_status=True, shell=True) if rst.exit_status: if "Snapshots of snapshots are not supported" in rst.stderr_text: logging.debug("%s is already a snapshot volume", ori_vol_path) process_vol_name = os.path.basename(ori_vol_path) else: logging.error(rst.stderr_text) return return process_vol_name def check_vol(pool_name, vol_name, expect_exist=True): """ Check volume vol_name in pool pool_name """ src_volumes = src_pv.list_volumes().keys() logging.debug("Current volumes in %s: %s", pool_name, src_volumes) if expect_exist: if vol_name not in src_volumes: test.fail("Can't find volume %s in pool %s" % (vol_name, pool_name)) # check format in volume xml volxml = libvirt_xml.VolXML() post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name) logging.debug("Volume %s XML: %s" % (vol_name, post_xml.xmltreefile)) if 'format' in post_xml.keys() and vol_format is not None: if post_xml.format != vol_format: test.fail("Volume format %s is not expected" % vol_format + " as defined.") else: if vol_name in src_volumes: test.fail("Find volume %s in pool %s, but expect not" % (vol_name, pool_name)) fmt_err0 = "Unknown file format '%s'" % vol_format fmt_err1 = "Formatting or formatting option not " fmt_err1 += "supported for file format '%s'" % vol_format fmt_err2 = "Driver '%s' does not support " % vol_format fmt_err2 += "image creation" fmt_err_list = [fmt_err0, fmt_err1, fmt_err2] skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format vol_path_list = [] secret_uuids = [] try: # Create the src pool src_pool_name = "virt-%s-pool" % src_pool_type pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image, image_size=image_size, source_format=src_pool_format) src_pv = libvirt_storage.PoolVolume(src_pool_name) src_pool_uuid = libvirt_storage.StoragePool().pool_info( src_pool_name)['UUID'] # Print current pools for debugging logging.debug("Current pools:%s", libvirt_storage.StoragePool().list_pools()) # Create volumes by virsh in a loop while pool_vol_num > 0: # Set volume xml file vol_name = prefix_vol_name + "_%s" % pool_vol_num bad_vol_name = params.get("bad_vol_name", "") if bad_vol_name: vol_name = bad_vol_name pool_vol_num -= 1 # disk partition for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: test.error("Fail to generate volume name") if by_xml: # According to BZ#1138523, we need inpect the right name # (disk partition) for new volume if src_pool_type == "disk": vol_name = utlv.new_disk_vol_name(src_pool_name) if vol_name is None: test.error("Fail to generate volume name") vol_arg['name'] = vol_name volxml = libvirt_xml.VolXML() newvol = volxml.new_vol(**vol_arg) if luks_encrypted: # For luks encrypted disk, add related xml in newvol luks_encryption_params = {} luks_encryption_params.update({"format": "luks"}) luks_secret_uuid = create_luks_secret( os.path.join(src_pool_target, vol_name)) secret_uuids.append(luks_secret_uuid) luks_encryption_params.update({ "secret": { "type": encryption_secret_type, "uuid": luks_secret_uuid } }) newvol.encryption = volxml.new_encryption( **luks_encryption_params) vol_xml = newvol['xml'] if params.get('setup_libvirt_polkit') == 'yes': process.run("chmod 666 %s" % vol_xml, ignore_status=True, shell=True) if luks_encrypted and libvirt_version.version_compare( 4, 5, 0): try: polkit = test_setup.LibvirtPolkitConfig(params) polkit_rules_path = polkit.polkit_rules_path with open(polkit_rules_path, 'r+') as f: rule = f.readlines() for index, v in enumerate(rule): if v.find("secret") >= 0: nextline = rule[index + 1] s = nextline.replace( "QEMU", "secret").replace( "pool_name", "secret_uuid").replace( "virt-dir-pool", "%s" % luks_secret_uuid) rule[index + 1] = s rule = ''.join(rule) with open(polkit_rules_path, 'w+') as f: f.write(rule) logging.debug(rule) polkit.polkitd.restart() except IOError as e: logging.error(e) # Run virsh_vol_create to create vol logging.debug("Create volume from XML: %s" % newvol.xmltreefile) cmd_result = virsh.vol_create( src_pool_name, vol_xml, extra_option, unprivileged_user=unprivileged_user, uri=uri, ignore_status=True, debug=True) else: # Run virsh_vol_create_as to create_vol pool_name = src_pool_name if params.get("create_vol_by_pool_uuid") == "yes": pool_name = src_pool_uuid cmd_result = virsh.vol_create_as( vol_name, pool_name, vol_capacity, vol_allocation, vol_format, extra_option, unprivileged_user=unprivileged_user, uri=uri, readonly=virsh_readonly_mode, ignore_status=True, debug=True) # Check result try: utlv.check_exit_status(cmd_result, status_error) check_vol(src_pool_name, vol_name, not status_error) if bad_vol_name: pattern = "volume name '%s' cannot contain '/'" % vol_name logging.debug("pattern: %s", pattern) if "\\" in pattern and by_xml: pattern = pattern.replace("\\", "\\\\") if re.search(pattern, cmd_result.stderr) is None: test.fail("vol-create failed with unexpected reason") if not status_error: vol_path = virsh.vol_path(vol_name, src_pool_name).stdout.strip() logging.debug("Full path of %s: %s", vol_name, vol_path) vol_path_list.append(vol_path) except exceptions.TestFail as detail: stderr = cmd_result.stderr if any(err in stderr for err in fmt_err_list): test.cancel(skip_msg) else: test.fail("Create volume fail:\n%s" % detail) # Post process vol by other programs process_vol_by = params.get("process_vol_by") process_vol_type = params.get("process_vol_type", "") expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes") if process_vol_by and vol_path_list: process_vol = post_process_vol(vol_path_list[0]) if process_vol is not None: try: virsh.pool_refresh(src_pool_name, ignore_status=False) check_vol(src_pool_name, process_vol, expect_vol_exist) except (process.CmdError, exceptions.TestFail) as detail: if process_vol_type == "thin": logging.error(str(detail)) test.cancel("You may encounter bug BZ#1060287") else: test.fail("Fail to refresh pool:\n%s" % detail) else: test.fail("Post process volume failed") finally: # Cleanup # For old version lvm2(2.02.106 or early), deactivate volume group # (destroy libvirt logical pool) will fail if which has deactivated # lv snapshot, so before destroy the pool, we need activate it manually if src_pool_type == 'logical' and vol_path_list: vg_name = vol_path_list[0].split('/')[2] process.run("lvchange -ay %s" % vg_name, shell=True) try: pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target, src_emulated_image) for secret_uuid in set(secret_uuids): virsh.secret_undefine(secret_uuid) except exceptions.TestFail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start()
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") new_pool_name = params.get("new_pool_name", "") build_option = params.get("build_option", "") iscsi_initiator = params.get("iscsi_initiator", "") same_source_test = "yes" == params.get("same_source_test", "no") customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no") # The file for dumped pool xml poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("volume_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") if not libvirt_version.version_compare(4, 7, 0): if pool_type == "iscsi-direct": test.cancel("iSCSI-direct pool is not supported in current" "libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: test.fail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: test.fail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: test.fail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) kwargs = { 'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal, 'emulated_image': "emulated-image", 'pool_target': pool_target, 'iscsi_initiator': iscsi_initiator } params.update(kwargs) try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(**params) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=poolxml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Update pool name if new_pool_name: if "/" in new_pool_name: new_pool_name = new_pool_name.replace("/", "\/") logging.debug(new_pool_name) p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) p_xml.name = new_pool_name del p_xml.uuid poolxml = p_xml.xml logging.debug("XML after update pool name:\n%s" % p_xml) # Update host name if same_source_test: s_xml = p_xml.get_source() s_xml.host_name = "192.168.1.1" p_xml.set_source(s_xml) poolxml = p_xml.xml logging.debug("XML after update host name:\n%s" % p_xml) if customize_initiator_iqn: initiator_iqn = params.get("initiator_iqn", "iqn.2018-07.com.virttest:pool.target") p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) s_node = p_xml.xmltreefile.find('/source') i_node = ET.SubElement(s_node, 'initiator') ET.SubElement(i_node, 'iqn', {'name': initiator_iqn}) p_xml.xmltreefile.write() poolxml = p_xml.xml logging.debug('XML after add Multi-IQN:\n%s' % p_xml) # Step (4) # Undefine pool if not same_source_test: result = virsh.pool_undefine(pool_name) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(poolxml, debug=True) # Give error msg when exit status is not expected if "/" in new_pool_name and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if "." in new_pool_name and result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if same_source_test and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) utlv.check_exit_status(result, status_error) if not result.exit_status: # Step (6) # Buid pool # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if new_pool_name: pool_name = new_pool_name if pool_type != "logical": result = virsh.pool_build(pool_name, build_option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, debug=True, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_items = ["Capacity", "Allocation", "Available"] for i in check_items: check_pool_info(pool_info, i, new_info[i]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): test.fail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(**params) utlv.setup_or_cleanup_iscsi(False) except exceptions.TestFail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(poolxml): os.remove(poolxml)
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") new_pool_name = params.get("new_pool_name", "") build_option = params.get("build_option", "") same_source_test = "yes" == params.get("same_source_test", "no") customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn", "no") # The file for dumped pool xml poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get('ip_protocal', 'ipv4') if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": test.cancel("Gluster pool is not supported in current" " libvirt version.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) utlv.check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: test.fail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: test.fail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) utlv.check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip())) for item in output: if vol_name in item[0]: found = True break if found: logging.debug( "Find volume '%s' in pool '%s'.", vol_name, pool_name) else: test.fail( "Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def is_in_range(actual, expected, error_percent): deviation = 100 - (100 * (float(actual) / float(expected))) logging.debug("Deviation: %0.2f%%", float(deviation)) return float(deviation) <= float(error_percent) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: test.fail("Pool info dictionary is needed.") val_tup = ('Capacity', 'Allocation', 'Available') if check_point in val_tup and float(value.split()[0]): # As from bytes to GiB, could cause deviation, and it should not # exceed 1 percent. if is_in_range(float(pool_info[check_point].split()[0]), float(value.split()[0]), 1): logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) else: if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: test.fail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = {'image_size': '1G', 'pre_disk_vol': ['100M'], 'source_name': source_name, 'source_path': source_path, 'source_format': source_format, 'persistent': True, 'ip_protocal': ip_protocal} try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=poolxml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Update pool name if new_pool_name: if "/" in new_pool_name: new_pool_name = new_pool_name.replace("/", "\/") logging.debug(new_pool_name) p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) p_xml.name = new_pool_name del p_xml.uuid poolxml = p_xml.xml logging.debug("XML after update pool name:\n%s" % p_xml) # Update host name if same_source_test: s_xml = p_xml.get_source() s_xml.host_name = "192.168.1.1" p_xml.set_source(s_xml) poolxml = p_xml.xml logging.debug("XML after update host name:\n%s" % p_xml) if customize_initiator_iqn: initiator_iqn = params.get("initiator_iqn", "iqn.2018-07.com.virttest:pool.target") p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name) s_node = p_xml.xmltreefile.find('/source') i_node = ET.SubElement(s_node, 'initiator') ET.SubElement(i_node, 'iqn', {'name': initiator_iqn}) p_xml.xmltreefile.write() poolxml = p_xml.xml logging.debug('XML after add Multi-IQN:\n%s' % p_xml) # Step (4) # Undefine pool if not same_source_test: result = virsh.pool_undefine(pool_name) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(poolxml, debug=True) # Give error msg when exit status is not expected if "/" in new_pool_name and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if "." in new_pool_name and result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) if same_source_test and not result.exit_status: error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 " error_msg += "is helpful for tracing this bug." logging.error(error_msg) utlv.check_exit_status(result, status_error) if not result.exit_status: # Step (6) # Buid pool # '--overwrite/--no-overwrite' just for fs/disk/logiacl type pool # disk/fs pool: as prepare step already make label and create filesystem # for the disk, use '--overwrite' is necessary # logical_pool: build pool will fail if VG already exist, BZ#1373711 if new_pool_name: pool_name = new_pool_name if pool_type != "logical": result = virsh.pool_build(pool_name, build_option, ignore_status=True) utlv.check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, debug=True, ignore_status=True) utlv.check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) utlv.check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type not in ["dir", 'scsi']: result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) utlv.check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) utlv.check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") utlv.check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_items = ["Capacity", "Allocation", "Available"] for i in check_items: check_pool_info(pool_info, i, new_info[i]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: test.fail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) utlv.check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): test.fail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) utlv.check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) utlv.check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) utlv.setup_or_cleanup_iscsi(False) except exceptions.TestFail as detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(poolxml): os.remove(poolxml)
def run(test, params, env): """ Test the virsh pool commands with acl, initiate a pool then do following operations. (1) Undefine a given type pool (2) Define the pool from xml (3) Build given type pool (4) Start pool (5) Destroy pool (6) Refresh pool after start it (7) Run vol-list with the pool (9) Delete pool For negative cases, redo failed step to make the case run continue. Run cleanup at last restore env. """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name vg_name = pool_name vol_path = os.path.join(pool_target, vol_name) define_acl = "yes" == params.get("define_acl", "no") undefine_acl = "yes" == params.get("undefine_acl", "no") start_acl = "yes" == params.get("start_acl", "no") destroy_acl = "yes" == params.get("destroy_acl", "no") build_acl = "yes" == params.get("build_acl", "no") delete_acl = "yes" == params.get("delete_acl", "no") refresh_acl = "yes" == params.get("refresh_acl", "no") vol_list_acl = "yes" == params.get("vol_list_acl", "no") src_pool_error = "yes" == params.get("src_pool_error", "no") define_error = "yes" == params.get("define_error", "no") undefine_error = "yes" == params.get("undefine_error", "no") start_error = "yes" == params.get("start_error", "no") destroy_error = "yes" == params.get("destroy_error", "no") build_error = "yes" == params.get("build_error", "no") delete_error = "yes" == params.get("delete_error", "no") refresh_error = "yes" == params.get("refresh_error", "no") vol_list_error = "yes" == params.get("vol_list_error", "no") # Clean up flags: # cleanup_env[0] for nfs, cleanup_env[1] for iscsi, cleanup_env[2] for lvm cleanup_env = [False, False, False] # libvirt acl related params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" + " libvirt version.") acl_dargs = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True} def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) # Run Testcase try: _pool = libvirt_storage.StoragePool() # Init a pool for test result = utils_test.libvirt.define_pool(pool_name, pool_type, pool_target, cleanup_env) check_exit_status(result, src_pool_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (1) # Undefine pool if undefine_acl: result = virsh.pool_undefine(pool_name, **acl_dargs) else: result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result, undefine_error) if undefine_error: check_pool_list(pool_name, "--all", False) # Redo under negative case to keep case continue result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) else: check_pool_list(pool_name, "--all", True) # Step (2) # Define pool from XML file if define_acl: result = virsh.pool_define(pool_xml, **acl_dargs) else: result = virsh.pool_define(pool_xml) check_exit_status(result, define_error) if define_error: # Redo under negative case to keep case continue result = virsh.pool_define(pool_xml) check_exit_status(result) # Step (3) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' if build_acl: result = virsh.pool_build(pool_name, option, **acl_dargs) else: result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result, build_error) if build_error: # Redo under negative case to keep case continue result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (4) # Pool start if start_acl: result = virsh.pool_start(pool_name, **acl_dargs) else: result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, start_error) if start_error: # Redo under negative case to keep case continue result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (5) # Pool destroy if destroy_acl: result = virsh.pool_destroy(pool_name, **acl_dargs) else: result = virsh.pool_destroy(pool_name) if result: if destroy_error: raise error.TestFail("Expect fail, but run successfully.") else: if not destroy_error: raise error.TestFail("Pool %s destroy failed, not expected." % pool_name) else: # Redo under negative case to keep case continue if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (6) # Pool refresh for 'dir' type pool # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) if pool_type == "dir": os.mknod(vol_path) if refresh_acl: result = virsh.pool_refresh(pool_name, **acl_dargs) else: result = virsh.pool_refresh(pool_name) check_exit_status(result, refresh_error) # Step (7) # Pool vol-list if vol_list_acl: result = virsh.vol_list(pool_name, **acl_dargs) else: result = virsh.vol_list(pool_name) check_exit_status(result, vol_list_error) # Step (8) # Pool delete for 'dir' type pool if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) if pool_type == "dir": if os.path.exists(vol_path): os.remove(vol_path) if delete_acl: result = virsh.pool_delete(pool_name, **acl_dargs) else: result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result, delete_error) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if not delete_error: if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up if os.path.exists(pool_xml): os.remove(pool_xml) if not _pool.delete_pool(pool_name): logging.error("Can't delete pool: %s", pool_name) if cleanup_env[2]: cmd = "pvs |grep %s |awk '{print $1}'" % vg_name pv_name = utils.system_output(cmd) lv_utils.vg_remove(vg_name) utils.run("pvremove %s" % pv_name) if cleanup_env[1]: utils_test.libvirt.setup_or_cleanup_iscsi(False) if cleanup_env[0]: utils_test.libvirt.setup_or_cleanup_nfs(False)
def run(test, params, env): """ Test the virsh pool commands (1) Define a given type pool (2) List pool with '--inactive --type' options (3) Dumpxml for the pool (4) Undefine the pool (5) Define pool by using the XML file in step (3) (6) Build the pool(except 'disk' type pool For 'fs' type pool, cover --overwrite and --no-overwrite options (7) Start the pool (8) List pool with '--persistent --type' options (9) Mark pool autostart (10) List pool with '--autostart --type' options (11) Restart libvirtd and list pool with '--autostart --persistent' options (12) Destroy the pool (13) Unmark pool autostart (14) Repeat step (11) (15) Start the pool (16) Get pool info (17) Get pool uuid by name (18) Get pool name by uuid (19) Refresh the pool For 'dir' type pool, touch a file under target path and refresh again to make the new file show in vol-list. (20) Check pool 'Capacity', 'Allocation' and 'Available' Create a over size vol in pool(expect fail), then check these values (21) Undefine the pool, and this should fail as pool is still active (22) Destroy the pool (23) Delete pool for 'dir' type pool. After the command, the pool object will still exist but target path will be deleted (24) Undefine the pool """ # Initialize the variables pool_name = params.get("pool_name", "temp_pool_1") pool_type = params.get("pool_type", "dir") pool_target = params.get("pool_target", "") source_format = params.get("source_format", "") source_name = params.get("pool_source_name", "gluster-vol1") source_path = params.get("pool_source_path", "/") # The file for dumped pool xml pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp") if os.path.dirname(pool_target) is "": pool_target = os.path.join(test.tmpdir, pool_target) vol_name = params.get("vol_name", "temp_vol_1") # Use pool name as VG name status_error = "yes" == params.get("status_error", "no") vol_path = os.path.join(pool_target, vol_name) ip_protocal = params.get("ip_protocal", "ipv4") if not libvirt_version.version_compare(1, 0, 0): if pool_type == "gluster": raise error.TestNAError("Gluster pool is not supported in current" " libvirt version.") def check_exit_status(result, expect_error=False): """ Check the exit status of virsh commands. :param result: Virsh command result object :param expect_error: Boolean value, expect command success or fail """ if not expect_error: if result.exit_status != 0: raise error.TestFail(result.stderr) else: logging.debug("Command output:\n%s", result.stdout.strip()) elif expect_error and result.exit_status == 0: raise error.TestFail("Expect fail, but run successfully.") def check_pool_list(pool_name, option="--all", expect_error=False): """ Check pool by running pool-list command with given option. :param pool_name: Name of the pool :param option: option for pool-list command :param expect_error: Boolean value, expect command success or fail """ found = False # Get the list stored in a variable result = virsh.pool_list(option, ignore_status=True) check_exit_status(result, False) output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if pool_name in item[0]: found = True break if found: logging.debug("Find pool '%s' in pool list.", pool_name) else: logging.debug("Not find pool %s in pool list.", pool_name) if expect_error and found: raise error.TestFail("Unexpect pool '%s' exist." % pool_name) if not expect_error and not found: raise error.TestFail("Expect pool '%s' doesn't exist." % pool_name) def check_vol_list(vol_name, pool_name): """ Check volume from the list :param vol_name: Name of the volume :param pool_name: Name of the pool """ found = False # Get the volume list stored in a variable result = virsh.vol_list(pool_name, ignore_status=True) check_exit_status(result) output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout)) for item in output: if vol_name in item[0]: found = True break if found: logging.debug("Find volume '%s' in pool '%s'.", vol_name, pool_name) else: raise error.TestFail("Not find volume '%s' in pool '%s'." % (vol_name, pool_name)) def check_pool_info(pool_info, check_point, value): """ Check the pool name, uuid, etc. :param pool_info: A dict include pool's information :param key: Key of pool info dict, available value: Name, UUID, State Persistent, Autostart, Capacity, Allocation, Available :param value: Expect value of pool_info[key] """ if pool_info is None: raise error.TestFail("Pool info dictionary is needed.") if pool_info[check_point] == value: logging.debug("Pool '%s' is '%s'.", check_point, value) else: raise error.TestFail("Pool '%s' isn't '%s'." % (check_point, value)) # Stop multipathd to avoid start pool fail(For fs like pool, the new add # disk may in use by device-mapper, so start pool will report disk already # mounted error). multipathd = service.Factory.create_service("multipathd") multipathd_status = multipathd.status() if multipathd_status: multipathd.stop() # Run Testcase pvt = utlv.PoolVolumeTest(test, params) emulated_image = "emulated-image" kwargs = { "image_size": "1G", "pre_disk_vol": ["1M"], "source_name": source_name, "source_path": source_path, "source_format": source_format, "persistent": True, "ip_protocal": ip_protocal, } try: _pool = libvirt_storage.StoragePool() # Step (1) # Pool define pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) # Step (2) # Pool list option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) # Step (3) # Pool dumpxml xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml) logging.debug("Pool '%s' XML:\n%s", pool_name, xml) # Step (4) # Undefine pool result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) # Step (5) # Define pool from XML file result = virsh.pool_define(pool_xml) check_exit_status(result, status_error) # Step (6) # Buid pool, this step may fail for 'disk' and 'logical' types pool if pool_type not in ["disk", "logical"]: option = "" # Options --overwrite and --no-overwrite can only be used to # build a filesystem pool, but it will fail for now # if pool_type == "fs": # option = '--overwrite' result = virsh.pool_build(pool_name, option, ignore_status=True) check_exit_status(result) # Step (7) # Pool start result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (8) # Pool list option = "--persistent --type %s" % pool_type check_pool_list(pool_name, option) # Step (9) # Pool autostart result = virsh.pool_autostart(pool_name, ignore_status=True) check_exit_status(result) # Step (10) # Pool list option = "--autostart --type %s" % pool_type check_pool_list(pool_name, option) # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.libvirtd_restart() option = "--autostart --persistent" check_pool_list(pool_name, option) # Step (12) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (13) # Pool autostart disable result = virsh.pool_autostart(pool_name, "--disable", ignore_status=True) check_exit_status(result) # Step (14) # Repeat step (11) utils_libvirtd.libvirtd_restart() option = "--autostart" check_pool_list(pool_name, option, True) # Step (15) # Pool start # When libvirtd starts up, it'll check to see if any of the storage # pools have been activated externally. If so, then it'll mark the # pool as active. This is independent of autostart. # So a directory based storage pool is thus pretty much always active, # and so as the SCSI pool. if pool_type != "scsi" and (pool_type != "dir" or libvirt_version.version_compare(1, 2, 15)): result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result) # Step (16) # Pool info pool_info = _pool.pool_info(pool_name) logging.debug("Pool '%s' info:\n%s", pool_name, pool_info) # Step (17) # Pool UUID result = virsh.pool_uuid(pool_info["Name"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "UUID", result.stdout.strip()) # Step (18) # Pool Name result = virsh.pool_name(pool_info["UUID"], ignore_status=True) check_exit_status(result) check_pool_info(pool_info, "Name", result.stdout.strip()) # Step (19) # Pool refresh for 'dir' type pool if pool_type == "dir": os.mknod(vol_path) result = virsh.pool_refresh(pool_name) check_exit_status(result) check_vol_list(vol_name, pool_name) # Step (20) # Create an over size vol in pool(expect fail), then check pool: # 'Capacity', 'Allocation' and 'Available' # For NFS type pool, there's a bug(BZ#1077068) about allocate volume, # and glusterfs pool not support create volume, so not test them if pool_type != "netfs": vol_capacity = "10000G" vol_allocation = "10000G" result = virsh.vol_create_as("oversize_vol", pool_name, vol_capacity, vol_allocation, "raw") check_exit_status(result, True) new_info = _pool.pool_info(pool_name) check_pool_info(pool_info, "Capacity", new_info["Capacity"]) check_pool_info(pool_info, "Allocation", new_info["Allocation"]) check_pool_info(pool_info, "Available", new_info["Available"]) # Step (21) # Undefine pool, this should fail as the pool is active result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result, expect_error=True) check_pool_list(pool_name, "", False) # Step (22) # Pool destroy if virsh.pool_destroy(pool_name): logging.debug("Pool %s destroyed.", pool_name) else: raise error.TestFail("Destroy pool % failed." % pool_name) # Step (23) # Pool delete for 'dir' type pool if pool_type == "dir": for f in os.listdir(pool_target): os.remove(os.path.join(pool_target, f)) result = virsh.pool_delete(pool_name, ignore_status=True) check_exit_status(result) option = "--inactive --type %s" % pool_type check_pool_list(pool_name, option) if os.path.exists(pool_target): raise error.TestFail("The target path '%s' still exist." % pool_target) result = virsh.pool_start(pool_name, ignore_status=True) check_exit_status(result, True) # Step (24) # Pool undefine result = virsh.pool_undefine(pool_name, ignore_status=True) check_exit_status(result) check_pool_list(pool_name, "--all", True) finally: # Clean up try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **kwargs) except error.TestFail, detail: logging.error(str(detail)) if multipathd_status: multipathd.start() if os.path.exists(pool_xml): os.remove(pool_xml)
def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip(disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ( "--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool( pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err)
def create_pool(): """ Define and start a pool. """ sp = libvirt_storage.StoragePool() if create_by_xml: p_xml = pool_xml.PoolXML(pool_type=pool_type) p_xml.name = pool_name s_xml = pool_xml.SourceXML() s_xml.vg_name = disk_src_pool source_host = [] for (host_name, host_port) in zip( disk_src_host.split(), disk_src_port.split()): source_host.append({'name': host_name, 'port': host_port}) s_xml.hosts = source_host if auth_type: s_xml.auth_type = auth_type if auth_user: s_xml.auth_username = auth_user if auth_usage: s_xml.secret_usage = auth_usage p_xml.source = s_xml logging.debug("Pool xml: %s", p_xml) p_xml.xmltreefile.write() ret = virsh.pool_define(p_xml.xml, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_build(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_start(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) else: auth_opt = "" if client_name and client_key: auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'" % (auth_type, auth_user, auth_usage)) if not sp.define_rbd_pool(pool_name, mon_host, disk_src_pool, extra=auth_opt): test.fail("Failed to define storage pool") if not sp.build_pool(pool_name): test.fail("Failed to build storage pool") if not sp.start_pool(pool_name): test.fail("Failed to start storage pool") # Check pool operation ret = virsh.pool_refresh(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) ret = virsh.pool_uuid(pool_name, **virsh_dargs) libvirt.check_exit_status(ret) # pool-info pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'no': test.fail("Failed to check pool information") # pool-autostart if not sp.set_pool_autostart(pool_name): test.fail("Failed to set pool autostart") pool_info = sp.pool_info(pool_name) if pool_info["Autostart"] != 'yes': test.fail("Failed to check pool information") # pool-autostart --disable if not sp.set_pool_autostart(pool_name, "--disable"): test.fail("Failed to set pool autostart") # If port is not pre-configured, port value should not be hardcoded in pool information. if "yes" == params.get("rbd_port", "no"): if 'port' in virsh.pool_dumpxml(pool_name): test.fail("port attribute should not be in pool information") # find-storage-pool-sources-as if "yes" == params.get("find_storage_pool_sources_as", "no"): ret = virsh.find_storage_pool_sources_as("rbd", mon_host) libvirt.check_result(ret, skip_if=unsupported_err)
def run(test, params, env): """ This test cover two volume commands: vol-clone and vol-wipe. 1. Create a given type pool. 2. Create a given format volume in the pool. 3. Clone the new create volume. 4. Wipe the new clone volume. 5. Delete the volume and pool. """ pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") if not os.path.dirname(pool_target): pool_target = os.path.join(test.tmpdir, pool_target) emulated_image = params.get("emulated_image") emulated_image_size = params.get("emulated_image_size") vol_name = params.get("vol_name") new_vol_name = params.get("new_vol_name") vol_capability = params.get("vol_capability") vol_format = params.get("vol_format") clone_option = params.get("clone_option", "") wipe_algorithms = params.get("wipe_algorithms") if virsh.has_command_help_match("vol-wipe", "--prealloc-metadata") is None: if "prealloc-metadata" in clone_option: raise error.TestNAError("Option --prealloc-metadata " "is not supported.") # Using algorithms other than zero need scrub installed. try: utils_misc.find_command('scrub') except ValueError: logging.warning("Can't locate scrub binary, only 'zero' algorithm " "is used.") valid_algorithms = ["zero"] else: valid_algorithms = [ "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7", "pfitzner33", "random" ] # Choose an algorithms randomly if wipe_algorithms: alg = random.choice(wipe_algorithms.split()) else: alg = random.choice(valid_algorithms) clone_status_error = "yes" == params.get("clone_status_error", "no") wipe_status_error = "yes" == params.get("wipe_status_error", "no") setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit") # libvirt acl polkit related params uri = params.get("virsh_uri") unpri_user = params.get('unprivileged_user') if unpri_user: if unpri_user.count('EXAMPLE'): unpri_user = '******' if not libvirt_version.version_compare(1, 1, 1): if setup_libvirt_polkit: raise error.TestNAError("API acl test not supported in current" " libvirt version.") del_pool = True libv_pvt = libvirt.PoolVolumeTest(test, params) try: libv_pool = libvirt_storage.StoragePool() while libv_pool.pool_exists(pool_name): logging.debug("Use exist pool '%s'", pool_name) del_pool = False else: # Create a new pool disk_vol = [] if pool_type == 'disk': disk_vol.append(params.get("pre_vol", '10M')) libv_pvt.pre_pool(pool_name=pool_name, pool_type=pool_type, pool_target=pool_target, emulated_image=emulated_image, image_size=emulated_image_size, pre_disk_vol=disk_vol) libv_vol = libvirt_storage.PoolVolume(pool_name) if libv_vol.volume_exists(vol_name): logging.debug("Use exist volume '%s'", vol_name) elif vol_format in ['raw', 'qcow2', 'qed', 'vmdk']: # Create a new volume libv_pvt.pre_vol(vol_name=vol_name, vol_format=vol_format, capacity=vol_capability, allocation=None, pool_name=pool_name) elif vol_format == 'partition': vol_name = libv_vol.list_volumes().keys()[0] logging.debug("Partition %s in disk pool is volume" % vol_name) elif vol_format == 'sparse': # Create a sparse file in pool sparse_file = pool_target + '/' + vol_name cmd = "dd if=/dev/zero of=" + sparse_file cmd += " bs=1 count=0 seek=" + vol_capability utils.run(cmd) else: raise error.TestError("Unknown volume format %s" % vol_format) # Refresh the pool virsh.pool_refresh(pool_name) vol_info = libv_vol.volume_info(vol_name) for key in vol_info: logging.debug("Original volume info: %s = %s", key, vol_info[key]) # Metadata preallocation is not support for block volume if vol_info["Type"] == "block" and clone_option.count( "prealloc-metadata"): clone_status_error = True if pool_type == "disk": new_vol_name = libvirt.new_disk_vol_name(pool_name) if new_vol_name is None: raise error.TestError("Fail to generate volume name") # update polkit rule as the volume name changed if setup_libvirt_polkit: vol_pat = r"lookup\('vol_name'\) == ('\S+')" new_value = "lookup('vol_name') == '%s'" % new_vol_name libvirt.update_polkit_rule(params, vol_pat, new_value) # Clone volume clone_result = virsh.vol_clone(vol_name, new_vol_name, pool_name, clone_option, debug=True) if not clone_status_error: if clone_result.exit_status != 0: raise error.TestFail("Clone volume fail:\n%s" % clone_result.stderr.strip()) else: vol_info = libv_vol.volume_info(new_vol_name) for key in vol_info: logging.debug("Cloned volume info: %s = %s", key, vol_info[key]) logging.debug("Clone volume successfully.") # Wipe the new clone volume if alg: logging.debug("Wiping volume by '%s' algorithm", alg) wipe_result = virsh.vol_wipe(new_vol_name, pool_name, alg, unprivileged_user=unpri_user, uri=uri, debug=True) unsupported_err = [ "Unsupported algorithm", "no such pattern sequence" ] if not wipe_status_error: if wipe_result.exit_status != 0: if any(err in wipe_result.stderr for err in unsupported_err): raise error.TestNAError(wipe_result.stderr) raise error.TestFail("Wipe volume fail:\n%s" % clone_result.stdout.strip()) else: virsh_vol_info = libv_vol.volume_info(new_vol_name) for key in virsh_vol_info: logging.debug("Wiped volume info(virsh): %s = %s", key, virsh_vol_info[key]) vol_path = virsh.vol_path(new_vol_name, pool_name).stdout.strip() qemu_vol_info = utils_misc.get_image_info(vol_path) for key in qemu_vol_info: logging.debug("Wiped volume info(qemu): %s = %s", key, qemu_vol_info[key]) if qemu_vol_info['format'] != 'raw': raise error.TestFail("Expect wiped volume " "format is raw") elif wipe_status_error and wipe_result.exit_status == 0: raise error.TestFail("Expect wipe volume fail, but run" " successfully.") elif clone_status_error and clone_result.exit_status == 0: raise error.TestFail("Expect clone volume fail, but run" " successfully.") finally: # Clean up try: if del_pool: libv_pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) else: # Only delete the volumes libv_vol = libvirt_storage.PoolVolume(pool_name) for vol in [vol_name, new_vol_name]: libv_vol.delete_volume(vol) except error.TestFail, detail: logging.error(str(detail))
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash,fsid=0") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: raise error.TestNAError("%s value '%s' is not a number." % (i, params.get(i))) img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in disks.values(): disk_path = disk['source'] f = os.open(disk_path, 0) stat_re = os.fstat(f) backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid, stat_re.st_gid) os.close(f) # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for disk in disks.values(): disk_path = disk['source'] if qemu_user == "root": os.chown(disk_path, 0, 0) elif qemu_user == "qemu": os.chown(disk_path, 107, 107) # Set selinux of host. utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s" % qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s" % export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs) if result.exit_status: raise error.TestNAError("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: raise error.TestNAError("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: raise error.TestFail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug("attached image ownership on nfs server before " "start: %s" % img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug("attached image ownership on nfs server after" " start: %s" % img_label_after) if status_error: raise error.TestFail('Test succeeded in negative case.') except virt_vm.VMStartError, e: # Starting VM failed. if not status_error: raise error.TestFail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: raise error.TestFail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in disks_snap.values(): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except error.CmdError: raise error.TestFail("Detach disk 'vdf' from VM %s failed." % vm.name)
def run(test, params, env): """ Test DAC in adding nfs pool disk to VM. (1).Init variables for test. (2).Create nfs pool and vol. (3).Attach the nfs pool vol to VM. (4).Start VM and check result. """ # Get general variables. status_error = ('yes' == params.get("status_error", 'no')) host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing") # Get qemu.conf config variables qemu_user = params.get("qemu_user") qemu_group = params.get("qemu_group") dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes") # Get variables about pool vol virt_use_nfs = params.get("virt_use_nfs", "off") nfs_server_dir = params.get("nfs_server_dir", "nfs-server") pool_name = params.get("pool_name") pool_type = params.get("pool_type") pool_target = params.get("pool_target") export_options = params.get("export_options", "rw,async,no_root_squash") emulated_image = params.get("emulated_image") vol_name = params.get("vol_name") vol_format = params.get("vol_format") bk_file_name = params.get("bk_file_name") # Get pool vol variables img_tup = ("img_user", "img_group", "img_mode") img_val = [] for i in img_tup: try: img_val.append(int(params.get(i))) except ValueError: test.cancel("%s value '%s' is not a number." % (i, params.get(i))) # False positive - img_val was filled in the for loop above. # pylint: disable=E0632 img_user, img_group, img_mode = img_val # Get variables about VM and get a VM object and VMXML instance. vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vm_os_xml = vmxml.os # Backup domain disk label disks = vm.get_disk_devices() backup_labels_of_disks = {} for disk in list(disks.values()): disk_path = disk['source'] label = check_ownership(disk_path) if label: backup_labels_of_disks[disk_path] = label try: if vm_os_xml.nvram: nvram_path = vm_os_xml.nvram if not os.path.exists(nvram_path): # Need libvirt automatically generate the path vm.start() vm.destroy(gracefully=False) label = check_ownership(nvram_path) if label: backup_labels_of_disks[nvram_path] = label except xcepts.LibvirtXMLNotFoundError: logging.debug("vm xml don't have nvram element") # Backup selinux status of host. backup_sestatus = utils_selinux.get_status() pvt = None snapshot_name = None disk_snap_path = [] qemu_conf = utils_config.LibvirtQemuConfig() libvirtd = utils_libvirtd.Libvirtd() try: # chown domain disk to qemu:qemu to avoid fail on local disk for file_path in list(backup_labels_of_disks.keys()): if qemu_user == "root": os.chown(file_path, 0, 0) elif qemu_user == "qemu": os.chown(file_path, 107, 107) else: process.run('chown %s %s' % (qemu_user, file_path), shell=True) # Set selinux of host. if backup_sestatus == "disabled": test.cancel("SELinux is in Disabled mode." "It must be Enabled to" "run this test") utils_selinux.set_status(host_sestatus) # set qemu conf qemu_conf.user = qemu_user qemu_conf.group = qemu_user if dynamic_ownership: qemu_conf.dynamic_ownership = 1 else: qemu_conf.dynamic_ownership = 0 logging.debug("the qemu.conf content is: %s", qemu_conf) libvirtd.restart() # Create dst pool for create attach vol img logging.debug("export_options is: %s", export_options) pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image, image_size="1G", pre_disk_vol=["20M"], export_options=export_options) # set virt_use_nfs result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True) if result.exit_status: test.cancel("Failed to set virt_use_nfs value") # Init a QemuImg instance and create img on nfs server dir. params['image_name'] = vol_name tmp_dir = data_dir.get_tmp_dir() nfs_path = os.path.join(tmp_dir, nfs_server_dir) image = qemu_storage.QemuImg(params, nfs_path, vol_name) # Create a image. server_img_path, result = image.create(params) if params.get("image_name_backing_file"): params['image_name'] = bk_file_name params['has_backing_file'] = "yes" image = qemu_storage.QemuImg(params, nfs_path, bk_file_name) server_img_path, result = image.create(params) # Get vol img path vol_name = server_img_path.split('/')[-1] virsh.pool_refresh(pool_name, debug=True) cmd_result = virsh.vol_path(vol_name, pool_name, debug=True) if cmd_result.exit_status: test.cancel("Failed to get volume path from pool.") img_path = cmd_result.stdout.strip() # Do the attach action. extra = "--persistent --subdriver qcow2" result = virsh.attach_disk(vm_name, source=img_path, target="vdf", extra=extra, debug=True) if result.exit_status: test.fail("Failed to attach disk %s to VM." "Detail: %s." % (img_path, result.stderr)) # Change img ownership and mode on nfs server dir os.chown(server_img_path, img_user, img_group) os.chmod(server_img_path, img_mode) img_label_before = check_ownership(server_img_path) if img_label_before: logging.debug("attached image ownership on nfs server before " "start: %s", img_label_before) # Start VM to check the VM is able to access the image or not. try: vm.start() # Start VM successfully. img_label_after = check_ownership(server_img_path) if img_label_after: logging.debug("attached image ownership on nfs server after" " start: %s", img_label_after) if status_error: test.fail('Test succeeded in negative case.') except virt_vm.VMStartError as e: # Starting VM failed. if not status_error: test.fail("Test failed in positive case." "error: %s" % e) if params.get("image_name_backing_file"): options = "--disk-only" snapshot_result = virsh.snapshot_create(vm_name, options, debug=True) if snapshot_result.exit_status: if not status_error: test.fail("Failed to create snapshot. Error:%s." % snapshot_result.stderr.strip()) snapshot_name = re.search( "\d+", snapshot_result.stdout.strip()).group(0) if snapshot_name: disks_snap = vm.get_disk_devices() for disk in list(disks_snap.values()): disk_snap_path.append(disk['source']) virsh.snapshot_delete(vm_name, snapshot_name, "--metadata", debug=True) try: virsh.detach_disk(vm_name, target="vdf", extra="--persistent", debug=True) except process.CmdError: test.fail("Detach disk 'vdf' from VM %s failed." % vm.name) finally: # clean up vm.destroy() qemu_conf.restore() for path, label in list(backup_labels_of_disks.items()): label_list = label.split(":") os.chown(path, int(label_list[0]), int(label_list[1])) if snapshot_name: backup_xml.sync("--snapshots-metadata") else: backup_xml.sync() for i in disk_snap_path: if i and os.path.exists(i): os.unlink(i) if pvt: try: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image) except test.fail as detail: logging.error(str(detail)) utils_selinux.set_status(backup_sestatus) libvirtd.restart()