def run(test, params, env):
    """
    Test command: virsh pool-define; pool-start; vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test requires a wwpn
    and wwnn of a vHBA on the host which is zoned and mapped to a SAN
    controller.

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped
    to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None
    # Initialize here so the cleanup in 'finally' does not hit a NameError
    # when an error occurs before the disk xml is generated.
    disk_xml = ""

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params,
                           vm.root_dir, vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exists" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online hba cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    if pool_adapter_parent == "":
        pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when preparing pool xml:\n %s" % e)

    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Cannot get attached device in vm.")
        logging.debug("Attached device in vm: %s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)
    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
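
# --- Illustrative sketch (not part of the original test modules) ---
# The run() functions in this section call a module-level mount_and_dd()
# helper and reference a _DELAY_TIME constant that live at the top of each
# original test file, next to the usual imports (os, logging,
# shutil.copyfile, avocado.core.exceptions, avocado.utils.process, and the
# virttest modules such as virsh, utils_misc, libvirt_storage, libvirt_xml
# and utils_test.libvirt as utlv). The sketch below is a hedged
# re-implementation for readability only; the value of _DELAY_TIME and the
# exact commands in the real helper may differ.

_DELAY_TIME = 10  # assumed polling interval in seconds


def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    """
    Mount the given disk inside the guest and write to it with dd.

    Returns True when the mount succeeds and the dd write completes.
    Illustrative re-implementation, not the original helper.
    """
    status, output = session.cmd_status_output(
        "mount %s %s" % (mount_disk, mount_point))
    logging.debug("mount output: %s", output)
    if status:
        return False
    status, output = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=4k count=8000" % mount_point,
        timeout=60)
    logging.debug("dd output: %s", output)
    return status == 0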
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped
    to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params,
                           vm.root_dir, vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exists" % pool_name)

    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": online_hbas_list[0],
            "scsi_wwnn": vhba_wwnn,
            "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas),
            timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": online_hbas_list[0],
            "scsi_wwnn": vhba_wwnn,
            "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas),
            timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)

    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when preparing pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True)
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as is only "
                                               "covered by the scsi pool "
                                               "in the NPIV test.")
            cmd = ("virsh pool-create-as %s %s --adapter-wwnn %s "
                   "--adapter-wwpn %s --adapter-parent %s --target %s"
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target))
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                volume_name, pool_name,
                volume_capacity, allocation,
                vol_format, "", debug=True)
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s', vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = ("virsh vol-list %s | grep \"%s\" | "
                   "awk '{FS=\" \"} {print $1}'"
                   % (pool_name, mpath_vol_path))
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if status:
                raise exceptions.TestFail("vol-list pool %s failed"
                                          % pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                session.close()
                raise exceptions.TestSkipError(
                    "mpath pool vol is not supported in virtual disk yet, "
                    "the error message is: %s" % dev_attach_status.stderr)
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools backing "
                                                   "'volume' disks aren't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)
        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Cannot get attached device in vm.")
        logging.debug("Attached device in vm: %s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)
    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(source_dev),
                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
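
# --- Illustrative example (not part of the original test modules) ---
# For reference, an NPIV 'scsi' pool dumped by `virsh pool-dumpxml` (the
# file written to pool_xml_f above) typically looks like the first snippet,
# and the pool-backed disk XML built by utlv.create_disk_xml() resembles the
# second one. The pool name, parent HBA, target device and the WWNN/WWPN
# values below are placeholders, not values used by the tests.
_EXAMPLE_NPIV_POOL_XML = """
<pool type='scsi'>
  <name>virt_test_pool_tmp</name>
  <source>
    <adapter type='fc_host' parent='scsi_host5'
             wwnn='20000000c9831b4b' wwpn='10000000c9831b4b'/>
  </source>
  <target>
    <path>/dev/disk/by-path</path>
  </target>
</pool>
"""

_EXAMPLE_VOLUME_DISK_XML = """
<disk type='volume' device='disk'>
  <driver name='qemu' type='raw'/>
  <source pool='virt_test_pool_tmp' volume='unit:0:0:1'/>
  <target dev='sdc' bus='virtio'/>
</disk>
"""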
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv, vg and pv;

    Create a libvirt npiv pool from a vHBA's device mapper device, create a
    volume out of the newly created pool, attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with a mpath device.
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")
    if not vm.is_alive():
        vm.start()

    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params,
                           vm.root_dir, vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("The old mpath devs are: %s", old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s", new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("The current mpath devs are: %s", cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))
    logging.debug("Newly added mpath devs are: %s", new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool", source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device"
            % new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exists" % pool_name)
    # If there are no online hba cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args,
            ignore_status=True, debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(volume_name, pool_name,
                                         volume_capacity, allocation,
                                         frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {'type_name': "file", 'target_dev': target_device,
                       'target_bus': "virtio", 'source_file': test_unit,
                       'driver_name': "qemu", 'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Cannot get attached device in vm.")
        logging.debug("Attached device in vm: %s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)

        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Cannot find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount successful across reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
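
# --- Illustrative example (not part of the original test modules) ---
# nodedev_create_from_xml() above is expected to build a node-device XML from
# the parent HBA and the WWNN/WWPN pair and feed it to `virsh nodedev-create`.
# A typical vHBA definition looks like the snippet below; the parent name and
# the WWNN/WWPN values are placeholders.
_EXAMPLE_VHBA_NODEDEV_XML = """
<device>
  <parent>scsi_host5</parent>
  <capability type='scsi_host'>
    <capability type='fc_host'>
      <wwnn>20000000c9831b4b</wwnn>
      <wwpn>10000000c9831b4b</wwpn>
    </capability>
  </capability>
</device>
"""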
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file can be given by the
    tester or generated by dumping the XML of a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    if "/PATH/TO/POOL.XML" in pool_xml_f:
        test.cancel("Please replace %s with a valid pool xml file"
                    % pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exists" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = ("sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s"
                       % (new_pool_name, pool_xml_f))
                process.run(cmd, shell=True)
            else:
                # The transient pool will be gone after being destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                process.run(cmd, shell=True)
            # Remove uuid
            cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
            process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when preparing pool xml:\n %s"
                       % details)

    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option,
                                       ignore_status=True, debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                test.fail("Newly created pool still uses the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expected to fail, but the command succeeded.")
            else:
                logging.debug("Command failed as expected")
    finally:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file can be given by the
    tester or generated by dumping the XML of a pre-defined pool.
    """
    pool_xml_f = params.get("pool_create_xml_file", "")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exists" % pool_name)

    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format,
              'emulated_image': "emulated-image", 'pool_target': pool_target,
              'pool_name': pool_name}
    params.update(kwargs)
    pvt = utlv.PoolVolumeTest(test, params)
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            pvt.pre_pool(**params)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = ("sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s"
                       % (new_pool_name, pool_xml_f))
                process.run(cmd, shell=True)
            else:
                # The transient pool will be gone after being destroyed
                virsh.pool_destroy(pool_name)
            new_source_format = params.get("new_pool_src_format")
            if new_source_format:
                cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                    source_format, new_source_format, pool_xml_f)
                process.run(cmd, shell=True)
            # Remove uuid
            cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
            process.run(cmd, shell=True)
        except Exception as details:
            pvt.cleanup_pool(**params)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when preparing pool xml:\n %s"
                       % details)

    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option,
                                       ignore_status=True, debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            if pool_detail['uuid'] == old_uuid:
                test.fail("Newly created pool still uses the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expected to fail, but the command succeeded.")
            else:
                logging.debug("Command failed as expected")
    finally:
        pvt.cleanup_pool(**params)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
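
# --- Illustrative example (not part of the original test modules) ---
# A hypothetical Cartesian config variant that could drive the pool-create
# test above. The parameter names match the params.get() calls in the code;
# the variant name and the values are placeholders, not the project's actual
# .cfg content:
#
#     - pool_create.dir_pool.positive:
#         pool_type = "dir"
#         pre_def_pool = "yes"
#         pool_create_name = "virt_test_pool_tmp"
#         pool_target = "pool_target"
#         status_error = "no"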