def run(test, params, env):
    """
    Replace the first virtual disk of two VMs with NPIV vHBA-backed block
    devices (by-path symlink or multipath device), concurrently create files
    in both VMs from threads, then reboot each VM and verify the files
    survive with the expected content.

    :param test: avocado test object, used for cancel/fail reporting
    :param params: test parameter dictionary
    :param env: test environment holding the VM objects
    """
    vm_names = params.get("vms", "avocado-vt-vm1 avocado-vt-vm2").split()
    vm0_disk_type = params.get("vm0_disk_type", "block")
    vm1_disk_type = params.get("vm1_disk_type", "block")
    vm0_vd_format = params.get("vm0_vd_format", "by_path")
    # BUG FIX: the lookup key was misspelled "vm1_vd_foramt", so any
    # configured vm1_vd_format was silently ignored and the default used.
    vm1_vd_format = params.get("vm1_vd_format", "by_path")
    vm0_disk_device = vm1_disk_device = params.get("disk_device", "disk")
    vm0_driver_name = vm1_driver_name = params.get("driver_name", "qemu")
    vm0_driver_type = vm1_driver_type = params.get("driver_type", "qcow2")
    vm0_device_target = vm1_device_target = params.get("device_target", "vda")
    vm0_target_bus = vm1_target_bus = params.get("target_bus", "virtio")
    vm0_wwnn = params.get("vm0_wwnn", "ENTER.WWNN.FOR.VM0")
    vm0_wwpn = params.get("vm0_wwpn", "ENTER.WWPN.FOR.VM0")
    vm1_wwnn = params.get("vm1_wwnn", "ENTER.WWNN.FOR.VM1")
    vm1_wwpn = params.get("vm1_wwpn", "ENTER.WWPN.FOR.VM1")
    # Per-VM settings, indexed by position in vm_names.
    disk_types = [vm0_disk_type, vm1_disk_type]
    vd_formats = [vm0_vd_format, vm1_vd_format]
    disk_devices = [vm0_disk_device, vm1_disk_device]
    driver_names = [vm0_driver_name, vm1_driver_name]
    driver_types = [vm0_driver_type, vm1_driver_type]
    device_targets = [vm0_device_target, vm1_device_target]
    target_buses = [vm0_target_bus, vm1_target_bus]
    wwnns = [vm0_wwnn, vm1_wwnn]
    wwpns = [vm0_wwpn, vm1_wwpn]
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)
    new_vhbas = []
    path_to_blks = []
    vmxml_backups = []
    vms = []
    try:
        online_hbas = utils_npiv.find_hbas("hba")
        if not online_hbas:
            test.cancel("There is no online hba cards.")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(
            conf_path=mpath_conf_path, replace_existing=True)
        first_online_hba = online_hbas[0]
        if len(vm_names) != 2:
            test.cancel("This test needs exactly 2 vms.")
        for vm_index in range(len(vm_names)):
            logging.debug("prepare vm %s", vm_names[vm_index])
            vm = env.get_vm(vm_names[vm_index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[vm_index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            old_vhbas = utils_npiv.find_hbas("vhba")
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnns[vm_index],
                 "scsi_wwpn": wwpns[vm_index]})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT*2)
            if not new_vhba:
                test.fail("vHBA not sucessfully generated.")
            new_vhbas.append(new_vhba)
            if vd_formats[vm_index] == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT*5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    test.fail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = os.path.join(_MPATH_DIR, new_mpath_devs[0])
            elif vd_formats[vm_index] == "by_path":
                # BUG FIX: raw string so "\D" is the regex "non-digit" class
                # rather than an invalid string escape.
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(
                    lambda: get_blks_by_scsi(test, new_vhba_scsibus),
                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(test, new_vhba_scsibus)
                if not new_blks:
                    test.fail("blk dev not found with scsi_%s"
                              % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(test, first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(test, first_blk_dev)
                if not lun_sl:
                    test.fail("lun symbolic links not found in "
                              "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BYPATH_DIR, lun_dev)
            path_to_blks.append(path_to_blk)
            # Copy the VM's current system image onto the new block device
            # and point the domain's first virtual disk at it.
            img_src = vm.get_first_disk_devices()['source']
            img_info = utils_misc.get_image_info(img_src)
            src_fmt = img_info["format"]
            dest_fmt = "qcow2"
            convert_img_to_dev(test, src_fmt, dest_fmt, img_src, path_to_blk)
            disk_obj = prepare_disk_obj(disk_types[vm_index],
                                        disk_devices[vm_index],
                                        driver_names[vm_index],
                                        driver_types[vm_index],
                                        path_to_blk,
                                        device_targets[vm_index],
                                        target_buses[vm_index])
            replace_vm_first_vd(vm_names[vm_index], disk_obj)
            if vm.is_dead():
                logging.debug("Start vm %s with updated vda",
                              vm_names[vm_index])
                vm.start()

        # concurrently create file in vm with threads
        create_file_in_vm_threads = []
        for vm in vms:
            cli_t = threading.Thread(target=create_file_in_vm,
                                     args=(vm, _VM_FILE_PATH,
                                           vm.name, _REPEAT,))
            logging.debug("Start creating file in vm: %s", vm.name)
            create_file_in_vm_threads.append(cli_t)
            cli_t.start()
        for thrd in create_file_in_vm_threads:
            thrd.join()

        # reboot vm and check if previously created file still exists with
        # correct content
        for vm in vms:
            session = vm.wait_for_login()
            session.cmd_status_output("sync")
            # BUG FIX: is_alive is a method; the old code tested the bound
            # method object ("if vm.is_alive:"), which is always truthy, so
            # the "not running" failure branch was unreachable.
            if vm.is_alive():
                vm.destroy(gracefully=True)
            else:
                test.fail("%s is not running" % vm.name)
            vm.start()
            session = vm.wait_for_login()
            if check_file_in_vm(session, _VM_FILE_PATH, vm.name, _REPEAT):
                logging.debug("file exists after reboot with correct content")
            else:
                test.fail("Failed to check the test file in vm")
            session.close()
    except Exception as detail:
        test.fail("Test failed with exception: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;

    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with a mpath device.
    """
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    # BUG FIX: these two guards originally ran only AFTER
    # online_hbas_list[0] had been dereferenced and the vHBA created, so an
    # HBA-less host died with IndexError instead of a clean skip, and a
    # pre-existing pool leaked a freshly created vHBA. Check up front.
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    # if no online hba cards on host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s", old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml({
        "nodedev_parent": first_online_hba,
        "scsi_wwnn": scsi_wwnn,
        "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s", new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)
    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s", cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(set(old_mpath_devs)))
    logging.debug("newly added mpath devs are: %s", new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool", source_dev)

    # The logical pool needs a partition table on the backing device.
    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device"
            % new_vhbas)

    try:
        # define/build/start the pool and sanity-check it is active
        cmd_result = virsh.pool_define_as(pool_name, pool_type, pool_target,
                                          pool_extra_args, ignore_status=True,
                                          debug=True)
        utlv.check_exit_status(cmd_result)
        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)
        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        # create a volume in the pool; keep the path of the last one
        cmd_result = virsh.vol_create_as(volume_name, pool_name,
                                         volume_capacity, allocation,
                                         frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        # hot-attach the volume to the guest and detect the new disk by
        # diffing the guest's disk list before/after
        disk_params = {'type_name': "file",
                       'target_dev': target_device,
                       'target_bus': "virtio",
                       'source_file': test_unit,
                       'driver_name': "qemu",
                       'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()
        bf_disks = libvirt_vm.get_disks()
        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(attach_success)
        logging.debug("Disks before attach: %s", bf_disks)
        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)
        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])
        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        # NOTE(review): persistence is only logged, never asserted — a
        # missing /mnt mount after reboot does not fail the test; confirm
        # whether that is intentional.
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            # BUG FIX: raw string for the regex escape class.
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                # BUG FIX: the value was passed as a second constructor arg
                # ("...%s", x) instead of being %-formatted into the message.
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(
                    lambda: get_blks_by_scsi(new_vhba_scsibus),
                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s"
                                              % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass"
                                           "virtual disk to VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            # BUG FIX: the avocado exception class is CmdError; referencing
            # process.cmdError raised AttributeError inside the handler.
            except process.CmdError as detail:
                raise exceptions.TestFail(
                    "Fail to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        # NOTE(review): relies on VMXMLDevices.append returning the device
        # list; with a plain list.append this would assign None — confirm
        # against the virttest vm_xml implementation.
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = (snapshot_dir + "/" + vm_blk + "."
                                 + snapshot_name)
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (snapshot_file
                                                                and vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file eixst = %s, file content = %s",
                          file_existence, file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requiste: Host needs to have a wwpn and wwnn of a vHBA which is
    zoned and mapped to SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params,
                           vm.root_dir, vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)

    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": online_hbas_list[0],
            "scsi_wwnn": vhba_wwnn,
            "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas),
            timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        # BUG FIX: "except Exception, e:" is Python 2-only syntax and is a
        # SyntaxError under Python 3; use the "as e" form like the rest of
        # the file.
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
    if define_pool_as == "yes":
        pool_extra_args = ""
        if source_dev:
            pool_extra_args = ' --source-dev %s' % source_dev
def run(test, params, env): """ Test command: virsh pool-define; pool-define-as; pool-start; vol-list pool; attach-device LUN to guest; mount the device; dd to the mounted device; unmount; pool-destroy; pool-undefine; Pre-requiste: Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to SAN controller. """ pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML") pool_name = params.get("pool_create_name", "virt_test_pool_tmp") pre_def_pool = params.get("pre_def_pool", "no") define_pool = params.get("define_pool", "no") define_pool_as = params.get("define_pool_as", "no") pool_create_as = params.get("pool_create_as", "no") need_pool_build = params.get("need_pool_build", "no") need_vol_create = params.get("need_vol_create", "no") pool_type = params.get("pool_type", "dir") source_format = params.get("pool_src_format", "") source_name = params.get("pool_source_name", "") source_path = params.get("pool_source_path", "/") pool_target = params.get("pool_target", "pool_target") pool_adapter_type = params.get("pool_adapter_type", "") pool_adapter_parent = params.get("pool_adapter_parent", "") target_device = params.get("disk_target_dev", "sdc") pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE") pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE") vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE") vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE") volume_name = params.get("volume_name", "imagefrommapper.qcow2") volume_capacity = params.get("volume_capacity", '1G') allocation = params.get("allocation", '1G') vol_format = params.get("volume_format", 'raw') attach_method = params.get("attach_method", "hot") test_unit = None mount_disk = None pool_kwargs = {} pool_extra_args = "" emulated_image = "emulated-image" disk_xml = "" new_vhbas = [] source_dev = "" mpath_vol_path = "" old_mpath_conf = "" mpath_conf_path = "/etc/multipath.conf" original_mpath_conf_exist = os.path.exists(mpath_conf_path) if pool_type == "scsi": if 
('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for npiv scsi pool.") if pool_type == "logical": if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for vhba.") online_hbas_list = utils_npiv.find_hbas("hba") logging.debug("The online hbas are: %s", online_hbas_list) old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, replace_existing=True) if not online_hbas_list: raise exceptions.TestSkipError( "Host doesn't have online hba cards") old_vhbas = utils_npiv.find_hbas("vhba") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() if not vm.is_alive(): vm.start() libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) if pool_type == "scsi": if define_pool == "yes": if pool_adapter_parent == "": pool_adapter_parent = online_hbas_list[0] pool_kwargs = {'source_path': source_path, 'source_name': source_name, 'source_format': source_format, 'pool_adapter_type': pool_adapter_type, 'pool_adapter_parent': pool_adapter_parent, 'pool_wwnn': pool_wwnn, 'pool_wwpn': pool_wwpn} elif pool_type == "logical": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME*5) if not 
utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) source_dev = "/dev/mapper/" + new_mpath_devs[0] logging.debug("We are going to use \"%s\" as our source device" " to create a logical pool", source_dev) try: cmd = "parted %s mklabel msdos -s" % source_dev cmd_result = process.run(cmd, shell=True) except Exception as e: raise exceptions.TestError("Error occurred when parted mklable") if define_pool_as == "yes": pool_extra_args = "" if source_dev: pool_extra_args = ' --source-dev %s' % source_dev elif pool_type == "mpath": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME*2) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0] try: cmd = "parted %s mklabel msdos -s" % mpath_vol_path cmd_result = process.run(cmd, shell=True) except Exception as e: raise exceptions.TestError("Error occurred when parted mklable") if pre_def_pool == "yes": try: pvt = utlv.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, 
pool_target, emulated_image, **pool_kwargs) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), _DELAY_TIME*2) virsh.pool_dumpxml(pool_name, to_file=pool_xml_f) virsh.pool_destroy(pool_name) except Exception as e: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **pool_kwargs) raise exceptions.TestError( "Error occurred when prepare pool xml:\n %s" % e) if os.path.exists(pool_xml_f): with open(pool_xml_f, 'r') as f: logging.debug("Create pool from file: %s", f.read()) try: # define/create/start the pool if (pre_def_pool == "yes") and (define_pool == "yes"): pool_define_status = virsh.pool_define(pool_xml_f, ignore_status=True, debug=True) utlv.check_exit_status(pool_define_status) if define_pool_as == "yes": pool_define_as_status = virsh.pool_define_as( pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True, debug=True ) utlv.check_exit_status(pool_define_as_status) if pool_create_as == "yes": if pool_type != "scsi": raise exceptions.TestSkipError("pool-create-as only needs to " "be covered by scsi pool for " "NPIV test.") cmd = "virsh pool-create-as %s %s \ --adapter-wwnn %s --adapter-wwpn %s \ --adapter-parent %s --target %s"\ % (pool_name, pool_type, pool_wwnn, pool_wwpn, online_hbas_list[0], pool_target) cmd_status = process.system(cmd, verbose=True) if cmd_status: raise exceptions.TestFail("pool-create-as scsi pool failed.") if need_pool_build == "yes": pool_build_status = virsh.pool_build(pool_name, "--overwrite") utlv.check_exit_status(pool_build_status) pool_ins = libvirt_storage.StoragePool() if not pool_ins.pool_exists(pool_name): raise exceptions.TestFail("define or create pool failed.") else: if not pool_ins.is_pool_active(pool_name): pool_start_status = virsh.pool_start(pool_name) utlv.check_exit_status(pool_start_status) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) # create vol if required if 
need_vol_create == "yes": vol_create_as_status = virsh.vol_create_as( volume_name, pool_name, volume_capacity, allocation, vol_format, "", debug=True ) utlv.check_exit_status(vol_create_as_status) virsh.pool_refresh(pool_name) vol_list = utlv.get_vol_list(pool_name, vol_check=True, timeout=_DELAY_TIME*3) logging.debug('Volume list is: %s' % vol_list) # use test_unit to save the first vol in pool if pool_type == "mpath": cmd = "virsh vol-list %s | grep \"%s\" |\ awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path) cmd_result = process.run(cmd, shell=True) status = cmd_result.exit_status output = cmd_result.stdout_text.strip() if cmd_result.exit_status: raise exceptions.TestFail("vol-list pool %s failed", pool_name) if not output: raise exceptions.TestFail("Newly added mpath dev not in pool.") test_unit = output logging.info( "Using %s to attach to a guest", test_unit) else: test_unit = list(vol_list.keys())[0] logging.info( "Using the first volume %s to attach to a guest", test_unit) vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) session = vm.wait_for_login() output = session.cmd_status_output('lsblk') logging.debug("%s", output[1]) old_count = vmxml.get_disk_count(vm_name) bf_disks = libvirt_vm.get_disks() # prepare disk xml which will be hot/cold attached to vm disk_params = {'type_name': 'volume', 'target_dev': target_device, 'target_bus': 'virtio', 'source_pool': pool_name, 'source_volume': test_unit, 'driver_type': vol_format} disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml') lun_disk_xml = utlv.create_disk_xml(disk_params) copyfile(lun_disk_xml, disk_xml) disk_xml_str = open(lun_disk_xml).read() logging.debug("The disk xml is: %s", disk_xml_str) # hot attach disk xml to vm if attach_method == "hot": copyfile(lun_disk_xml, disk_xml) dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True) # Pool/vol virtual disk is not supported by mpath pool yet. 
if dev_attach_status.exit_status and pool_type == "mpath": raise exceptions.TestSkipError("mpath pool vol is not " "supported in virtual disk yet," "the error message is: %s", dev_attach_status.stderr) session.close() utlv.check_exit_status(dev_attach_status) # cold attach disk xml to vm elif attach_method == "cold": if vm.is_alive(): vm.destroy(gracefully=False) new_disk = disk.Disk() new_disk.xml = disk_xml_str vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml.devices = vmxml.devices.append(new_disk) vmxml.sync() logging.debug(vmxml) try: vm.start() except virt_vm.VMStartError as e: logging.debug(e) if pool_type == "mpath": raise exceptions.TestSkipError("'mpath' pools for backing " "'volume' disks isn't " "supported for now") else: raise exceptions.TestFail("Failed to start vm") session = vm.wait_for_login() else: pass # checking attached disk in vm logging.info("Checking disk availability in domain") if not vmxml.get_disk_count(vm_name): raise exceptions.TestFail("No disk in domain %s." 
% vm_name) new_count = vmxml.get_disk_count(vm_name) if new_count <= old_count: raise exceptions.TestFail( "Failed to attach disk %s" % lun_disk_xml) logging.debug("Disks before attach: %s", bf_disks) af_disks = libvirt_vm.get_disks() logging.debug("Disks after attach: %s", af_disks) mount_disk = "".join(list(set(bf_disks) ^ set(af_disks))) if not mount_disk: raise exceptions.TestFail("Can not get attached device in vm.") logging.debug("Attached device in vm:%s", mount_disk) logging.debug("Creating file system for %s", mount_disk) output = session.cmd_status_output( 'echo yes | mkfs.ext4 %s' % mount_disk) logging.debug("%s", output[1]) if mount_disk: mount_success = mount_and_dd(session, mount_disk) if not mount_success: raise exceptions.TestFail("Mount failed") else: raise exceptions.TestFail("Partition not available for disk") logging.debug("Unmounting disk") session.cmd_status_output('umount %s' % mount_disk) output = session.cmd_status_output('mount') logging.debug("%s", output[1]) mount_success = mount_and_dd(session, mount_disk) if not mount_success: raise exceptions.TestFail("Mount failed") logging.debug("Unmounting disk") session.cmd_status_output('umount %s' % mount_disk) session.close() # detach disk from vm dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True) utlv.check_exit_status(dev_detach_status) finally: vm.destroy(gracefully=False) vmxml_backup.sync() logging.debug('Destroying pool %s', pool_name) virsh.pool_destroy(pool_name) logging.debug('Undefining pool %s', pool_name) virsh.pool_undefine(pool_name) if os.path.exists(pool_xml_f): os.remove(pool_xml_f) if os.path.exists(disk_xml): data_dir.clean_tmp_files() logging.debug("Cleanup disk xml") if pre_def_pool == "yes": # Do not apply cleanup_pool for logical pool, logical pool will # be cleaned below pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image, **pool_kwargs) if (test_unit and (need_vol_create == "yes" and (pre_def_pool == "no")) and (pool_type == 
"logical")): process.system('lvremove -f %s/%s' % (pool_name, test_unit), verbose=True) process.system('vgremove -f %s' % pool_name, verbose=True) process.system('pvremove -f %s' % source_dev, verbose=True) if new_vhbas: utils_npiv.vhbas_cleanup(new_vhbas) # Restart multipathd, this is to avoid bz1399075 if source_dev: utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev), _DELAY_TIME*5, 0.0, 5.0) elif mpath_vol_path: utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path), _DELAY_TIME*5, 0.0, 5.0) else: utils_npiv.restart_multipathd() if old_mpath_conf: utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path, conf_content=old_mpath_conf, replace_existing=True) if not original_mpath_conf_exist and os.path.exists(mpath_conf_path): os.remove(mpath_conf_path)
def run(test, params, env): """ 1. prepare a fc lun with one of following methods - create a scsi pool&vol - create a vhba 2. prepare the virtual disk xml, as one of following - source = /dev/disk/by-path - source = /dev/mapper/mpathX - source = pool&vol format 3. start a vm with above disk as vdb 4. create disk-only snapshot of vdb 5. check the snapshot-list and snapshot file's existence 6. mount vdb and touch file to it 7. revert the snapshot and check file's existence 8. delete snapshot 9. cleanup env. """ vm_name = params.get("main_vm", "avocado-vt-vm1") wwpn = params.get("wwpn", "WWPN_EXAMPLE") wwnn = params.get("wwnn", "WWNN_EXAMPLE") disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "file") disk_size = params.get("disk_size", "100M") device_target = params.get("device_target", "vdb") driver_name = params.get("driver_name", "qemu") driver_type = params.get("driver_type", "raw") target_bus = params.get("target_bus", "virtio") vd_format = params.get("vd_format", "") snapshot_dir = params.get("snapshot_dir", "/tmp") snapshot_name = params.get("snapshot_name", "s1") pool_name = params.get("pool_name", "") pool_target = params.get("pool_target", "/dev") snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no") new_vhbas = [] current_vhbas = [] new_vhba = [] path_to_blk = "" lun_sl = [] new_disk = "" pool_ins = None vm = env.get_vm(vm_name) online_hbas = utils_npiv.find_hbas("hba") if not online_hbas: raise exceptions.TestSkipError("There is no online hba cards.") first_online_hba = online_hbas[0] old_vhbas = utils_npiv.find_hbas("vhba") if vm.is_dead(): vm.start() session = vm.wait_for_login() virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) old_disks = virt_vm.get_disks() if vm.is_alive(): vm.destroy(gracefully=False) if pool_name: pool_ins = libvirt_storage.StoragePool() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() try: # prepare a fc lun if vd_format in 
['scsi_vol']: if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba, pool_target) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT) if not utils_npiv.is_vhbas_added(old_vhbas): raise exceptions.TestFail("vHBA not successfully created") current_vhbas = utils_npiv.find_hbas("vhba") new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0] new_vhbas.append(new_vhba) new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail( "block device not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail("lun symbolic links not found under " "/dev/disk/by-path/ for blk dev %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) elif vd_format in ['mpath', 'by_path']: old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": first_online_hba, "scsi_wwnn": wwnn, "scsi_wwpn": wwpn }) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT * 2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) if vd_format == "mpath": utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_TIMEOUT * 5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list( set(cur_mpath_devs).difference(set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) path_to_blk = "/dev/mapper/" + 
new_mpath_devs[0] elif vd_format == "by_path": new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("blk dev not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail( "lun symbolic links not found in " "/dev/disk/by-path/ for %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) else: pass else: raise exceptions.TestSkipError("Not provided how to pass" "virtual disk to VM.") # create qcow2 file on the block device with specified size if path_to_blk: cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size) try: process.run(cmd, shell=True) except process.cmdError, detail: raise exceptions.TestFail( "Fail to create qcow2 on blk dev: %s", detail) else:
pool_extra_args = ' --source-dev %s' % source_dev elif pool_type == "mpath": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME*2) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0] try: cmd = "parted %s mklabel msdos -s" % mpath_vol_path cmd_result = process.run(cmd, shell=True) except Exception, e: raise exceptions.TestError("Error occurred when parted mklable") if pre_def_pool == "yes": try:
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;

    Create a libvirt npiv pool from a vHBA's device mapper device, create a
    volume out of the newly created pool, attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with a mpath device.

    :param test: avocado test object (used implicitly by the framework).
    :param params: test parameter dictionary.
    :param env: test environment object holding the VM instances.
    :raises exceptions.TestSkipError: when no proper WWPN/WWNN is supplied
            or the host has no online HBA cards.
    :raises exceptions.TestFail: on any functional failure of the scenario.
    """
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")
    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    # multipathd is restarted around vHBA creation so that the newly zoned
    # LUNs show up as device mapper (mpath) devices.
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    # BUGFIX: this guard used to run AFTER online_hbas_list[0] was taken,
    # so a host without HBA cards crashed with IndexError instead of
    # skipping cleanly. Check before indexing.
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s", old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s", new_vhbas)
    process.run("service multipathd restart", shell=True)
    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)
    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s", cur_mpath_devs)
    # Whatever appeared since the vHBA was created must be its LUN(s).
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))
    logging.debug("newly added mpath devs are: %s", new_mpath_devs)
    if not new_mpath_devs:
        # BUGFIX: message previously used a backslash line-continuation
        # inside the string literal, embedding a long run of spaces.
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool", source_dev)
    # Put a fresh msdos label on the device so pool-build can claim it.
    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)
    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        # Defensive only: source_dev is always non-empty at this point.
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device"
            % new_vhbas)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    try:
        # Define, build and start the pool on top of the mpath device.
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args,
            ignore_status=True, debug=True)
        utlv.check_exit_status(cmd_result)
        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)
        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)
        cmd_result = virsh.vol_create_as(volume_name, pool_name,
                                         volume_capacity, allocation,
                                         frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)
        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        # Remember the path of the (last listed) volume for the attach below.
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)
        disk_params = {'type_name': "file",
                       'target_dev': target_device,
                       'target_bus': "virtio",
                       'source_file': test_unit,
                       'driver_name': "qemu",
                       'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()
        bf_disks = libvirt_vm.get_disks()
        # Hot-attach the volume-backed disk to the running guest.
        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(attach_success)
        logging.debug("Disks before attach: %s", bf_disks)
        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)
        # The symmetric difference before/after attach is the new device.
        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)
        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])
        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()
        # Reboot the guest and verify the mount persists across reboot.
        virsh.reboot(vm_name, debug=True)
        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount Successful accross reboot")
        session.close()
        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)
    finally:
        # Best-effort cleanup: guest, pool, lv/vg/pv remnants and the vHBA.
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
def run(test, params, env): """ Test command: virsh pool-define; pool-define-as; pool-start; vol-list pool; attach-device LUN to guest; mount the device; dd to the mounted device; unmount; pool-destroy; pool-undefine; Pre-requiste: Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to SAN controller. """ pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML") pool_name = params.get("pool_create_name", "virt_test_pool_tmp") pre_def_pool = params.get("pre_def_pool", "no") define_pool = params.get("define_pool", "no") define_pool_as = params.get("define_pool_as", "no") pool_create_as = params.get("pool_create_as", "no") need_pool_build = params.get("need_pool_build", "no") need_vol_create = params.get("need_vol_create", "no") pool_type = params.get("pool_type", "dir") source_format = params.get("pool_src_format", "") source_name = params.get("pool_source_name", "") source_path = params.get("pool_source_path", "/") pool_target = params.get("pool_target", "pool_target") pool_adapter_type = params.get("pool_adapter_type", "") pool_adapter_parent = params.get("pool_adapter_parent", "") target_device = params.get("disk_target_dev", "sdc") pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE") pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE") vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE") vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE") volume_name = params.get("volume_name", "imagefrommapper.qcow2") volume_capacity = params.get("volume_capacity", '1G') allocation = params.get("allocation", '1G') vol_format = params.get("volume_format", 'raw') attach_method = params.get("attach_method", "hot") test_unit = None mount_disk = None pool_kwargs = {} pool_extra_args = "" emulated_image = "emulated-image" disk_xml = "" new_vhbas = [] source_dev = "" mpath_vol_path = "" if pool_type == "scsi": if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for npiv scsi 
pool.") if pool_type == "logical": if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn): raise exceptions.TestSkipError( "No wwpn and wwnn provided for vhba.") online_hbas_list = utils_npiv.find_hbas("hba") logging.debug("The online hbas are: %s", online_hbas_list) if not online_hbas_list: raise exceptions.TestSkipError("Host doesn't have online hba cards") old_vhbas = utils_npiv.find_hbas("vhba") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() if not vm.is_alive(): vm.start() libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) pool_ins = libvirt_storage.StoragePool() if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) if pool_type == "scsi": if define_pool == "yes": if pool_adapter_parent == "": pool_adapter_parent = online_hbas_list[0] pool_kwargs = { 'source_path': source_path, 'source_name': source_name, 'source_format': source_format, 'pool_adapter_type': pool_adapter_type, 'pool_adapter_parent': pool_adapter_parent, 'pool_wwnn': pool_wwnn, 'pool_wwpn': pool_wwpn } elif pool_type == "logical": if (not vhba_wwnn) or (not vhba_wwpn): raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.") old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn }) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME * 2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME * 5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list( set(cur_mpath_devs).difference(set(old_mpath_devs))) 
logging.debug("The newly added mpath dev is: %s", new_mpath_devs) source_dev = "/dev/mapper/" + new_mpath_devs[0] logging.debug( "We are going to use \"%s\" as our source device" " to create a logical pool", source_dev) try: cmd = "parted %s mklabel msdos -s" % source_dev cmd_result = process.run(cmd, shell=True) except Exception, e: raise exceptions.TestError("Error occurred when parted mklable") if define_pool_as == "yes": pool_extra_args = "" if source_dev: pool_extra_args = ' --source-dev %s' % source_dev
old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml({ "nodedev_parent": online_hbas_list[0], "scsi_wwnn": vhba_wwnn, "scsi_wwpn": vhba_wwpn }) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME * 2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_DELAY_TIME * 2) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list( set(cur_mpath_devs).difference(set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0] try: cmd = "parted %s mklabel msdos -s" % mpath_vol_path cmd_result = process.run(cmd, shell=True) except Exception, e: raise exceptions.TestError("Error occurred when parted mklable") if pre_def_pool == "yes": try:
def run(test, params, env): """ 1. prepare a fc lun with one of following methods - create a scsi pool&vol - create a vhba 2. prepare the virtual disk xml, as one of following - source = /dev/disk/by-path - source = /dev/mapper/mpathX - source = pool&vol format 3. start a vm with above disk as vdb 4. create disk-only snapshot of vdb 5. check the snapshot-list and snapshot file's existence 6. mount vdb and touch file to it 7. revert the snapshot and check file's existence 8. delete snapshot 9. cleanup env. """ vm_name = params.get("main_vm", "avocado-vt-vm1") wwpn = params.get("wwpn", "WWPN_EXAMPLE") wwnn = params.get("wwnn", "WWNN_EXAMPLE") disk_device = params.get("disk_device", "disk") disk_type = params.get("disk_type", "file") disk_size = params.get("disk_size", "100M") device_target = params.get("device_target", "vdb") driver_name = params.get("driver_name", "qemu") driver_type = params.get("driver_type", "raw") target_bus = params.get("target_bus", "virtio") vd_format = params.get("vd_format", "") snapshot_dir = params.get("snapshot_dir", "/tmp") snapshot_name = params.get("snapshot_name", "s1") pool_name = params.get("pool_name", "") pool_target = params.get("pool_target", "/dev") snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no") new_vhbas = [] current_vhbas = [] new_vhba = [] path_to_blk = "" lun_sl = [] new_disk = "" pool_ins = None vm = env.get_vm(vm_name) online_hbas = utils_npiv.find_hbas("hba") if not online_hbas: raise exceptions.TestSkipError("There is no online hba cards.") first_online_hba = online_hbas[0] old_vhbas = utils_npiv.find_hbas("vhba") if vm.is_dead(): vm.start() session = vm.wait_for_login() virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache) old_disks = virt_vm.get_disks() if vm.is_alive(): vm.destroy(gracefully=False) if pool_name: pool_ins = libvirt_storage.StoragePool() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml_backup = vmxml.copy() try: # prepare a fc lun if vd_format in 
['scsi_vol']: if pool_ins.pool_exists(pool_name): raise exceptions.TestFail("Pool %s already exist" % pool_name) prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba, pool_target) utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT) if not utils_npiv.is_vhbas_added(old_vhbas): raise exceptions.TestFail("vHBA not successfully created") current_vhbas = utils_npiv.find_hbas("vhba") new_vhba = list(set(current_vhbas).difference( set(old_vhbas)))[0] new_vhbas.append(new_vhba) new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("block device not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for( lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail("lun symbolic links not found under " "/dev/disk/by-path/ for blk dev %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) elif vd_format in ['mpath', 'by_path']: old_mpath_devs = utils_npiv.find_mpath_devs() new_vhba = utils_npiv.nodedev_create_from_xml( {"nodedev_parent": first_online_hba, "scsi_wwnn": wwnn, "scsi_wwpn": wwpn}) utils_misc.wait_for( lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_TIMEOUT*2) if not new_vhba: raise exceptions.TestFail("vHBA not sucessfully generated.") new_vhbas.append(new_vhba) if vd_format == "mpath": utils_misc.wait_for( lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs), timeout=_TIMEOUT*5) if not utils_npiv.is_mpath_devs_added(old_mpath_devs): raise exceptions.TestFail("mpath dev not generated.") cur_mpath_devs = utils_npiv.find_mpath_devs() new_mpath_devs = list(set(cur_mpath_devs).difference( set(old_mpath_devs))) logging.debug("The newly added mpath dev is: %s", new_mpath_devs) path_to_blk = "/dev/mapper/" + 
new_mpath_devs[0] elif vd_format == "by_path": new_vhba_scsibus = re.sub("\D", "", new_vhba) utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus), timeout=_TIMEOUT) new_blks = get_blks_by_scsi(new_vhba_scsibus) if not new_blks: raise exceptions.TestFail("blk dev not found with scsi_%s", new_vhba_scsibus) first_blk_dev = new_blks[0] utils_misc.wait_for( lambda: get_symbols_by_blk(first_blk_dev), timeout=_TIMEOUT) lun_sl = get_symbols_by_blk(first_blk_dev) if not lun_sl: raise exceptions.TestFail("lun symbolic links not found in " "/dev/disk/by-path/ for %s" % first_blk_dev) lun_dev = lun_sl[0] path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev) else: pass else: raise exceptions.TestSkipError("Not provided how to pass" "virtual disk to VM.") # create qcow2 file on the block device with specified size if path_to_blk: cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size) try: process.run(cmd, shell=True) except process.cmdError, detail: raise exceptions.TestFail("Fail to create qcow2 on blk dev: %s", detail) else:
def run(test, params, env):
    """
    Test disk-only/internal snapshot of a vm disk backed by an NPIV fc lun.

    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.

    :param test: avocado test object (provides fail/skip reporting context).
    :param params: test parameter dict; keys read are listed below.
    :param env: test environment; used to look up the vm object.
    :raises exceptions.TestSkipError: no online HBA, or vd_format not given.
    :raises exceptions.TestFail: on any setup or verification failure.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There is no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    # Boot the vm once to record the guest's pre-existing disks, so the
    # newly attached lun can be identified later by set difference.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir,
                            vm.address_cache)
    old_disks = virt_vm.get_disks()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist"
                                          % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn,
                              first_online_hba, pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(
                set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            # the vhba name is like "scsi_host12"; keep only the bus number
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail("block device not found with "
                                          "scsi_%s" % new_vhba_scsibus)
            first_blk_dev = new_blks[0]
            utils_misc.wait_for(
                lambda: get_symbols_by_blk(first_blk_dev),
                timeout=_TIMEOUT)
            lun_sl = get_symbols_by_blk(first_blk_dev)
            if not lun_sl:
                raise exceptions.TestFail("lun symbolic links not found "
                                          "under /dev/disk/by-path/ for "
                                          "blk dev %s" % first_blk_dev)
            lun_dev = lun_sl[0]
            path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
            utils_misc.wait_for(
                lambda: utils_npiv.is_vhbas_added(old_vhbas),
                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                # multipath devices take noticeably longer to appear
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(set(cur_mpath_devs).difference(
                    set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(
                    lambda: get_blks_by_scsi(new_vhba_scsibus),
                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with "
                                              "scsi_%s" % new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(
                    lambda: get_symbols_by_blk(first_blk_dev),
                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail("lun symbolic links not found "
                                              "in /dev/disk/by-path/ for %s"
                                              % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("Not provided how to pass "
                                           "virtual disk to VM.")
        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.cmdError as detail:
                raise exceptions.TestFail("Fail to create qcow2 on blk dev: "
                                          "%s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")
        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {'type_name': disk_type,
                           'target_dev': device_target,
                           'target_bus': target_bus,
                           'source_pool': pool_name,
                           'source_volume': test_vol,
                           'driver_type': driver_type}
        else:
            disk_params = {'type_name': disk_type,
                           'device': disk_device,
                           'driver_name': driver_name,
                           'driver_type': driver_type,
                           'source_file': path_to_blk,
                           'target_dev': device_target,
                           'target_bus': target_bus}
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        # read the generated disk xml; close the file handle promptly
        with open(utlv.create_disk_xml(disk_params)) as disk_xml_file:
            new_disk.xml = disk_xml_file.read()
        # start vm with the virtual disk
        # NOTE(review): relies on vmxml.devices.append() returning the
        # device list -- confirm against the VMXMLDevices API.
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        # symmetric difference: exactly the disk(s) newly seen by the guest
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")
        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = (snapshot_dir + "/" + vm_blk + "."
                                 + snapshot_name)
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (
                    vm_blk, snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))
        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")
        # snapshot-revert doesn't support external snapshot for now. so
        # only check this with internal snapshot.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s",
                          vm_name, snapshot_name)
            utlv.check_exit_status(virsh.snapshot_revert(vm_name,
                                                         snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            # file created after the snapshot must vanish on revert
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file exist = %s, file content = %s",
                          file_existence, file_content)
            # file created before the snapshot must survive the revert
            if ((not file_existence) or
                    (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s",
                          snap, options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s"
                                      % snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happens: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(
                conf_path=mpath_conf_path,
                conf_content=old_mpath_conf,
                replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)