def create_iscsi_pool():
    """
    Tear down any stale iSCSI export, set up a fresh target, and create
    an iSCSI storage pool backed by it.

    Relies on module-level configuration: ``disk_src_host``,
    ``pool_src_host``, ``pool_type``, ``pool_name`` and ``virsh_dargs``.
    """
    # Start from a clean slate, then export a new 1G target without CHAP.
    libvirt.setup_or_cleanup_iscsi(is_setup=False)
    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
        is_setup=True, is_login=False, image_size='1G',
        chap_user="", chap_passwd="", portal_ip=disk_src_host)

    # Build the pool definition pointing at the freshly exported target.
    source_xml = pool_xml.SourceXML()
    source_xml.host_name = pool_src_host
    source_xml.device_path = iscsi_target
    pool_def = pool_xml.PoolXML(pool_type=pool_type)
    pool_def.name = pool_name
    pool_def.set_source(source_xml)
    pool_def.target_path = "/dev/disk/by-path"

    # Make sure nothing active holds the name, then create the pool.
    virsh.pool_destroy(pool_name, **virsh_dargs)
    result = virsh.pool_create(pool_def.xml, **virsh_dargs)
    libvirt.check_exit_status(result)
def restore(self, name):
    """
    Re-create a storage pool from a previously recorded state.

    :param name: state dict describing the pool; expected keys are
                 'name', 'persistent', 'state' and 'inactive xml'.
    :raise Exception: if any underlying virsh command fails.
    """
    # The parameter actually carries the whole recorded pool dict;
    # pull the real pool name out of it.
    pool = name
    name = pool['name']
    pools = self.current_state
    if name in pools:
        # Drop whatever pool currently occupies this name first.
        self.remove(pools[name])
    # Dump the recorded inactive XML to a temp file virsh can read.
    # mode='w' is required on Python 3, where NamedTemporaryFile
    # defaults to binary mode and writing a str would raise TypeError.
    # NOTE(review): delete=False leaks the temp file on purpose here so
    # virsh can read it; nothing removes it afterwards — confirm intent.
    pool_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
    fname = pool_file.name
    pool_file.writelines(pool['inactive xml'])
    pool_file.close()
    # The original wrapped this in "except Exception, e: raise e", which
    # is a syntax error on Python 3 and destroyed the traceback on
    # Python 2; a plain propagation is equivalent and correct.
    if pool['persistent'] == 'yes':
        res = virsh.pool_define(fname)
        if res.exit_status:
            raise Exception(str(res))
        if pool['state'] == 'running':
            res = virsh.pool_start(name)
            if res.exit_status:
                raise Exception(str(res))
    else:
        # Non-persistent pools are created transient instead of defined.
        res = virsh.pool_create(fname)
        if res.exit_status:
            raise Exception(str(res))
def create_iscsi_pool():
    """
    Setup iSCSI target, and create one iSCSI pool.

    Relies on module-level configuration: ``disk_src_host``,
    ``pool_src_host``, ``pool_type``, ``pool_name`` and ``virsh_dargs``.
    """
    # Clean up any previous iSCSI export before creating a new one.
    libvirt.setup_or_cleanup_iscsi(is_setup=False)
    # Export a 1G target without CHAP; is_login=False because the pool
    # itself will consume the target rather than the local initiator.
    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
        is_setup=True, is_login=False, image_size='1G',
        chap_user="", chap_passwd="", portal_ip=disk_src_host)
    # Define an iSCSI pool xml to create it
    pool_src_xml = pool_xml.SourceXML()
    pool_src_xml.host_name = pool_src_host
    pool_src_xml.device_path = iscsi_target
    poolxml = pool_xml.PoolXML(pool_type=pool_type)
    poolxml.name = pool_name
    poolxml.set_source(pool_src_xml)
    poolxml.target_path = "/dev/disk/by-path"
    # Create iSCSI pool; destroy first in case the name is already active.
    virsh.pool_destroy(pool_name, **virsh_dargs)
    cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain.

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage.
        1.3. Set secret value.
    2. Create an iscsi network disk XML.
    3. Attach disk with the XML file and check the disk inside the VM.
    4. Detach the disk.

    :param test: test object (provides cancel/error/fail).
    :param params: test parameter dict.
    :param env: test environment object holding the VM.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform (hotplug recognition is slower there).
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    # Skip early when the host libvirt is too old for the requested feature.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed) and not
            libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")

    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        # Rebuild the controller topology: one pcie-root plus enough
        # pcie-root-port controllers for repeated disk hot-plugs.
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to match multiple times disk attaching
        # requirements
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value: virsh expects the CHAP password base64
            # encoded in the host's preferred encoding.
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target; "block" disks need a logged-in session so
        # the LUN appears as a local device, other types use the export.
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size="1G",
                chap_user=chap_user, chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=False, image_size='1G',
                chap_user=chap_user, chap_passwd=chap_passwd,
                portal_ip=disk_src_host)

        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the (name, path) of the first volume in the pool,
                or None when the pool has no volumes yet."""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                # Row 0 of the findall result is the table header, so a
                # real volume only exists when more than one row matched.
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                            shell=True, verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' % (blk_source, '100M'),
                            shell=True, verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' % (blk_source, disk_path),
                            shell=True, verbose=True)

        # Create block device: an LVM volume on top of the logged-in LUN.
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm", size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # iscsi-direct pool don't include source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        # Volume-type disks carry auth inside the pool definition, so the
        # per-disk auth element is only needed for the other types.
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and
            # external snapshots
            # see more details from
            # https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)
            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)
            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            virsh.snapshot_create_as(vm_name, snapshot_name2,
                                     ignore_status=False, debug=True)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool (invalid option, no file, invalid file,
       duplicate pool name, and create pool under readonly mode).

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object.
    """
    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            # Fixed: TestFail takes a single formatted message; passing the
            # value as a second positional argument never formatted the %s.
            raise error.TestFail("Require pool: %s exist" % exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    created_pool_path = False
    if "--file" in option:
        # As of commit id 'c5f6c685' in virt-install (v1.0.0), virt-install
        # will attempt to create a storage pool for ease of management of
        # volumes. Thus if the test was started with --install or by chance
        # an 'images' pool exists using get_data_dir(), then the attempt
        # to define/create this pool will fail since two pools cannot use
        # the same path. Therefore, rather than use the data dir for our
        # test, let's just use 'tmp' dir and create an images pool there.
        pool_path = os.path.join(data_dir.get_tmp_dir(), 'images')
        if not os.path.exists(pool_path):
            os.mkdir(pool_path)
            created_pool_path = True
        dir_xml = """
<pool type='dir'>
  <name>%s</name>
  <target>
    <path>%s</path>
  </target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        # Use a context manager so the handle is closed even on error.
        with open(pool_xml, 'w') as xml_object:
            xml_object.write(dir_xml)

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            # Fixed: same %-formatting bug as above.
            raise error.TestFail("Delete pool: %s fail" % exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        with open(pool_xml, 'r') as xml_file:
            logging.debug("Create pool from file:\n %s", xml_file.read())
    try:
        cmd_result = virsh.pool_create(pool_xml, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
                (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)
        # restore the undefined default pool
        if undefine_exist_pool:  # and not pool_ins.pool_exists(exist_pool_name):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
        # If we created the pool_path, then remove it
        if created_pool_path:
            os.rmdir(pool_path)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain.

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage.
        1.3. Set secret value.
    2. Create an iscsi network disk XML.
    3. Attach disk with the XML file and check the disk inside the VM.
    4. Detach the disk.

    :param test: test object.
    :param params: test parameter dict.
    :param env: test environment object holding the VM.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocal = params.get("disk_source_protocal", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in" +
                                    " current libvirt version.")
    # Back VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocal
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")
            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.hostname = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            # Fixed: the pool was created under the host address
            # (disk_src_host) while volumes were listed from
            # disk_src_pool below, so vol_list could never succeed.
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                # Row 0 is the listing header; row 1 is the first volume.
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")
        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocal,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            # Fixed: the exception was constructed but never raised, so
            # unsupported disk types fell through with no source config.
            raise error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocal,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        # Fixed: keyword was 'flagstrs'; virsh.attach_device takes
        # 'flagstr' (matching the sibling implementation in this file).
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            # Fixed: the original called vm.start() and then virsh.start()
            # back to back; the second start of an already-running domain
            # would fail. Start once through virsh and verify it.
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Fixed: the virsh option is spelled "--disk-only"; the
            # original "--disk_ony" was rejected by virsh.
            sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                # Fixed: "except (...), e" is Python-2-only syntax and a
                # SyntaxError on Python 3.
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # NOTE(review): the cleanup section was missing/truncated in the
        # original source; this minimally restores the domain XML and
        # tears down the iSCSI target so later tests start clean.
        vmxml_backup.sync()
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
def pre_pool(pool_name, pool_type, pool_target, emulated_image):
    """
    Prepare the specific type pool.

    Note:
        1. For scsi type pool, it only could be created from xml file.
        2. Other type pools can be created by pool_create_as function.

    :param pool_name: name of the pool to create
    :param pool_type: one of dir/disk/fs/logical/netfs/iscsi/scsi
    :param pool_target: target path or directory for the pool
    :param emulated_image: backing image name for iscsi-emulated devices
    :raise error.TestFail: when pool creation or the final check fails
    """
    extra = ""
    if pool_type == "dir":
        logging.info(test.tmpdir)
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
    elif pool_type == "disk":
        device_name = login_iscsi(emulated_image, "1G")
        mk_part(device_name)
        extra = " --source-dev %s" % device_name
    elif pool_type == "fs":
        device_name = login_iscsi(emulated_image, "1G")
        cmd = "mkfs.ext4 -F %s" % device_name
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        extra = " --source-dev %s" % device_name
        utils.run(cmd)
    elif pool_type == "logical":
        logical_device = login_iscsi(emulated_image, "1G")
        cmd_pv = "pvcreate %s" % logical_device
        vg_name = "vg_%s" % pool_type
        cmd_vg = "vgcreate %s %s" % (vg_name, logical_device)
        extra = "--source-name %s" % vg_name
        utils.run(cmd_pv)
        utils.run(cmd_vg)
    elif pool_type == "netfs":
        nfs_path = os.path.join(test.tmpdir, nfs_server_dir)
        if not os.path.exists(nfs_path):
            os.mkdir(nfs_path)
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        set_nfs_server("%s *(rw,async,no_root_squash)" % nfs_path)
        extra = "--source-host %s --source-path %s" % (source_host,
                                                       nfs_path)
    elif pool_type == "iscsi":
        logical_device = login_iscsi(emulated_image, "100M")
        iscsi_session = iscsi.iscsi_get_sessions()
        iscsi_device = ()
        # Find the session belonging to our emulated image.
        for iscsi_node in iscsi_session:
            if iscsi_node[1].count(emulated_image):
                iscsi_device = iscsi_node
                break
        if iscsi_device == ():
            raise error.TestFail("No iscsi device.")
        # An IPv6-style portal is replaced by 'localhost' for virsh.
        if "::" in iscsi_device[0]:
            iscsi_device = ('localhost', iscsi_device[1])
        extra = " --source-host %s --source-dev %s" % iscsi_device
    elif pool_type == "scsi":
        scsi_xml_file = params.get("scsi_xml_file")
        if not os.path.exists(scsi_xml_file):
            # Generate the scsi pool xml under tmpdir when it's missing.
            scsi_xml_file = os.path.join(test.tmpdir, scsi_xml_file)
            logical_device = login_iscsi(emulated_image, "100M")
            cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| "
                   "grep Host|awk '{print $3}'"
                   % logical_device.split('/')[2])
            scsi_host = utils.system_output(cmd)
            scsi_xml = """
<pool type='scsi'>
  <name>%s</name>
  <source>
    <adapter type='scsi_host' name='host%s'/>
  </source>
  <target>
    <path>/dev/disk/by-path</path>
  </target>
</pool>
""" % (pool_name, scsi_host)
            logging.debug("Prepare the scsi pool xml: %s", scsi_xml)
            # Use a context manager so the handle is closed on any path.
            with open(scsi_xml_file, 'w') as xml_object:
                xml_object.write(scsi_xml)

    # Create pool
    if pool_type == "scsi":
        re_v = virsh.pool_create(scsi_xml_file)
    else:
        re_v = virsh.pool_create_as(pool_name, pool_type,
                                    pool_target, extra)
    if not re_v:
        raise error.TestFail("Create pool failed.")
    # Check the created pool
    if not check_pool(pool_name):
        # Fixed: TestFail takes one formatted message; passing pool_name
        # as a second positional argument never formatted the %s.
        raise error.TestFail("Can't find active pool: %s" % pool_name)
def pre_pool(pool_name, pool_type, pool_target, emulated_image):
    """
    Prepare and create a storage pool of the given type with virsh.

    Note: 1. A scsi type pool can only be created from an xml file
          2. Other pool types are created via virsh.pool_create_as

    :param pool_name: name of the pool to create
    :param pool_type: dir, disk, fs, logical, netfs, iscsi or scsi
    :param pool_target: target path/name of the storage pool
    :param emulated_image: image file used to emulate an iscsi-backed disk
    :raises error.TestFail: if pool creation fails or the pool is not active
    """
    extra = ""
    if pool_type == "dir":
        logging.info(test.tmpdir)
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
    elif pool_type == "disk":
        device_name = login_iscsi(emulated_image, "1G")
        mk_part(device_name)
        extra = " --source-dev %s" % device_name
    elif pool_type == "fs":
        device_name = login_iscsi(emulated_image, "1G")
        cmd = "mkfs.ext4 -F %s" % device_name
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        extra = " --source-dev %s" % device_name
        utils.run(cmd)
    elif pool_type == "logical":
        logical_device = login_iscsi(emulated_image, "1G")
        cmd_pv = "pvcreate %s" % logical_device
        vg_name = "vg_%s" % pool_type
        cmd_vg = "vgcreate %s %s" % (vg_name, logical_device)
        extra = "--source-name %s" % vg_name
        utils.run(cmd_pv)
        utils.run(cmd_vg)
    elif pool_type == "netfs":
        nfs_path = os.path.join(test.tmpdir, nfs_server_dir)
        if not os.path.exists(nfs_path):
            os.mkdir(nfs_path)
        pool_target = os.path.join(test.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        set_nfs_server("%s *(rw,async,no_root_squash)" % nfs_path)
        extra = "--source-host %s --source-path %s" % (source_host, nfs_path)
    elif pool_type == "iscsi":
        logical_device = login_iscsi(emulated_image, "100M")
        iscsi_session = iscsi.iscsi_get_sessions()
        iscsi_device = ()
        # Find the iscsi session that exports our emulated image
        for iscsi_node in iscsi_session:
            if iscsi_node[1].count(emulated_image):
                iscsi_device = iscsi_node
                break
        if iscsi_device == ():
            raise error.TestFail("No iscsi device.")
        # "::" in the portal address means the target is local
        if "::" in iscsi_device[0]:
            iscsi_device = ('localhost', iscsi_device[1])
        extra = " --source-host %s --source-dev %s" % iscsi_device
    elif pool_type == "scsi":
        scsi_xml_file = params.get("scsi_xml_file")
        if not os.path.exists(scsi_xml_file):
            # Generate the scsi pool xml since no pre-made file exists
            scsi_xml_file = os.path.join(test.tmpdir, scsi_xml_file)
            logical_device = login_iscsi(emulated_image, "100M")
            cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| "
                   "grep Host|awk '{print $3}'"
                   % logical_device.split('/')[2])
            scsi_host = utils.system_output(cmd)
            scsi_xml = """
<pool type='scsi'>
<name>%s</name>
<source>
<adapter type='scsi_host' name='host%s'/>
</source>
<target>
<path>/dev/disk/by-path</path>
</target>
</pool>
""" % (pool_name, scsi_host)
            logging.debug("Prepare the scsi pool xml: %s", scsi_xml)
            # Fixed: use a context manager so the handle is always closed
            with open(scsi_xml_file, 'w') as xml_object:
                xml_object.write(scsi_xml)
    # Create pool
    if pool_type == "scsi":
        re_v = virsh.pool_create(scsi_xml_file)
    else:
        re_v = virsh.pool_create_as(pool_name, pool_type, pool_target, extra)
    if not re_v:
        raise error.TestFail("Create pool failed.")
    # Check the created pool
    if not check_pool(pool_name):
        # Fixed: interpolate the pool name into the message; TestFail does
        # not do logging-style %-formatting of extra arguments
        raise error.TestFail("Can't find active pool: %s" % pool_name)
def pre_pool(self, pool_name, pool_type, pool_target, emulated_image,
             image_size="100M", pre_disk_vol=None):
    """
    Prepare and create a storage pool of the specific type.

    Note: 1. For scsi type pool, it only could be created from xml file
          2. Other type pools can be created by pool_create_as function
          3. Disk pool will not allow to create volume with virsh commands
             So we can prepare it before pool created

    :param pool_name: created pool name
    :param pool_type: dir, disk, logical, fs, netfs or else
    :param pool_target: target of storage pool
    :param emulated_image: use an image file to simulate a scsi disk
                           it could be used for disk, logical pool
    :param image_size: the size for emulated image
    :param pre_disk_vol: a list of partition sizes to create, no more than
                         4 partitions because of the msdos label; None
                         (the default) means no pre-created volume.
                         Fixed: previously a mutable [] default, which is
                         shared between calls.
    """
    if pre_disk_vol is None:
        pre_disk_vol = []
    extra = ""
    if pool_type == "dir":
        logging.info("Pool path:%s", self.tmpdir)
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
    elif pool_type == "disk":
        device_name = setup_or_cleanup_iscsi(is_setup=True,
                                             emulated_image=emulated_image,
                                             image_size=image_size)
        # If pre_disk_vol is empty, the disk pool will have no volume
        if isinstance(pre_disk_vol, list) and len(pre_disk_vol):
            for vol in pre_disk_vol:
                mk_part(device_name, vol)
        extra = " --source-dev %s" % device_name
    elif pool_type == "fs":
        device_name = setup_or_cleanup_iscsi(is_setup=True,
                                             emulated_image=emulated_image,
                                             image_size=image_size)
        cmd = "mkfs.ext4 -F %s" % device_name
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        extra = " --source-dev %s" % device_name
        utils.run(cmd)
    elif pool_type == "logical":
        logical_device = setup_or_cleanup_iscsi(is_setup=True,
                                                emulated_image=emulated_image,
                                                image_size=image_size)
        cmd_pv = "pvcreate %s" % logical_device
        vg_name = "vg_%s" % pool_type
        cmd_vg = "vgcreate %s %s" % (vg_name, logical_device)
        extra = "--source-name %s" % vg_name
        utils.run(cmd_pv)
        utils.run(cmd_vg)
        # Create a small volume for verification
        # And VG path will not exist if no any volume in.(bug?)
        cmd_lv = "lvcreate --name default_lv --size 1M %s" % vg_name
        utils.run(cmd_lv)
    elif pool_type == "netfs":
        nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server")
        nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
        if not os.path.exists(nfs_path):
            os.mkdir(nfs_path)
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        setup_or_cleanup_nfs(is_setup=True,
                             export_options="rw,async,no_root_squash",
                             mount_src=nfs_path)
        source_host = self.params.get("source_host", "localhost")
        extra = "--source-host %s --source-path %s" % (source_host, nfs_path)
    elif pool_type == "iscsi":
        logical_device = setup_or_cleanup_iscsi(is_setup=True,
                                                emulated_image=emulated_image,
                                                image_size=image_size)
        iscsi_session = iscsi.iscsi_get_sessions()
        iscsi_device = ()
        # Find the iscsi session that exports our emulated image
        for iscsi_node in iscsi_session:
            if iscsi_node[1].count(emulated_image):
                iscsi_device = iscsi_node
                break
        if iscsi_device == ():
            raise error.TestFail("No iscsi device.")
        # "::" in the portal address means the target is local
        if "::" in iscsi_device[0]:
            iscsi_device = ('localhost', iscsi_device[1])
        extra = " --source-host %s --source-dev %s" % iscsi_device
    elif pool_type == "scsi":
        scsi_xml_file = self.params.get("scsi_xml_file")
        if not os.path.exists(scsi_xml_file):
            # Generate the scsi pool xml since no pre-made file exists
            scsi_xml_file = os.path.join(self.tmpdir, scsi_xml_file)
            logical_device = setup_or_cleanup_iscsi(
                is_setup=True, emulated_image=emulated_image,
                image_size=image_size)
            cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| grep Host|awk "
                   "'{print $3}'" % logical_device.split('/')[2])
            scsi_host = utils.system_output(cmd)
            scsi_xml = """
<pool type='scsi'>
<name>%s</name>
<source>
<adapter type='scsi_host' name='host%s'/>
</source>
<target>
<path>/dev/disk/by-path</path>
</target>
</pool>
""" % (pool_name, scsi_host)
            logging.debug("Prepare the scsi pool xml: %s", scsi_xml)
            # Fixed: use a context manager so the handle is always closed
            with open(scsi_xml_file, 'w') as xml_object:
                xml_object.write(scsi_xml)
    # Create pool
    if pool_type == "scsi":
        re_v = virsh.pool_create(scsi_xml_file)
    else:
        re_v = virsh.pool_create_as(pool_name, pool_type, pool_target, extra)
    if not re_v:
        raise error.TestFail("Create pool failed.")
    # Check the created pool
    check_actived_pool(pool_name)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool(invalid option, no file, invalid file,
       duplicate pool name, and create pool under readonly mode).
    """
    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    # Fixed: initialize so the finally block below cannot hit a NameError
    # when undefine_exist_pool is set without use_exist_pool
    backup_xml = None
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Require pool: %s exist" % exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Delete pool: %s fail" % exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        # Fixed: close the file handle instead of leaking it
        with open(pool_xml, 'r') as xml_f:
            logging.debug("Create pool from file:\n %s", xml_f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        # Only restore when a backup was actually taken
        if (undefine_exist_pool and backup_xml and
                not pool_ins.pool_exists(exist_pool_name)):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool(invalid option, no file, invalid file,
       duplicate pool name, and create pool under readonly mode).
    """
    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    exist_active = False

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    # Fixed: initialize so the finally block below cannot hit a NameError
    # when undefine_exist_pool is set without use_exist_pool
    backup_xml = None
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Require pool: %s exist" % exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml
        exist_active = pool_ins.is_pool_active(exist_pool_name)

    # backup pool state
    pool_ins_state = virsh.pool_state_dict()
    logging.debug("Backed up pool(s): %s", pool_ins_state)

    if "--file" in option:
        pool_path = os.path.join(data_dir.get_data_dir(), 'images')
        dir_xml = """
<pool type='dir'>
<name>%s</name>
<target>
<path>%s</path>
</target>
</pool>
""" % (pool_name, pool_path)
        pool_xml = os.path.join(test.tmpdir, pool_xml)
        # Fixed: use a context manager so the handle is always closed
        with open(pool_xml, 'w') as xml_object:
            xml_object.write(dir_xml)

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Delete pool: %s fail" % exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        # Fixed: close the file handle instead of leaking it
        with open(pool_xml, 'r') as xml_f:
            logging.debug("Create pool from file:\n %s", xml_f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            # For an inactive 'default' pool, when the test runs to create
            # an existing pool of 'default', the test will pass because the
            # 'default' pool will be considered a transient pool when the
            # 'name' and 'uuid' match (which they do in this case). So
            # don't fail the test unnecessarily
            if (pool_name == exist_pool_name and not exist_active):
                pass
            else:
                raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        # If we have a different pool name than default OR
        # we need to undefine this tests created default pool OR
        # we had a transient, active default pool created above, then
        # we need to destroy what the test created.
        # NB: When the active, transient pool is destroyed the
        # previously defined, but inactive pool will now exist.
        if pool_name != exist_pool_name or undefine_exist_pool or \
                (pool_name == exist_pool_name and not exist_active):
            virsh.pool_destroy(pool_name)
        # restore the undefined default pool (only when backed up)
        if undefine_exist_pool and backup_xml:
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
            # Recover autostart
            if pool_ins_state[exist_pool_name]['autostart']:
                virsh.pool_autostart(exist_pool_name, ignore_status=False)
# Readonly mode ro_flag = False if readonly_mode: logging.debug("Readonly mode test") ro_flag = True # Run virsh test if os.path.exists(pool_xml_f): f = open(pool_xml_f, 'r') try: logging.debug("Create pool from file:\n %s", f.read()) finally: f.close() try: cmd_result = virsh.pool_create(pool_xml_f, option, ignore_status=True, debug=True, readonly=ro_flag) err = cmd_result.stderr.strip() status = cmd_result.exit_status if not status_error: if status: raise error.TestFail(err) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) if pool_detail['uuid'] == old_uuid: error.TestFail("New created pool still use the old UUID %s" % old_uuid) elif status_error and status == 0: raise error.TestFail("Expect fail, but run successfully.") finally: pvt.cleanup_pool(pool_name, pool_type, pool_target,
def run_virsh_pool_create(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file.
    1) Use xml file from test parameters to create pool.
    2) Dump the exist pool to create pool.
    3) Negatively create pool(invalid option, no file, invalid file,
       duplicate pool name, and create pool under readonly mode).
    """
    pool_xml = params.get("pool_create_xml_file")
    pool_name = params.get("pool_create_name")
    option = params.get("pool_create_extra_option", "")
    use_exist_pool = "yes" == params.get("pool_create_use_exist_pool", "no")
    exist_pool_name = params.get("pool_create_exist_pool_name", "default")
    undefine_exist_pool = "yes" == params.get(
        "pool_create_undefine_exist_pool", "no")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Deal with each parameters
    # backup the exist pool
    pool_ins = libvirt_storage.StoragePool()
    # Fixed: initialize so the finally block below cannot hit a NameError
    # when undefine_exist_pool is set without use_exist_pool
    backup_xml = None
    if use_exist_pool:
        if not pool_ins.pool_exists(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Require pool: %s exist" % exist_pool_name)
        backup_xml = libvirt_xml.PoolXML.backup_xml(exist_pool_name)
        pool_xml = backup_xml

    # Delete the exist pool
    start_pool = False
    if undefine_exist_pool:
        poolxml = libvirt_xml.PoolXML.new_from_dumpxml(exist_pool_name)
        poolxml.name = pool_name
        pool_xml = poolxml.xml
        if pool_ins.is_pool_active(exist_pool_name):
            start_pool = True
        if not pool_ins.delete_pool(exist_pool_name):
            # Fixed: interpolate the name; TestFail does no %-formatting
            raise error.TestFail("Delete pool: %s fail" % exist_pool_name)

    # Create an invalid pool xml file
    if pool_xml == "invalid-pool-xml":
        tmp_xml = xml_utils.TempXMLFile()
        tmp_xml.write('"<pool><<<BAD>>><\'XML</name\>'
                      '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml.flush()
        pool_xml = tmp_xml.name
        logging.info(pool_xml)

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.isfile(pool_xml):
        # Fixed: close the file handle instead of leaking it
        with open(pool_xml, 'r') as xml_f:
            logging.debug("Create pool from file:\n %s", xml_f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml, option, ignore_status=True,
                                       debug=True, readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                raise error.TestFail(err)
            elif not pool_check(pool_name, pool_ins):
                raise error.TestFail("Pool check fail")
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
    finally:
        # Recover env
        if pool_ins.is_pool_active(pool_name):
            virsh.pool_destroy(pool_name)
        # Only restore when a backup was actually taken
        if (undefine_exist_pool and backup_xml and
                not pool_ins.pool_exists(exist_pool_name)):
            virsh.pool_define(backup_xml)
            if start_pool:
                pool_ins.start_pool(exist_pool_name)
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by
    tester or generated by dumpxml a pre-defined pool.
    """
    # Test parameters
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    # The placeholder path must be replaced by the test configuration
    if "/PATH/TO/POOL.XML" in pool_xml_f:
        test.cancel("Please replace %s with valid pool xml file" % pool_xml_f)
    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format}
    pvt = utlv.PoolVolumeTest(test, params)
    # old_uuid is compared later to prove a re-created pool got a new UUID
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            # Prepare a pool, dump its xml, then mutate that xml so the
            # later pool-create exercises the requested scenario
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                # keep the dumped xml as-is: duplicate name and uuid
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                process.run(cmd, shell=True)
            else:
                # The transient pool will gone after destroyed
                virsh.pool_destroy(pool_name)
                new_source_format = params.get("new_pool_src_format")
                if new_source_format:
                    cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                        source_format, new_source_format, pool_xml_f)
                    process.run(cmd, shell=True)
                # Remove uuid
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
        except Exception as details:
            # Setup failed: tear everything down and abort as a test error
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when prepare pool xml:\n %s"
                       % details)

    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option,
                                       ignore_status=True, debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            # The re-created pool must have been assigned a fresh UUID
            if pool_detail['uuid'] == old_uuid:
                test.fail("New created pool still use the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expect fail, but run successfully.")
            else:
                logging.debug("Command fail as expected")
    finally:
        # Always clean up the pool, the iscsi target and the xml file
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
ro_flag = False if readonly_mode: logging.debug("Readonly mode test") ro_flag = True # Run virsh test if os.path.exists(pool_xml_f): f = open(pool_xml_f, 'r') try: logging.debug("Create pool from file:\n %s", f.read()) finally: f.close() try: cmd_result = virsh.pool_create(pool_xml_f, option, ignore_status=True, debug=True, readonly=ro_flag) err = cmd_result.stderr.strip() status = cmd_result.exit_status if not status_error: if status: raise error.TestFail(err) utlv.check_actived_pool(pool_name) pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name) logging.debug("Pool detail: %s", pool_detail) if pool_detail['uuid'] == old_uuid: error.TestFail("New created pool still use the old UUID %s" % old_uuid) elif status_error and status == 0: raise error.TestFail("Expect fail, but run successfully.")
def run(test, params, env):
    """
    Test command: virsh pool-create.

    Create a libvirt pool from an XML file. The file could be given by
    tester or generated by dumpxml a pre-defined pool.
    """
    # Test parameters
    pool_xml_f = params.get("pool_create_xml_file", "")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    option = params.get("pool_create_extra_option", "")
    readonly_mode = "yes" == params.get("pool_create_readonly_mode", "no")
    status_error = "yes" == params.get("status_error", "no")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    duplicate_element = params.get("pool_create_duplicate_element", "")
    new_pool_name = params.get("new_pool_create_name")
    no_disk_label = "yes" == params.get("no_disk_label", "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        test.fail("Pool %s already exist" % pool_name)

    # Fold the pool description into params so pre_pool/cleanup_pool can
    # be driven with a single **params call
    kwargs = {'image_size': '1G', 'source_path': source_path,
              'source_name': source_name, 'source_format': source_format,
              'emulated_image': "emulated-image", 'pool_target': pool_target,
              'pool_name': pool_name}
    params.update(kwargs)
    pvt = utlv.PoolVolumeTest(test, params)
    # old_uuid is compared later to prove a re-created pool got a new UUID
    old_uuid = None
    new_device_name = None
    if pre_def_pool:
        try:
            # Prepare a pool, dump its xml, then mutate that xml so the
            # later pool-create exercises the requested scenario
            pvt.pre_pool(**params)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            old_uuid = virsh.pool_uuid(pool_name).stdout.strip()
            if no_disk_label:
                # Update <device_path>
                logging.debug("Try to update device path")
                new_device_name = utlv.setup_or_cleanup_iscsi(True)
                p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
                s_xml = pool_xml.SourceXML()
                s_xml.device_path = new_device_name
                p_xml.set_source(s_xml)
                pool_xml_f = p_xml.xml
            if duplicate_element == "name":
                # keep the dumped xml as-is: duplicate name and uuid
                pass
            elif duplicate_element == "uuid":
                pass
            elif duplicate_element == "source":
                # Remove <uuid> and update <name>
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
                cmd = "sed -i 's/<name>.*<\/name>/<name>%s<\/name>/g' %s" % (new_pool_name, pool_xml_f)
                process.run(cmd, shell=True)
            else:
                # The transient pool will gone after destroyed
                virsh.pool_destroy(pool_name)
                new_source_format = params.get("new_pool_src_format")
                if new_source_format:
                    cmd = "sed -i s/type=\\\'%s\\\'/type=\\\'%s\\\'/g %s" % (
                        source_format, new_source_format, pool_xml_f)
                    process.run(cmd, shell=True)
                # Remove uuid
                cmd = "sed -i '/<uuid>/d' %s" % pool_xml_f
                process.run(cmd, shell=True)
        except Exception as details:
            # Setup failed: tear everything down and abort as a test error
            pvt.cleanup_pool(**params)
            if new_device_name:
                utlv.setup_or_cleanup_iscsi(False)
            test.error("Error occurred when prepare pool xml:\n %s"
                       % details)

    # Create an invalid pool xml file
    if pool_xml_f == "invalid-pool-xml":
        tmp_xml_f = xml_utils.TempXMLFile()
        tmp_xml_f.write('"<pool><<<BAD>>><\'XML</name\>'
                        '!@#$%^&*)>(}>}{CORRUPTE|>!</pool>')
        tmp_xml_f.flush()
        pool_xml_f = tmp_xml_f.name

    # Readonly mode
    ro_flag = False
    if readonly_mode:
        logging.debug("Readonly mode test")
        ro_flag = True

    # Run virsh test
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())
    try:
        cmd_result = virsh.pool_create(pool_xml_f, option,
                                       ignore_status=True, debug=True,
                                       readonly=ro_flag)
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            if status:
                test.fail(err)
            utlv.check_actived_pool(pool_name)
            pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
            logging.debug("Pool detail: %s", pool_detail)
            # The re-created pool must have been assigned a fresh UUID
            if pool_detail['uuid'] == old_uuid:
                test.fail("New created pool still use the old UUID %s"
                          % old_uuid)
        else:
            if status == 0:
                test.fail("Expect fail, but run successfully.")
            else:
                logging.debug("Command fail as expected")
    finally:
        # Always clean up the pool, the iscsi target and the xml file
        pvt.cleanup_pool(**params)
        if new_device_name:
            utlv.setup_or_cleanup_iscsi(False)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
def pre_pool(self, pool_name, pool_type, pool_target, emulated_image,
             image_size="100M", pre_disk_vol=None):
    """
    Prepare and create a storage pool of the specific type.

    Note: 1. For scsi type pool, it only could be created from xml file
          2. Other type pools can be created by pool_create_as function
          3. Disk pool will not allow to create volume with virsh commands
             So we can prepare it before pool created

    :param pool_name: created pool name
    :param pool_type: dir, disk, logical, fs, netfs or else
    :param pool_target: target of storage pool
    :param emulated_image: use an image file to simulate a scsi disk
                           it could be used for disk, logical pool
    :param image_size: the size for emulated image
    :param pre_disk_vol: a list of partition sizes to create, no more than
                         4 partitions because of the msdos label; None
                         (the default) means no pre-created volume.
                         Fixed: previously a mutable [] default, which is
                         shared between calls.
    """
    if pre_disk_vol is None:
        pre_disk_vol = []
    extra = ""
    if pool_type == "dir":
        logging.info("Pool path:%s", self.tmpdir)
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
    elif pool_type == "disk":
        device_name = setup_or_cleanup_iscsi(is_setup=True,
                                             emulated_image=emulated_image,
                                             image_size=image_size)
        # If pre_disk_vol is empty, the disk pool will have no volume
        if isinstance(pre_disk_vol, list) and len(pre_disk_vol):
            for vol in pre_disk_vol:
                mk_part(device_name, vol)
        extra = " --source-dev %s" % device_name
    elif pool_type == "fs":
        device_name = setup_or_cleanup_iscsi(is_setup=True,
                                             emulated_image=emulated_image,
                                             image_size=image_size)
        cmd = "mkfs.ext4 -F %s" % device_name
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        extra = " --source-dev %s" % device_name
        utils.run(cmd)
    elif pool_type == "logical":
        logical_device = setup_or_cleanup_iscsi(is_setup=True,
                                                emulated_image=emulated_image,
                                                image_size=image_size)
        cmd_pv = "pvcreate %s" % logical_device
        vg_name = "vg_%s" % pool_type
        cmd_vg = "vgcreate %s %s" % (vg_name, logical_device)
        extra = "--source-name %s" % vg_name
        utils.run(cmd_pv)
        utils.run(cmd_vg)
        # Create a small volume for verification
        # And VG path will not exist if no any volume in.(bug?)
        cmd_lv = "lvcreate --name default_lv --size 1M %s" % vg_name
        utils.run(cmd_lv)
    elif pool_type == "netfs":
        nfs_server_dir = self.params.get("nfs_server_dir", "nfs-server")
        nfs_path = os.path.join(self.tmpdir, nfs_server_dir)
        if not os.path.exists(nfs_path):
            os.mkdir(nfs_path)
        pool_target = os.path.join(self.tmpdir, pool_target)
        if not os.path.exists(pool_target):
            os.mkdir(pool_target)
        setup_or_cleanup_nfs(is_setup=True,
                             export_options="rw,async,no_root_squash",
                             mount_src=nfs_path)
        source_host = self.params.get("source_host", "localhost")
        extra = "--source-host %s --source-path %s" % (source_host, nfs_path)
    elif pool_type == "iscsi":
        logical_device = setup_or_cleanup_iscsi(is_setup=True,
                                                emulated_image=emulated_image,
                                                image_size=image_size)
        iscsi_session = iscsi.iscsi_get_sessions()
        iscsi_device = ()
        # Find the iscsi session that exports our emulated image
        for iscsi_node in iscsi_session:
            if iscsi_node[1].count(emulated_image):
                iscsi_device = iscsi_node
                break
        if iscsi_device == ():
            raise error.TestFail("No iscsi device.")
        # "::" in the portal address means the target is local
        if "::" in iscsi_device[0]:
            iscsi_device = ('localhost', iscsi_device[1])
        extra = " --source-host %s --source-dev %s" % iscsi_device
    elif pool_type == "scsi":
        scsi_xml_file = self.params.get("scsi_xml_file")
        if not os.path.exists(scsi_xml_file):
            # Generate the scsi pool xml since no pre-made file exists
            scsi_xml_file = os.path.join(self.tmpdir, scsi_xml_file)
            logical_device = setup_or_cleanup_iscsi(
                is_setup=True, emulated_image=emulated_image,
                image_size=image_size)
            cmd = ("iscsiadm -m session -P 3 |grep -B3 %s| grep Host|awk "
                   "'{print $3}'" % logical_device.split('/')[2])
            scsi_host = utils.system_output(cmd)
            scsi_xml = """
<pool type='scsi'>
<name>%s</name>
<source>
<adapter type='scsi_host' name='host%s'/>
</source>
<target>
<path>/dev/disk/by-path</path>
</target>
</pool>
""" % (pool_name, scsi_host)
            logging.debug("Prepare the scsi pool xml: %s", scsi_xml)
            # Fixed: use a context manager so the handle is always closed
            with open(scsi_xml_file, 'w') as xml_object:
                xml_object.write(scsi_xml)
    # Create pool
    if pool_type == "scsi":
        re_v = virsh.pool_create(scsi_xml_file)
    else:
        re_v = virsh.pool_create_as(pool_name, pool_type, pool_target, extra)
    if not re_v:
        raise error.TestFail("Create pool failed.")
    # Check the created pool
    check_actived_pool(pool_name)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain.

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage.
        1.3. Set secret value.
    2. Setup an iscsi target (and an iscsi pool for "volume" type disks).
    3. Create an iscsi network/volume disk XML.
    4. Attach disk with the XML file and check the disk inside the VM.
    5. Detach the disk.

    :param test: test object (provides tmpdir for scratch files)
    :param params: test parameter dict
    :param env: test environment object (provides the VM)
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Pull all tunables from the cartesian params up front.
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    # Feature gates: 'iscsi' disks need libvirt >= 1.0.4, 'volume' disks >= 1.0.5.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in" +
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in" +
                                    " current libvirt version.")
    # Back VM XML so the domain definition can be restored afterwards.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            # NOTE(review): file handle from open() is never closed here.
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid: second whitespace token of `virsh secret-define`
            # stdout is expected to be the uuid.
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name: parse the second row of `virsh vol-list` output
            # (first row is the table header).
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            # NOTE(review): exception is constructed but never raised — looks
            # like a missing `raise`; verify intended behavior.
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        # Bring the domain to the state requested by `start_vm`.
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        disk_xml_f = open(disk_xml)
        disk_xml_content = disk_xml_f.read()
        disk_xml_f.close()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        # `status_error` inverts the expectation for negative tests.
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            # Save/restore round-trip with the disk attached.
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            # NOTE(review): "--disk_ony" looks like a typo for "--disk-only";
            # virsh should reject the unknown option — verify intent.
            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            pass
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM.

            :param expect: whether the disk is expected to be visible
            :raise error.TestError: on mismatch or if the domain is not running
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # The disk shows up in /proc/partitions once the guest
                    # kernel recognizes it.
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
        # NOTE(review): the matching except/finally for the `try:` above is
        # not visible in this view — the cleanup tail appears truncated.
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain.

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage.
        1.3. Set secret value.
    2. Setup an iscsi target (and an iscsi pool for "volume" type disks).
    3. Create an iscsi network/volume disk XML.
    4. Attach disk with the XML file and check the disk inside the VM.
    5. Detach the disk.

    :param test: test object (provides tmpdir for scratch files)
    :param params: test parameter dict
    :param env: test environment object (provides the VM)
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform — the guest there needs extra time to
    # recognize a hotplugged disk (see find_attach_disk below).
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    # Feature gates: 'iscsi' disks need libvirt >= 1.0.4, 'volume' disks >= 1.0.5.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back VM XML so the domain definition can be restored afterwards.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            # NOTE(review): file handle from open() is never closed here.
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid: second whitespace token of `virsh secret-define`
            # stdout is expected to be the uuid.
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True, is_login=False, image_size='1G',
            chap_user=chap_user, chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                # Row 0 is the vol-list table header; row 1 is the first
                # real (name, path) pair.
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                raise error.TestError("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            # NOTE(review): exception is constructed but never raised — looks
            # like a missing `raise`; verify intended behavior.
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        # Bring the domain to the state requested by `start_vm`.
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        disk_xml_f = open(disk_xml)
        disk_xml_content = disk_xml_f.read()
        disk_xml_f.close()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        # `status_error` inverts the expectation for negative tests.
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            # Save/restore round-trip with the disk attached.
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                # NOTE(review): exception is constructed but never raised —
                # looks like a missing `raise`; verify intended behavior.
                error.TestFail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                # NOTE(review): same missing `raise` as above.
                error.TestFail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            # NOTE(review): membership test against a CmdResult object —
            # presumably this should check `cmd_result.stdout`; verify.
            if snapshot_name2 not in cmd_result:
                raise error.TestError("Snapshot %s not found" % snapshot_name2)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM.

            :param expect: whether the disk is expected to be visible
            :raise error.TestError: on mismatch or if the domain is not running
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
        # NOTE(review): the matching except/finally for the `try:` above is
        # not visible in this view — the cleanup tail appears truncated.
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain.

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage.
        1.3. Set secret value.
    2. Setup an iscsi target (and an iscsi pool for "volume" type disks).
    3. Create an iscsi network/volume disk XML.
    4. Attach disk with the XML file and check the disk inside the VM.
    5. Detach the disk.

    :param test: avocado test object (cancel/fail/error reporting)
    :param params: test parameter dict
    :param env: test environment object (provides the VM)
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Pull all tunables from the cartesian params up front.
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform — the guest there needs extra time to
    # recognize a hotplugged disk (see find_attach_disk below).
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    # Feature gates: 'iscsi' disks need libvirt >= 1.0.4, 'volume' disks >= 1.0.5.
    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back VM XML so the domain definition can be restored in `finally`.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        # Bring the domain to the state requested by `start_vm`.
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid: second whitespace token of `virsh secret-define`
            # stdout is expected to be the uuid.
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value: b64encode works on bytes, so round-trip
            # through the preferred locale encoding.
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True, is_login=False, image_size='1G',
            chap_user=chap_user, chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                # Row 0 is the vol-list table header; row 1 is the first
                # real (name, path) pair.
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        # `status_error` inverts the expectation for negative tests.
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            # Save/restore round-trip with the disk attached.
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            # FIX: membership must be tested against the command's stdout
            # text; a CmdResult object itself is not a container, so
            # `snapshot_name2 not in cmd_result` raises TypeError.
            if snapshot_name2 not in cmd_result.stdout:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM.

            :param expect: whether the disk is expected to be visible
            :raise: test.error on mismatch or if the domain is not running
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs wait for a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)