def check_in_vm(target, old_parts):
    """
    Check mount/read/write disk in VM.

    :param target: Disk dev in VM.
    :param old_parts: Original disk partitions in VM.
    :return: True if the check succeeds.
    """
    try:
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        added_parts = list(set(new_parts).difference(set(old_parts)))
        logging.info("Added parts:%s", added_parts)
        if len(added_parts) != 1:
            logging.error("The number of new partitions is invalid in VM")
            return False

        added_part = None
        if target.startswith("vd"):
            if added_parts[0].startswith("vd"):
                added_part = added_parts[0]
        elif target.startswith("hd"):
            if added_parts[0].startswith("sd"):
                added_part = added_parts[0]
        elif target.startswith("sd"):
            added_part = added_parts[0]
        if not added_part:
            logging.error("Can't see added partition in VM")
            return False

        utils_disk.linux_disk_check(session, added_part)
        session.close()
        return True

    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
        logging.error(str(e))
        return False
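
# A hedged usage sketch (not from the original tests) of how check_in_vm() is
# typically driven: snapshot the guest's partitions before hot-plugging a
# disk, attach it, then let the helper diff the partition lists and exercise
# the new device. The "vdb" target and image path are illustrative only, and
# `vm`/`test` come from the calling avocado-vt test.
def example_hotplug_and_check(vm, test):
    from virttest import utils_disk, virsh

    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    # Hot-plug a disk, then verify it shows up and is usable in the guest.
    virsh.attach_disk(vm.name, "/var/lib/libvirt/images/new.img", "vdb",
                      ignore_status=False, debug=True)
    if not check_in_vm("vdb", old_parts):
        test.fail("Check disk partitions in VM failed")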
def test_blockcopy_extended_l2():
    """
    Do blockcopy after creating a snapshot with extended_l2 in the disk image.
    """
    # Create the snap chain and check the snap path's extended_l2 status
    test_obj.prepare_snapshot(snap_num=1)
    check_obj.check_image_info(test_obj.snap_path_list[0],
                               check_item='extended l2',
                               expected_value='true')
    # Do blockcopy
    virsh.blockcopy(vm_name, device, tmp_copy_path,
                    options=blockcopy_options, ignore_status=False,
                    debug=True)
    # Check that the domain has the blockcopy file and its extended_l2 status
    if len(vmxml.get_disk_source(vm_name)) < 2:
        test.fail('Domain disk num is less than 2, attach may have failed')
    else:
        image_file = vmxml.get_disk_source(vm_name)[1].find('source').get(
            'file')
        if image_file != tmp_copy_path:
            test.fail('Blockcopy path is not in domain disk: blockcopy '
                      'image path is %s, actual image path is %s'
                      % (tmp_copy_path, image_file))
        check_obj.check_image_info(tmp_copy_path, check_item='extended l2',
                                   expected_value='true')
    # Check that the domain can write a file on the new disk
    session = vm.wait_for_login()
    new_parts = utils_disk.get_parts_list(session)
    added_parts = list(set(new_parts).difference(set(test_obj.old_parts)))
    utils_disk.linux_disk_check(session, added_parts[0])
    session.close()
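
# Side note (not part of the test): extended_l2 is a qcow2 creation option
# (QEMU >= 5.2) that `qemu-img info` reports under "Format specific
# information", which is what check_image_info() verifies above. A hedged
# sketch of producing such an image directly; the path and size are
# illustrative.
def example_create_extended_l2_image():
    from avocado.utils import process

    path = "/tmp/extended_l2_base.qcow2"  # hypothetical path
    process.run("qemu-img create -f qcow2 -o extended_l2=on %s 100M" % path,
                shell=True)
    info = process.run("qemu-img info %s" % path, shell=True).stdout_text
    assert "extended l2: true" in info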
def post_migration_check(self, vms, params, uptime=None, uri=None):
    """
    Validate migration by performing the following checks:

    * check vm state after migration
    * uptime of the migrated vm > uptime of vm before migration
    * ping vm from target host by setting
      "check_network_accessibility_after_mig" to "yes"
    * by default, check the system disk on the migrated vm
    * check disk operations on the migrated vm by setting
      "check_disk_after_mig" to "yes"

    :param vms: VM objects of migrating vms
    :param params: the parameters used
    :param uptime: uptime dict of vms before migration
    :param uri: target virsh uri
    :return: updated dict of uptime
    """
    vm_state = params.get("virsh_migrated_state", "running")
    for vm in vms:
        if not libvirt.check_vm_state(vm.name, vm_state, uri=uri):
            raise exceptions.TestFail("Migrated VMs failed to be in %s "
                                      "state at destination" % vm_state)
        logging.info("Guest state at destination is '%s', as expected",
                     vm_state)
        if "offline" not in params.get(
                "migrate_options", params.get("virsh_migrate_options", "")):
            if uptime:
                vm_uptime = vm.uptime(connect_uri=uri)
                logging.info("uptime of migrated VM %s: %s", vm.name,
                             vm_uptime)
                if vm_uptime < uptime[vm.name]:
                    raise exceptions.TestFail("vm went for a reboot during "
                                              "migration")
                # Update vm uptime to check against when migrating back
                uptime[vm.name] = vm_uptime
            vm.verify_dmesg(connect_uri=uri)
        if params.get("check_network_accessibility_after_mig",
                      "no") == "yes":
            ping_count = int(params.get("ping_count", 10))
            self.ping_vm(vm, params, uri=uri, ping_count=ping_count)
        if params.get("simple_disk_check_after_mig", 'yes') == "yes":
            backup_uri, vm.connect_uri = vm.connect_uri, uri
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=360)
            vm_session_after_mig.cmd(
                "echo libvirt_simple_disk_check >> "
                "/tmp/libvirt_simple_disk_check")
            vm_session_after_mig.close()
            vm.connect_uri = backup_uri
        if params.get("check_disk_after_mig", "no") == "yes":
            disk_kname = params.get("check_disk_kname_after_mig", "vdb")
            backup_uri, vm.connect_uri = vm.connect_uri, uri
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=360)
            utils_disk.linux_disk_check(vm_session_after_mig, disk_kname)
            vm_session_after_mig.close()
            vm.connect_uri = backup_uri
    return uptime
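
# A hedged sketch of driving post_migration_check() from a migration test;
# `migration_test`, `vms`, `params`, `options` and `dest_uri` would come from
# the calling test, much like in the run() functions below.
def example_post_migration_check(migration_test, vms, params, options,
                                 dest_uri):
    # Record uptimes before migrating so a reboot can be detected afterwards.
    uptime = {vm.name: vm.uptime() for vm in vms}
    migration_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                thread_timeout=600)
    return migration_test.post_migration_check(vms, params, uptime,
                                               uri=dest_uri)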
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare tgtd and secret config.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operations.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only --diskspec vda,"
                                       "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       " --diskspec vda,file=/tmp/testvm-snap2"
                                       % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            utils_disk.linux_disk_check(session, added_part)
            session.close()
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' "
                      "in command line" % cmd)

    def check_auth_plaintext(vm_name, password):
        """
        Check if libvirt passed the plaintext of the chap authentication
        password to qemu.

        :param vm_name: The name of vm to be checked.
        :param password: The plaintext of the password used for chap
                         authentication.
        :return: True if using plaintext, False if not.
        """
        cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s"
               % (vm_name, password))
        return process.system(cmd, ignore_status=True, shell=True) == 0

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)
    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if device == "lun":
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        old_parts = utils_disk.get_parts_list(session)
        session.close()
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if ('source' in auth_place_in_location
                    and not libvirt_version.version_compare(3, 9, 0)):
                test.cancel("Placing auth in source is not supported by the "
                            "current libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True, is_login=False, image_size=emulated_size,
            chap_user=chap_user, chap_passwd=chap_passwd,
            portal_ip=iscsi_host)
        # If we use the qcow2 disk format, we should format the iscsi disk
        # first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num,
                      emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        # For a lun type device, the iothread attribute needs to be set on
        # the controller.
        if driver_iothread and device != "lun":
            driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)
        elif driver_iothread:
            vmxml.iothreads = int(driver_iothread)
        disk_xml.driver = driver_dict

        # Check if we want to use a faked uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}

        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth
        disk_xml.source = disk_source

        if device != "lun":
            device_str = "serial_" + device_target
            disk_xml.serial = device_str

        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, a lun type device needs to use
        # virtio-scsi instead of virtio, so an additional controller is
        # needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            if controller_addr_options:
                ctrl_addr_dict = {}
                for addr_option in controller_addr_options.split(','):
                    if addr_option != "":
                        addr_part = addr_option.split('=')
                        ctrl_addr_dict.update(
                            {addr_part[0].strip(): addr_part[1].strip()})
                ctrl.address = ctrl.new_controller_address(
                    attrs=ctrl_addr_dict)
            # If driver_iothread is set, add the iothread attribute to the
            # controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")
            # Check qemu command line.
            if test_qemu_cmd:
                check_qemu_cmd()
        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start. Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if device == "lun":
                    if not check_in_vm(device_target, old_parts):
                        test.fail("Check disk partitions in VM failed")
                else:
                    session = vm.wait_for_login()
                    added_part = utils_disk.get_disk_by_serial(
                        device_str, session=session)
                    if not added_part:
                        test.fail("Unable to get disk with serial {}".format(
                            device_str))
                    utils_disk.linux_disk_check(session, added_part)
                    session.close()
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_data_dir(),
                                         "%s.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)
            # Test that libvirt doesn't pass the plaintext of the chap
            # password to qemu; this was implemented in libvirt 4.3.0-1.
            if (libvirt_version.version_compare(4, 3, 0)
                    and (auth_uuid or auth_usage) and chap_passwd):
                if check_auth_plaintext(vm_name, chap_passwd):
                    test.fail("Libvirt should not pass plaintext of chap "
                              "password to qemu-kvm.")

    finally:
        # Close session.
        if 'session' in locals():
            session.close()

        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up the secret.
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
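
# Side note on the secret handling above: virsh secret-set-value expects a
# base64-encoded string, which is why the test encodes the chap password
# before calling virsh.secret_set_value(). A self-contained sketch of that
# round-trip ("redhat" is a made-up example password):
def example_secret_encoding():
    import base64
    import locale

    encoding = locale.getpreferredencoding()
    secret_string = base64.b64encode(
        "redhat".encode(encoding)).decode(encoding)
    # Decoding recovers the original plaintext.
    assert base64.b64decode(secret_string).decode(encoding) == "redhat"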
def run(test, params, env):
    """
    Test migration over a unix socket.

    1) Migrate vm over unix socket
    2) Migrate vm over unix socket - libvirt tunnelled(--tunnelled)
    3) Migrate vm over unix socket - enable multifd(--parallel)
    4) Migrate vm with copy storage over unix socket - one disk
    5) Migrate vm with copy storage over unix socket - multiple disks
    6) Abort migration over unix socket and migrate again
    7) Abort migration with copy storage over unix socket, and migrate again
    8) Migrate vm with copy storage over unix socket - multiple disks -
       enable multifd(--parallel)

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: updated images.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = eval(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create the first disk on the remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file", disk_format=disk_format,
                                     path=blk_source, size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create the additional disk on the local host
                libvirt_disk.create_disk("file", disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
                new_disk_dict = {"driver_type": disk_format}
                vm_was_running = vm.is_alive()
                libvirt_pcicontr.reset_pci_num(vm_name)
                if vm_was_running and not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
                result = libvirt.attach_additional_device(vm_name, target_dev,
                                                          disk_path,
                                                          new_disk_dict,
                                                          False)
                libvirt.check_exit_status(result)

                # Create the matching disk on the remote host
                libvirt_disk.create_disk("file", disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)
                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list

    def check_socket(params):
        """
        Check the number of connected unix sockets.

        :param params: the parameters used
        :raise: test.fail when the socket number is not as expected
        """
        postcopy_options = params.get("postcopy_options")
        vm_name = params.get("migrate_main_vm")
        exp_num = params.get("expected_socket_num", "2")
        if postcopy_options:
            migration_test.set_migratepostcopy(vm_name)
        cmd = ("netstat -xanp | grep -E \"CONNECTED"
               ".*(desturi-socket|migrateuri-socket)\" | wc -l")
        res = process.run(cmd, shell=True).stdout_text.strip()
        if res != exp_num:
            test.fail("There should be {} connected unix sockets, "
                      "but found {} sockets.".format(exp_num, res))

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")

    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
params.get("virsh_migrate_options", "--live --p2p --verbose") storage_type = params.get("storage_type") disk_num = params.get("disk_num") desturi_port = params.get("desturi_port", "22222") migrateuri_port = params.get("migrateuri_port", "33333") disks_uri_port = params.get("disks_uri_port", "44444") migrate_again = "yes" == params.get("migrate_again", "no") action_during_mig = params.get("action_during_mig") if action_during_mig: action_during_mig = eval(action_during_mig) extra_args = migration_test.update_virsh_migrate_extra_args(params) mig_result = None local_image_list = [] remote_image_list = [] vm_did_list = [] if not libvirt_version.version_compare(6, 6, 0): test.cancel("This libvirt version doesn't support " "migration over unix.") if storage_type == "nfs": # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # params for migration connection params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") dest_uri_ssh = libvirt_vm.complete_uri(params.get("migrate_dest_host")) unix_obj = None vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() bk_uri = vm.connect_uri postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) # For safety reasons, we'd better back up xmlfile. 
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        unix_obj = utils_conn.UNIXSocketConnection(params)
        unix_obj.conn_setup()
        unix_obj.auto_recover = True

        local_image_list, remote_image_list, vm_did_list = update_disk(
            vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()
        for did in vm_did_list:
            utils_disk.linux_disk_check(vm_session, did)

        # Execute the migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                    thread_timeout=600, ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig, extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        if migrate_again:
            logging.debug("Sleeping 10 seconds before rerunning "
                          "the migration.")
            time.sleep(10)
            if params.get("migrate_again_clear_func", "yes") == "yes":
                action_during_mig = None
            extra_args["status_error"] = params.get(
                "migrate_again_status_error", "no")
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=action_during_mig,
                                        **extra_args)
            mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri_ssh
            if not utils_misc.wait_for(
                    lambda: virsh.is_alive(vm_name, uri=dest_uri_ssh,
                                           debug=True), 60):
                test.fail("The migrated VM should be alive!")
            if vm_did_list:
                vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
                for did in vm_did_list:
                    vm_session_after_mig.cmd(
                        "echo mytest >> /mnt/%s1/mytest" % did)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean up the VM on both destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        orig_config_xml.sync()

        # Remove image files
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)
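
# A rough sketch of the virsh command that the unix-socket migration above
# boils down to, assuming UNIXSocketConnection() creates sockets matching the
# netstat pattern in check_socket(); the socket paths and VM name are
# illustrative, not taken from the test.
def example_unix_socket_migrate_cmd(vm_name="avocado-vt-vm1"):
    dest_uri = "qemu+unix:///system?socket=/tmp/desturi-socket"
    migrate_uri = "unix:///tmp/migrateuri-socket"
    return ("virsh migrate --live --p2p --verbose %s %s --migrateuri %s"
            % (vm_name, dest_uri, migrate_uri))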