def check_vm_state_after_abort(vm_name, vm_state_after_abort, src_uri, dest_uri, test):
    """
    Check the VM state after domjobabort the migration

    :param vm_name: str, vm name
    :param vm_state_after_abort: str, like "{'source': 'running', 'target': 'nonexist'}"
        source: local host, target: remote host
    :param src_uri: uri for source host
    :param dest_uri: uri for target host
    :param test: test object
    :raises: test.fail if the VM state or persistence does not match expectation
    """
    import ast

    # literal_eval parses the dict literal safely; eval() would execute
    # arbitrary expressions coming from the test configuration.
    state_dict = ast.literal_eval(vm_state_after_abort)

    # 1) The aborted migration must leave the source VM in the expected state.
    logging.debug("Check guest state should be {} on source host".format(
        state_dict['source']))
    libvirt.check_vm_state(vm_name, state=state_dict['source'], uri=src_uri)

    # 2) The VM must still be listed as persistent on the source.
    logging.debug("Check guest persistent on source host")
    cmd_res = virsh.domstats(vm_name,
                             '--list-persistent',
                             debug=True,
                             ignore_status=False)
    if not cmd_res.stdout_text.count(vm_name):
        test.fail("The guest is expected to be persistent on source host, "
                  "but it isn't")

    # 3) On the target host the VM either must not exist at all
    #    ('nonexist') or must be in the configured state.
    logging.debug("Check guest state should be {} on target host".format(
        state_dict['target']))
    if state_dict['target'] == 'nonexist':
        if virsh.domain_exists(vm_name, uri=dest_uri):
            test.fail("The domain on target host is found, but expected not")
    else:
        libvirt.check_vm_state(vm_name,
                               state=state_dict['target'],
                               uri=dest_uri)
def test_save_restore_hostdev_device_with_teaming():
    """
    Save and restore a VM that has a hostdev device in a teaming setup,
    then detach and re-attach the hostdev device, verifying network
    accessibility after each step.

    NOTE(review): relies on names from the enclosing test scope
    (vm, vm_name, test, mac_addr, bridge_name, get_ping_dest,
    check_vm_network_accessed) — presumably defined in the surrounding
    run() function; confirm against the full file.
    """
    logging.info("Save/restore VM.")
    save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
    # 'virsh save' must leave the guest shut off; 'virsh restore' must
    # bring it back to running.
    virsh.save(vm_name, save_file, debug=True, ignore_status=False,
               timeout=10)
    if not libvirt.check_vm_state(vm_name, "shut off"):
        test.fail("The guest should be down after executing 'virsh save'.")
    virsh.restore(save_file, debug=True, ignore_status=False)
    if not libvirt.check_vm_state(vm_name, "running"):
        test.fail(
            "The guest should be running after executing 'virsh restore'.")

    # Re-establish the serial console: the pre-save console is stale
    # after restore.
    vm.cleanup_serial_console()
    vm.create_serial_console()
    vm_session = vm.wait_for_serial_login()
    ping_ip = get_ping_dest(vm_session, mac_addr)
    # With the hostdev present, traffic bypasses the bridge, so tcpdump
    # on the bridge is expected to see nothing (tcpdump_status_error=True).
    check_vm_network_accessed(vm_session,
                              ping_dest=ping_ip,
                              tcpdump_iface=bridge_name,
                              tcpdump_status_error=True)

    logging.info("Detach the hostdev device.")
    hostdev_dev = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).devices.\
        by_device_tag("hostdev")
    virsh.detach_device(vm_name, hostdev_dev.xml, wait_remove_event=True,
                        debug=True, ignore_status=False)
    # The live XML must no longer contain any hostdev device.
    check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
        .devices.by_device_tag('hostdev')
    if check_hostdev:
        test.fail("The hostdev device exists after detaching %s."
                  % check_hostdev)
    # After detach the traffic falls back to the teaming backup path, so
    # tcpdump on the bridge should now capture packets.
    # NOTE(review): the second positional argument (2) is presumably a
    # ping/retry count for check_vm_network_accessed — confirm signature.
    check_vm_network_accessed(vm_session, 2,
                              ping_dest=ping_ip,
                              tcpdump_iface=bridge_name,
                              tcpdump_status_error=False)

    logging.info("Attach the hostdev device.")
    virsh.attach_device(vm_name, hostdev_dev.xml, debug=True,
                        ignore_status=False)
    # Hostdev restored: traffic bypasses the bridge again.
    check_vm_network_accessed(vm_session,
                              ping_dest=ping_ip,
                              tcpdump_iface=bridge_name,
                              tcpdump_status_error=True)
def post_migration_check(self, vms, params, uptime, uri=None):
    """
    Validating migration by performing checks in this method
    * uptime of the migrated vm > uptime of vm before migration
    * ping vm from target host
    * check vm state after migration

    :param vms: VM objects of migrating vms
    :param params: test parameter dict
    :param uptime: uptime dict of vms before migration
    :param uri: target virsh uri
    :return: updated dict of uptime
    :raises: exceptions.TestFail if the state or uptime check fails
    """
    vm_state = params.get("virsh_migrated_state", "running")
    ping_count = int(params.get("ping_count", 10))
    for vm in vms:
        if not libvirt.check_vm_state(vm.name, vm_state, uri=uri):
            raise exceptions.TestFail("Migrated VMs failed to be in %s "
                                      "state at destination" % vm_state)
        logging.info("Guest state is '%s' at destination is as expected",
                     vm_state)
        # Fix: params.get("migrate_options") returns None when the param
        # is unset, and `"offline" not in None` raises TypeError.  Fall
        # back the same way the newer post_migration_check variant does.
        if "offline" not in params.get(
                "migrate_options", params.get("virsh_migrate_options", "")):
            # Uptime must not reset across a live migration — a reset
            # means the guest rebooted in transit.
            vm_uptime = vm.uptime(connect_uri=uri)
            logging.info("uptime of migrated VM %s: %s", vm.name, vm_uptime)
            if vm_uptime < uptime[vm.name]:
                raise exceptions.TestFail("vm went for a reboot during "
                                          "migration")
            self.ping_vm(vm, params, uri=uri, ping_count=ping_count)
            # update vm uptime to check when migrating back
            uptime[vm.name] = vm_uptime
            vm.verify_dmesg(connect_uri=uri)
    return uptime
def _run_complex_func(vm, one_func, virsh_event_session=None):
    """
    Run a function based on a dict definition

    The dict may delay execution until a given domain event has been
    received ('after_event'), assert that another event has NOT yet been
    received ('before_event'), and assert that the VM is not yet paused
    ('before_pause').

    :param vm: the VM object
    :param one_func: the function to be executed; a dict with keys
        'func', optional 'func_param', 'after_event', 'before_event',
        'before_pause'
    :param virsh_event_session: VirshSession to collect domain events
    :raises: exceptions.TestError if any error happens

    NOTE(review): this body references `self.func_ret` and `desturi`,
    which are not parameters — presumably this is a closure defined
    inside a method where both are in scope; confirm against the full
    file.
    """
    logging.debug("Handle function invoking:%s", one_func)
    before_vm_pause = 'yes' == one_func.get('before_pause', 'no')
    after_event = one_func.get('after_event')
    before_event = one_func.get('before_event')
    func = one_func.get('func')
    # Waiting on an event is impossible without an event session.
    if after_event and not virsh_event_session:
        raise exceptions.TestError("virsh session for collecting domain "
                                   "events is not provided")
    if after_event:
        logging.debug("Below events are received:"
                      "%s", virsh_event_session.get_stripped_output())
        # Poll up to 30s for the triggering event to appear in the
        # event session output.
        if not utils_misc.wait_for(
                lambda: re.findall(
                    after_event,
                    virsh_event_session.get_stripped_output()), 30):
            raise exceptions.TestError("Unable to find "
                                       "event {}".format(after_event))
        logging.debug("Receive the event '{}'".format(after_event))
    # If 'before_event' is provided, then 'after_event' must be provided
    if before_event and re.findall(
            before_event, virsh_event_session.get_stripped_output()):
        raise exceptions.TestError("The function '{}' should "
                                   "be run before the event "
                                   "'{}', but the event has "
                                   "been received".format(
                                       func, before_event))
    # Check if VM state is paused
    if before_vm_pause and libvirt.check_vm_state(
            vm.name, 'paused', uri=desturi):
        raise exceptions.TestError("The function '{}' should "
                                   "be run before VM is paused, "
                                   "but VM is already "
                                   "paused".format(func))

    func_param = one_func.get("func_param")
    if func_param:
        #one_param_dict = args['multi_func_params'][func]
        logging.debug("Run function {} with "
                      "parameters '{}'".format(func, func_param))
        # Results are collected keyed by the function object itself.
        self.func_ret.update({func: func(func_param)})
    else:
        logging.debug("Run function {}".format(func))
        self.func_ret.update({func: func()})
def test_save_restore_hostdev_iface_with_teaming():
    """
    Save and restore a VM that has a hostdev-type interface in a teaming
    setup, then verify the guest network is still reachable.

    NOTE(review): relies on names from the enclosing test scope
    (vm, vm_name, test, bridge_name, check_vm_network_accessed) —
    presumably defined in the surrounding run() function; confirm
    against the full file.
    """
    logging.info("Save/restore VM.")
    save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
    # 'virsh save' must leave the guest shut off; 'virsh restore' must
    # bring it back to running.
    virsh.save(vm_name, save_file, debug=True, ignore_status=False,
               timeout=10)
    if not libvirt.check_vm_state(vm_name, "shut off"):
        test.fail("The guest should be down after executing 'virsh save'.")
    virsh.restore(save_file, debug=True, ignore_status=False)
    if not libvirt.check_vm_state(vm_name, "running"):
        test.fail(
            "The guest should be running after executing 'virsh restore'.")
    # Re-establish the serial console: the pre-save console is stale
    # after restore.
    vm.cleanup_serial_console()
    vm.create_serial_console()
    vm_session = vm.wait_for_serial_login()
    # With the hostdev interface active, traffic bypasses the bridge, so
    # tcpdump on the bridge is expected to see nothing.
    check_vm_network_accessed(vm_session,
                              tcpdump_iface=bridge_name,
                              tcpdump_status_error=True)
def guest_lifecycle():
    """
    Exercise one guest lifecycle operation (suspend/resume, shutdown/
    start, or reboot) while a device is passed through, then verify the
    passthrough adapter is still present.

    NOTE(review): relies on names from the enclosing test scope
    (operation, vm, vm_name, test, supported_err, nic_list_before) —
    presumably defined in the surrounding run() function; confirm
    against the full file.
    """
    if operation == "suspend":
        # Suspend
        logging.info("Performing VM Suspend with device pass-through")
        result = virsh.suspend(vm_name, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
        libvirt.check_vm_state(vm_name, 'paused')
        # Give the guest some time in the paused state before resuming.
        time.sleep(10)
        # Resume
        logging.info("Performing VM Resume with device pass-through")
        result = virsh.resume(vm_name, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
        libvirt.check_vm_state(vm_name, 'running')
    elif operation == "shutdown":
        # Shutdown and Start the VM
        try:
            logging.info("Performing VM Shutdown with device pass-through")
            vm.shutdown()
            vm.wait_for_shutdown()
            libvirt.check_vm_state(vm_name, 'shut off')
            logging.info("Performing VM Start with device pass-through")
            vm.start()
            libvirt.check_vm_state(vm_name, 'running')
            vm.wait_for_login().close()
        except virt_vm.VMStartError as detail:
            test.fail("VM failed to start." "Error: %s" % str(detail))
    elif operation == "reboot":
        # Reboot
        logging.info("Performing VM Reboot with device pass-through")
        result = virsh.reboot(vm_name, ignore_status=True, debug=True)
        # Some guests/hypervisors do not support 'virsh reboot'; treat
        # that specific error as a soft skip rather than a failure.
        if supported_err in result.stderr.strip():
            logging.info("Reboot is not supported")
        else:
            libvirt.check_exit_status(result)
    else:
        logging.debug("No operation for the domain")
    # NOTE(review): the test passes when the PCI device list differs
    # from nic_list_before — presumably nic_list_before was captured
    # before the adapter was passed through, so a difference means the
    # adapter is (still) visible; confirm against the enclosing test.
    if sorted(vm.get_pci_devices()) != sorted(nic_list_before):
        logging.debug("Adapter found after lifecycle operation")
    else:
        test.fail("Passthroughed adapter not found after lifecycle operation")
def check_state():
    """
    Report whether the destination domain has reached the expected state.

    Any exception from the state query (e.g. the domain does not exist
    yet) is treated the same as "not in the expected state", so this is
    safe to poll from a wait loop.

    :return: True if the domain state matches, False otherwise
    """
    try:
        reached = libvirt.check_vm_state(dest_vm_name, state, uri=uri)
    except Exception:
        # Domain missing or query failure: simply not there yet.
        return False
    return reached
def post_migration_check(self, vms, params, uptime=None, uri=None):
    """
    Validating migration by performing checks in this method
    * check vm state after migration
    * uptime of the migrated vm > uptime of vm before migration
    * ping vm from target host by setting
      "check_network_accessibility_after_mig" to "yes"
    * As default, check system disk on the migrated vm
    * check disk operations on the migrated VM by setting
      "check_disk_after_mig" to "yes"

    :param vms: VM objects of migrating vms
    :param params: test parameter dict
    :param uptime: uptime dict of vms before migration
    :param uri: target virsh uri
    :return: updated dict of uptime
    :raises: exceptions.TestFail if the state or uptime check fails
    """
    vm_state = params.get("virsh_migrated_state", "running")
    for vm in vms:
        # The migrated domain must be in the expected state on the target.
        if not libvirt.check_vm_state(vm.name, vm_state, uri=uri):
            raise exceptions.TestFail("Migrated VMs failed to be in %s "
                                      "state at destination" % vm_state)
        logging.info("Guest state is '%s' at destination is as expected",
                     vm_state)
        # All runtime checks below only make sense for a non-offline
        # migration (offline migration moves only the config).
        if "offline" not in params.get(
                "migrate_options", params.get("virsh_migrate_options", "")):
            if uptime:
                # Uptime must not reset across migration — a reset means
                # the guest rebooted in transit.
                vm_uptime = vm.uptime(connect_uri=uri)
                logging.info("uptime of migrated VM %s: %s", vm.name,
                             vm_uptime)
                if vm_uptime < uptime[vm.name]:
                    raise exceptions.TestFail(
                        "vm went for a reboot during "
                        "migration")
                # update vm uptime to check when migrating back
                uptime[vm.name] = vm_uptime
            vm.verify_dmesg(connect_uri=uri)
            if params.get("check_network_accessibility_after_mig",
                          "no") == "yes":
                ping_count = int(params.get("ping_count", 10))
                self.ping_vm(vm, params, uri=uri, ping_count=ping_count)
            if params.get("simple_disk_check_after_mig", 'yes') == "yes":
                # Temporarily point vm.connect_uri at the target so the
                # serial console attaches to the migrated instance;
                # restored afterwards.
                backup_uri, vm.connect_uri = vm.connect_uri, uri
                vm.create_serial_console()
                vm_session_after_mig = vm.wait_for_serial_login(
                    timeout=360)
                # A successful append proves the system disk is writable
                # after migration.
                vm_session_after_mig.cmd(
                    "echo libvirt_simple_disk_check >> /tmp/libvirt_simple_disk_check"
                )
                vm_session_after_mig.close()
                vm.connect_uri = backup_uri
            if params.get("check_disk_after_mig", "no") == "yes":
                # Deeper disk exercise on a secondary disk (default vdb).
                disk_kname = params.get("check_disk_kname_after_mig",
                                        "vdb")
                backup_uri, vm.connect_uri = vm.connect_uri, uri
                vm.create_serial_console()
                vm_session_after_mig = vm.wait_for_serial_login(
                    timeout=360)
                utils_disk.linux_disk_check(vm_session_after_mig,
                                            disk_kname)
                vm_session_after_mig.close()
                vm.connect_uri = backup_uri
    return uptime
def run_test(dev_type, params, test_obj=None):
    """
    Test domain lifecycle

    1) Start the vm and check network
    2) Destroy and start the VM, and check network
    3) Save and restore, and check network
    4) Suspend and resume, and check network
    5) Reboot the VM and check the network

    :param dev_type: interface device type to create
    :param params: test parameter dict
    :param test_obj: helper object passed through to the checkers
    """
    # Setup Iface device
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    # SECURITY NOTE: eval() on a test parameter executes arbitrary
    # expressions; the cfg files are trusted here, but consider
    # ast.literal_eval if iface_dict is always a plain literal.
    iface_dict = eval(params.get('iface_dict', '{}'))
    iface_dev = interface_base.create_iface(dev_type, iface_dict)
    libvirt.add_vm_device(vmxml, iface_dev)

    logging.info("Start a VM with a '%s' type interface.", dev_type)
    vm.start()
    vm.wait_for_serial_login(timeout=240).close()
    check_points.check_network_accessibility(vm, test_obj=test_obj, **params)

    logging.info("Destroy and start the VM.")
    virsh.destroy(vm.name, **VIRSH_ARGS)
    virsh.start(vm.name, **VIRSH_ARGS)
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=True, **params)

    logging.info("Save the VM.")
    save_error = "yes" == params.get("save_error", "no")
    save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
    # Fix: the save target was the literal 'sss', so a later restore
    # from save_path could never find the image.  Save to save_path.
    res = virsh.save(vm.name, save_path, debug=True)
    libvirt.check_exit_status(res, expect_error=save_error)
    if not save_error:
        logging.info("Restore vm.")
        virsh.restore(save_path, **VIRSH_ARGS)
        check_points.check_network_accessibility(
            vm, test_obj=test_obj, config_vdpa=False, **params)

    logging.info("Suspend and resume the vm.")
    virsh.suspend(vm.name, **VIRSH_ARGS)
    if not libvirt.check_vm_state(vm_name, "paused"):
        test.fail("VM should be paused!")
    virsh.resume(vm.name, **VIRSH_ARGS)
    if not libvirt.check_vm_state(vm_name, "running"):
        test.fail("VM should be running!")
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=False, **params)

    logging.debug("Reboot VM and check network.")
    virsh.reboot(vm.name, **VIRSH_ARGS)
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=False, **params)
def run(test, params, env):
    """
    Test virsh migrate command.

    Prepares the source VM (shared disk, graphics password, persistency,
    postcopy stress), runs a uni-direction pre/postcopy migration with
    the configured extra options, then validates the source and
    destination domains (state, persistency, XML updates from --xml /
    --persistent-xml, graphics password, uptime, network).

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def cleanup_vm(vm, vm_name='', uri=''):
        """
        Clean up vm in the src or destination host environment
        when doing the uni-direction migration.
        """
        # Backup vm name and uri
        uri_bak = vm.connect_uri
        vm_name_bak = vm.name

        # Destroy and undefine vm
        vm.connect_uri = uri if uri else uri_bak
        vm.name = vm_name if vm_name else vm_name_bak
        logging.info("Cleaning up VM %s on %s", vm.name, vm.connect_uri)
        if vm.is_alive():
            vm.destroy()
        if vm.is_persistent():
            vm.undefine()

        # Restore vm connect_uri
        vm.connect_uri = uri_bak
        vm.name = vm_name_bak

    # Check whether there are unset parameters
    for v in list(itervalues(params)):
        if isinstance(v, string_types) and v.count("EXAMPLE"):
            test.cancel("Please set real value for %s" % v)

    # Params for virsh migrate options:
    live_migration = params.get("live_migration") == "yes"
    offline_migration = params.get("offline_migration") == "yes"
    persistent = params.get("persistent") == "yes"
    undefinesource = params.get("undefinesource") == "yes"
    p2p = params.get("p2p") == "yes"
    tunnelled = params.get("tunnelled") == "yes"
    postcopy = params.get("postcopy") == "yes"
    dname = params.get("dname")
    xml_option = params.get("xml_option") == "yes"
    persistent_xml_option = params.get("persistent_xml_option") == "yes"
    extra_options = params.get("virsh_migrate_extra", "")

    # Build the option string; each flag is only appended once.
    if live_migration and not extra_options.count("--live"):
        extra_options = "%s --live" % extra_options
    if offline_migration and not extra_options.count("--offline"):
        extra_options = "%s --offline" % extra_options
    if persistent and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    if undefinesource and not extra_options.count("--undefinesource"):
        extra_options = "%s --undefinesource" % extra_options
    if p2p and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if tunnelled and not extra_options.count("--tunnelled"):
        extra_options = "%s --tunnelled" % extra_options
    # --tunnelled requires --p2p
    if tunnelled and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if postcopy and not extra_options.count("--postcopy"):
        extra_options = "%s --postcopy" % extra_options
    if dname and not extra_options.count("--dname"):
        extra_options = "%s --dname %s" % (extra_options, dname)
    # --xml / --persistent-xml values are appended later, after the
    # updated XML files are prepared (dead `if ...: pass` stubs removed).
    # --persistent-xml requires --persistent
    if persistent_xml_option and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options

    # Set param migrate_options in case it is used somewhere:
    params.setdefault("migrate_options", extra_options)

    # Params for postcopy migration
    postcopy_timeout = int(params.get("postcopy_migration_timeout", "180"))

    # Params for migrate hosts:
    server_cn = params.get("server_cn")
    client_cn = params.get("client_cn")
    migrate_source_host = client_cn if client_cn else params.get(
        "migrate_source_host")
    migrate_dest_host = server_cn if server_cn else params.get(
        "migrate_dest_host")

    # Params for migrate uri
    transport = params.get("transport", "tls")
    transport_port = params.get("transport_port")
    uri_port = ":%s" % transport_port if transport_port else ''
    hypervisor_driver = params.get("hypervisor_driver", "qemu")
    hypervisor_mode = params.get("hypervisor_mode", 'system')
    if "virsh_migrate_desturi" not in list(params.keys()):
        params["virsh_migrate_desturi"] = "%s+%s://%s%s/%s" % (
            hypervisor_driver, transport, migrate_dest_host, uri_port,
            hypervisor_mode)
    if "virsh_migrate_srcuri" not in list(params.keys()):
        params["virsh_migrate_srcuri"] = "%s:///%s" % (hypervisor_driver,
                                                       hypervisor_mode)
    dest_uri = params.get("virsh_migrate_desturi")
    src_uri = params.get("virsh_migrate_srcuri")

    # Params for src vm cfg:
    src_vm_cfg = params.get("src_vm_cfg")
    src_vm_status = params.get("src_vm_status")
    with_graphic_passwd = params.get("with_graphic_passwd")
    graphic_passwd = params.get("graphic_passwd")

    # For test result check
    cancel_exception = False
    fail_exception = False
    exception = False
    result_check_pass = True

    # Objects(SSH, TLS and TCP, etc) to be cleaned up in finally
    objs_list = []

    # VM objects for migration test
    vms = []

    try:
        # Get a MigrationTest() Object
        logging.debug("Get a MigrationTest() object")
        obj_migration = migration.MigrationTest()

        # Setup libvirtd remote connection TLS connection env
        if transport == "tls":
            tls_obj = TLSConnection(params)
            # Setup CA, server(on dest host) and client(on src host)
            tls_obj.conn_setup()
            # Add tls_obj to objs_list
            objs_list.append(tls_obj)

        # Enable libvirtd remote connection transport port
        if transport == 'tls':
            transport_port = '16514'
        elif transport == 'tcp':
            transport_port = '16509'
        obj_migration.migrate_pre_setup(dest_uri, params,
                                        ports=transport_port)

        # Back up vm name for recovery in finally
        vm_name_backup = params.get("migrate_main_vm")

        # Get a vm object for migration
        logging.debug("Get a vm object for migration")
        vm = env.get_vm(vm_name_backup)

        # Back up vm xml for recovery in finally
        logging.debug("Backup vm xml before migration")
        vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        if not vm_xml_backup:
            test.error("Backing up xmlfile failed.")

        # Prepare shared disk in vm xml for live migration:
        # Change the source of the first disk of vm to shared disk
        if live_migration:
            logging.debug("Prepare shared disk in vm xml for live migration")
            storage_type = params.get("storage_type")
            if storage_type == 'nfs':
                logging.debug("Prepare nfs shared disk in vm xml")
                nfs_mount_dir = params.get("nfs_mount_dir")
                libvirt.update_vm_disk_source(vm.name, nfs_mount_dir)
                libvirt.update_vm_disk_driver_cache(vm.name,
                                                    driver_cache="none")
            else:
                # TODO:Other storage types
                test.cancel("Other storage type is not supported for now")

        # Prepare graphic password in vm xml
        if with_graphic_passwd in ["yes", "no"]:
            logging.debug("Set VM graphic passwd in vm xml")
            # Get graphics list in vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            # Fix: get_graphics_devices is a method; without () the bound
            # method object is always truthy and the "no graphics" branch
            # below could never run.
            graphics_list = vmxml_tmp.get_graphics_devices()

            if not graphics_list:
                # Add spice graphic with passwd to vm xml
                logging.debug("Add spice graphic to vm xml")
                graphics.Graphics.add_graphic(vm.name, graphic_passwd,
                                              "spice")
            elif graphic_passwd:
                # Graphics already exist in vm xml and passwd is required
                # Add passwd to the first graphic device in vm xml
                logging.debug("Add graphic passwd to vm xml")
                vm_xml.VMXML.add_security_info(vmxml_tmp, graphic_passwd)
                vmxml_tmp.sync()
            else:
                # Graphics already exist in vm xml and non-passwd is required
                # Do nothing here as passwd has been removed by new_from_inactive_dumpxml()
                pass

        # Prepare for required src vm status.
        logging.debug("Turning %s into certain state.", vm.name)
        if src_vm_status == "running" and not vm.is_alive():
            vm.start()
        elif src_vm_status == "shut off" and not vm.is_dead():
            vm.destroy()

        # Prepare for required src vm persistency.
        logging.debug("Prepare for required src vm persistency")
        if src_vm_cfg == "persistent" and not vm.is_persistent():
            logging.debug("Make src vm persistent")
            vm_xml_backup.define()
        elif src_vm_cfg == "transient" and vm.is_persistent():
            logging.debug("Make src vm transient")
            vm.undefine()

        # Prepare for postcopy migration: install and run stress in VM
        if postcopy and src_vm_status == "running":
            logging.debug(
                "Install and run stress in vm for postcopy migration")
            pkg_name = 'stress'

            # Get a vm session
            logging.debug("Get a vm session")
            vm_session = vm.wait_for_login()
            if not vm_session:
                test.error("Can't get a vm session successfully")

            # Install package stress if it is not installed in vm
            logging.debug(
                "Check if stress tool is installed for postcopy migration")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in vm
            logging.debug("Run stress in vm")
            stress_args = params.get("stress_args")
            vm_session.cmd('stress %s' % stress_args)

        # Prepare for --xml <updated_xml_file>.
        if xml_option:
            logging.debug("Preparing new xml file for --xml option.")
            # Get the vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_dumpxml(
                vm.name, "--security-info --migratable")
            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_title = "VM Title in updated xml"
            vmxml_tmp.title = updated_title
            # Add --xml to migrate extra_options
            extra_options = ("%s --xml=%s" % (extra_options, vmxml_tmp.xml))

        # Prepare for --persistent-xml <updated_xml_file>.
        if persistent_xml_option:
            logging.debug(
                "Preparing new xml file for --persistent-xml option.")
            # Get the vm xml
            vmxml_persist_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm.name, "--security-info")
            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_persist_title = "VM Title in updated persist xml"
            vmxml_persist_tmp.title = updated_persist_title
            # Add --persistent-xml to migrate extra_options
            extra_options = ("%s --persistent-xml=%s" %
                             (extra_options, vmxml_persist_tmp.xml))

        # Prepare host env: clean up vm on dest host
        logging.debug("Clean up vm on dest host before migration")
        if dname:
            cleanup_vm(vm, dname, dest_uri)
        else:
            cleanup_vm(vm, vm.name, dest_uri)

        # Prepare host env: set selinux state before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(params.get("selinux_state", "enforcing"))

        # Check vm network connectivity by ping before migration
        logging.debug("Check vm network before migration")
        if src_vm_status == "running":
            obj_migration.ping_vm(vm, params)

        # Get VM uptime before migration
        if src_vm_status == "running":
            vm_uptime = vm.uptime()
            logging.info("Check VM uptime before migration: %s", vm_uptime)

        # Print vm active xml before migration
        process.system_output("virsh dumpxml %s --security-info" % vm.name,
                              shell=True)

        # Print vm inactive xml before migration
        process.system_output(
            "virsh dumpxml %s --security-info --inactive" % vm.name,
            shell=True)

        # Do uni-direction migration.
        # NOTE: vm.connect_uri will be set to dest_uri once migration is complete successfully
        logging.debug("Start to do migration test.")
        vms.append(vm)
        if postcopy:
            # Monitor the qemu monitor event of "postcopy-active" for postcopy migration
            logging.debug(
                "Monitor the qemu monitor event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            cmd = "qemu-monitor-event --loop --domain %s --event MIGRATION" % vm.name
            virsh_session.sendline(cmd)

            # Do live migration and switch to postcopy by "virsh migrate-postcopy"
            logging.debug("Start to do postcopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="",
                                       thread_timeout=postcopy_timeout,
                                       ignore_status=True,
                                       func=virsh.migrate_postcopy,
                                       extra_opts=extra_options,
                                       shell=True)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

            # Check "postcopy-active" event after postcopy migration
            logging.debug(
                "Check postcopy-active event after postcopy migration")
            virsh_session.send_ctrl("^C")
            events_output = virsh_session.get_stripped_output()
            logging.debug("events_output are %s", events_output)
            pattern = "postcopy-active"
            if not re.search(pattern, events_output):
                test.fail("Migration didn't switch to postcopy mode")
            # Fix: close() was called twice in a row; once is enough.
            virsh_session.close()
        else:
            logging.debug("Start to do precopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="",
                                       ignore_status=True,
                                       extra_opts=extra_options)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

        # Check src vm after migration
        # First, update vm name and connect_uri to src vm's
        vm.name = vm_name_backup
        vm.connect_uri = src_uri
        logging.debug("Start to check %s state on src %s after migration.",
                      vm.name, src_uri)

        # Check src vm status after migration: existence, running, shutoff, etc
        logging.debug("Check vm status on source after migration")
        if offline_migration:
            if src_vm_status == "shut off" and undefinesource:
                if vm.exists():
                    result_check_pass = False
                    logging.error(
                        "Src vm should not exist after offline migration"
                        " with --undefinesource")
                    logging.debug("Src vm state is %s" % vm.state())
            elif not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Src vm should be %s after offline migration" %
                              src_vm_status)
                logging.debug("Src vm state is %s" % vm.state())
        if live_migration:
            if not undefinesource and src_vm_cfg == "persistent":
                if not libvirt.check_vm_state(
                        vm.name, "shut off", uri=vm.connect_uri):
                    result_check_pass = False
                    logging.error(
                        "Src vm should be shutoff after live migration")
                    logging.debug("Src vm state is %s" % vm.state())
            elif vm.exists():
                result_check_pass = False
                logging.error("Src vm should not exist after live migration")
                logging.debug("Src vm state is %s" % vm.state())

        # Check src vm status after migration: persistency
        logging.debug("Check vm persistency on source after migration")
        if src_vm_cfg == "persistent" and not undefinesource:
            if not vm.is_persistent():
                # Src vm should be persistent after migration without --undefinesource
                result_check_pass = False
                logging.error("Src vm should be persistent after migration")
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Src vm should be not be persistent after migration")

        # Check dst vm after migration
        # First, update vm name and connect_uri to dst vm's
        vm.name = dname if dname else vm.name
        vm.connect_uri = dest_uri
        logging.debug("Start to check %s state on target %s after migration.",
                      vm.name, vm.connect_uri)

        # Check dst vm status after migration: running, shutoff, etc
        logging.debug("Check vm status on target after migration")
        if live_migration:
            if not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm should be %s after live migration",
                              src_vm_status)
        elif vm.is_alive():
            result_check_pass = False
            logging.error("Dst vm should not be alive after offline migration")

        # Print vm active xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info" %
                              (vm.connect_uri, vm.name), shell=True)

        # Print vm inactive xml after migration
        process.system_output(
            "virsh -c %s dumpxml %s --security-info --inactive" %
            (vm.connect_uri, vm.name), shell=True)

        # Check dst vm xml after migration
        logging.debug("Check vm xml on target after migration")
        remote_virsh = virsh.Virsh(uri=vm.connect_uri)
        vmxml_active_tmp = vm_xml.VMXML.new_from_dumpxml(
            vm.name, "--security-info", remote_virsh)
        vmxml_inactive_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm.name, "--security-info", remote_virsh)

        # Check dst vm xml after migration: --xml <updated_xml_file>
        if xml_option and not offline_migration:
            logging.debug("Check vm active xml for --xml")
            if not vmxml_active_tmp.title == updated_title:
                print("vmxml active tmp title is %s" % vmxml_active_tmp.title)
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")
        if xml_option and offline_migration:
            logging.debug("Check vm inactive xml for --xml")
            # Fix: offline migration only transfers the persistent config,
            # so --xml must be verified against the INACTIVE xml; the old
            # code re-checked the active xml.
            if not vmxml_inactive_tmp.title == updated_title:
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        # Check dst vm xml after migration: --persistent-xml <updated_xml_file>
        if persistent_xml_option:
            logging.debug("Check vm inactive xml for --persistent-xml")
            if not offline_migration and not vmxml_inactive_tmp.title == updated_persist_title:
                print("vmxml inactive tmp title is %s" %
                      vmxml_inactive_tmp.title)
                result_check_pass = False
                logging.error(
                    "--persistent-xml doesn't take effect in live migration")
            elif offline_migration and vmxml_inactive_tmp.title == updated_persist_title:
                result_check_pass = False
                logging.error(
                    "--persistent-xml should not take effect in offline "
                    "migration")

        # Check dst vm xml after migration: graphic passwd
        if with_graphic_passwd == "yes":
            logging.debug("Check graphic passwd in vm xml after migration")
            graphic_active = vmxml_active_tmp.devices.by_device_tag(
                'graphics')[0]
            graphic_inactive = vmxml_inactive_tmp.devices.by_device_tag(
                'graphics')[0]
            try:
                logging.debug("Check graphic passwd in active vm xml")
                if graphic_active.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in active xml of dst vm should be %s",
                        graphic_passwd)

                logging.debug("Check graphic passwd in inactive vm xml")
                if graphic_inactive.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in inactive xml of dst vm should be %s",
                        graphic_passwd)
            except LibvirtXMLNotFoundError:
                result_check_pass = False
                logging.error("Graphic passwd lost in dst vm xml")

        # Check dst vm uptime, network, etc after live migration
        if live_migration:
            # Check dst VM uptime after migration
            # Note: migrated_vm_uptime should be greater than the vm_uptime got
            # before migration
            migrated_vm_uptime = vm.uptime(connect_uri=dest_uri)
            logging.info(
                "Check VM uptime in destination after "
                "migration: %s", migrated_vm_uptime)
            if not migrated_vm_uptime:
                result_check_pass = False
                logging.error("Failed to check vm uptime after migration")
            elif vm_uptime > migrated_vm_uptime:
                result_check_pass = False
                logging.error(
                    "VM went for a reboot while migrating to destination")

            # Check dst VM network connectivity after migration
            logging.debug("Check VM network connectivity after migrating")
            obj_migration.ping_vm(vm, params, uri=dest_uri)

            # Restore vm.connect_uri as it is set to src_uri in ping_vm()
            logging.debug(
                "Restore vm.connect_uri as it is set to src_uri in ping_vm()")
            vm.connect_uri = dest_uri

        # Check dst vm status after migration: persistency
        logging.debug("Check vm persistency on target after migration")
        if persistent:
            if not vm.is_persistent():
                result_check_pass = False
                logging.error("Dst vm should be persistent after migration "
                              "with --persistent")
            time.sleep(10)
            # Destroy vm and check vm state should be shutoff. BZ#1076354
            vm.destroy()
            if not libvirt.check_vm_state(
                    vm.name, "shut off", uri=vm.connect_uri):
                result_check_pass = False
                logging.error(
                    "Dst vm with name %s should exist and be shutoff",
                    vm.name)
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Dst vm should not be persistent after migration "
                          "without --persistent")

    finally:
        logging.debug("Start to clean up env")
        # Clean up vm on dest and src host
        for vm in vms:
            cleanup_vm(vm, vm_name=dname, uri=dest_uri)
            cleanup_vm(vm, vm_name=vm_name_backup, uri=src_uri)

        # Recover source vm defination (just in case).
        logging.info("Recover vm defination on source")
        if vm_xml_backup:
            vm_xml_backup.define()

        # Clean up SSH, TCP, TLS test env
        if objs_list and len(objs_list) > 0:
            logging.debug("Clean up test env: SSH, TCP, TLS, etc")
            for obj in objs_list:
                obj.auto_recover = True
                obj.__del__()

        # Disable libvirtd remote connection transport port
        obj_migration.migrate_pre_setup(dest_uri, params, cleanup=True,
                                        ports=transport_port)

        # Check test result.
        if not result_check_pass:
            test.fail("Migration succeed, but some check points didn't pass."
                      "Please check the error log for details")
def run(test, params, env):
    """
    Run the test

    Drives a libvirt VM migration scenario, optionally after changing the
    hostnames of both the source and destination hosts (stopping
    NetworkManager and restarting libvirtd on each side first). The original
    environment (hostnames, services, VM definition) is restored in the
    ``finally`` block regardless of the migration outcome.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    # Remember the original connect URI so it can be restored after
    # migration helpers repoint it (see the finally block).
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        # Append postcopy flags to whatever extra virsh options were given.
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")

    migrate_again = "yes" == params.get("migrate_again", "no")
    # Expected VM state on the source host after a successful run.
    src_state = params.get("virsh_migrate_src_state", "shut off")
    set_src_and_dst_hostname = "yes" == params.get("set_src_and_dst_hostname",
                                                   "no")
    src_hostname = params.get("src_hostname")
    dst_hostname = params.get("dst_hostname")
    server_ip = params.get("remote_ip")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("remote_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}

    # Pre-declare handles touched in the finally block so cleanup can test
    # them safely even if setup failed partway through.
    dst_session = None
    dst_libvirtd = None
    src_libvirtd = None
    src_NM_service = None
    dest_NM_service = None
    old_dst_hostname = None
    old_source_hostname = None

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Update guest disk xml
        libvirt.set_vm_disk(vm, params)
        if set_src_and_dst_hostname:
            # Save current hostnames so they can be restored on cleanup.
            old_dst_hostname = get_hostname(test, remote_params=server_params)
            old_source_hostname = get_hostname(test)
            dst_session = remote_old.wait_for_login('ssh', server_ip, '22',
                                                    server_user, server_pwd,
                                                    r"[\#\$]\s*$")
            dst_libvirtd = utils_libvirtd.Libvirtd(session=dst_session)
            src_libvirtd = utils_libvirtd.Libvirtd()
            try:
                # Check local NetworkManager service
                NM = utils_path.find_command("NetworkManager")
            except utils_path.CmdNotFoundError:
                logging.debug("No NetworkManager command found on source.")
                NM = None
            if NM is not None:
                # Stop local NetworkManager service
                # (presumably so it cannot revert the hostname change —
                # TODO confirm against stop_NM()/set_hostname() helpers).
                src_NM_service = service.Factory.create_service(
                    "NetworkManager")
                if src_NM_service is not None:
                    stop_NM(src_NM_service)
            set_hostname(src_hostname, test)
            # Restart libvirtd so it picks up the new source hostname.
            src_libvirtd.restart()

            # Check remote NetworkManager service
            check_cmd = "rpm -q NetworkManager"
            check_result = remote_old.run_remote_cmd(check_cmd, server_params,
                                                     ignore_status=False)
            if check_result.exit_status:
                logging.debug("No NetworkManager command found on target.")
            else:
                # Stop remote NetworkManager service
                remote_runner = remote.RemoteRunner(host=server_ip,
                                                    username=server_user,
                                                    password=server_pwd)
                runner = remote_runner.run
                dest_NM_service = service.Factory.create_service(
                    "NetworkManager", run=runner)
                if dest_NM_service is not None:
                    stop_NM(dest_NM_service)
            set_hostname(dst_hostname, test, remote_params=server_params)
            # Restart remote libvirtd so it picks up the new target hostname.
            dst_libvirtd.restart()

        if not vm.is_alive():
            vm.start()
        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        vm.wait_for_login()

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    None, extra_args)

        # Capture and reset the per-migration callback results so a second
        # migration (below) starts with a clean slate.
        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)

        if migrate_again:
            if not vm.is_alive():
                vm.start()
            vm.wait_for_login()
            # The retry may be expected to fail/succeed differently than the
            # first attempt; override the expectation from params.
            extra_args['status_error'] = params.get(
                "migrate_again_status_error", "no")
            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")
            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, None, extra_args)
        if int(migration_test.ret.exit_status) == 0:
            # Migration command succeeded: verify the guest on the target.
            migration_test.post_migration_check([vm], params, uri=dest_uri)
        if not libvirt.check_vm_state(vm_name, state=src_state, uri=bk_uri):
            test.fail("Can't get the expected vm state '%s'" % src_state)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        if set_src_and_dst_hostname:
            # Undo setup in reverse: restart NetworkManager, restore the
            # saved hostnames, restart libvirtd, close the SSH session.
            if src_NM_service is not None:
                start_NM(src_NM_service)
            if dest_NM_service is not None:
                start_NM(dest_NM_service)
            set_hostname(old_dst_hostname, test, remote_params=server_params)
            if dst_libvirtd:
                dst_libvirtd.restart()
            if dst_session:
                dst_session.close()
            set_hostname(old_source_hostname, test)
            if src_libvirtd:
                src_libvirtd.restart()
        # Re-define the guest from the backed-up XML to discard disk edits.
        orig_config_xml.sync()