def run_test(dev_type, params, test_obj=None):
    """
    Test the connectivity of vm's interface

    1) Start the vm with a interface
    2) Check the network driver of VM's interface
    3) Check the network connectivity
    4) Destroy the VM
    """
    # Build the interface device from the test params and attach it
    # to the VM definition before boot.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    iface_dict = interface_base.parse_iface_dict(params)
    iface_dev = interface_base.create_iface(dev_type, iface_dict)
    libvirt.add_vm_device(vmxml, iface_dev)

    logging.info("Start a VM with a '%s' type interface.", dev_type)
    vm.start()
    vm_session = vm.wait_for_serial_login(timeout=240)

    # Compare the guest-side driver against the expected one, if any
    # was configured for this test variant.
    vm_iface_info = interface_base.get_vm_iface_info(vm_session)
    expected_driver = params.get('vm_iface_driver')
    if expected_driver:
        actual_driver = vm_iface_info.get('driver')
        if actual_driver != expected_driver:
            test.fail("VM iface should be {}, but got {}."
                      .format(expected_driver, actual_driver))

    logging.info("Check the network connectivity")
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, **params)
    virsh.destroy(vm.name, **VIRSH_ARGS)
def setup_at_memory_to_vm_with_iface(dev_type):
    """
    Prepare a vm with max memory, numa, and an interface

    :param dev_type: interface type
    :return: the test environment object returned by setup_test
    """
    test_env_obj = setup_test(dev_type)
    # Add interface device
    # NOTE(review): eval() on a cartesian-config parameter is the existing
    # framework convention for trusted test configs; never expose it to
    # untrusted input.
    iface_dict = eval(params.get('iface_dict', '{}'))
    iface_dev = interface_base.create_iface(dev_type, iface_dict)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    libvirt.add_vm_device(vmxml, iface_dev)
    # Fixed typo in the log message: "afater" -> "after".
    logging.debug("VM xml after updating ifaces: %s.",
                  vm_xml.VMXML.new_from_dumpxml(vm_name))
    return test_env_obj
def update_iface_xml(vm_name, iface_dict):
    """
    Update interfaces for guest

    :param vm_name: The name of VM
    :param iface_dict: The interface configurations params
    """
    logging.debug("update iface xml")
    # Drop all existing interfaces from the inactive definition first,
    # then sync so the new interface is the only one.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()

    iface = interface.Interface('network')
    # BUG FIX: use the vm_name parameter rather than the module-level
    # 'vm' object, so the function operates on the VM it was asked to
    # (the two usually match, but relying on the global defeats the
    # purpose of taking vm_name as an argument).
    iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict)
    libvirt.add_vm_device(vmxml, iface)
def setup_test_mem_device(vm, params, test):
    """
    Setup steps for memory device

    :param vm: VM object
    :param params: dict, test parameters
    :param test: test object
    """
    test.log.debug("Setup for testing memory device")
    guest_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
    libvirt_cpu.add_cpu_settings(guest_xml, params)

    # Collect all 'memdev_*' params and strip the prefix to build the
    # keyword arguments for the dimm device XML.
    prefix = 'memdev_'
    dimm_params = {
        key.replace(prefix, ''): value
        for key, value in params.items()
        if key.startswith(prefix)
    }
    dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)
    libvirt.add_vm_device(guest_xml, dimm_xml)

    # Change the disk of the vm
    libvirt.set_vm_disk(vm, params)
def run_test(dev_type, params, test_obj=None):
    """
    Test domain lifecycle

    1) Start the vm and check network
    2) Destroy and start the VM, and check network
    3) Save and restore, and check network
    4) Suspend and resume, and check network
    5) Reboot the VM and check the network
    """
    # Setup Iface device
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    iface_dict = eval(params.get('iface_dict', '{}'))
    iface_dev = interface_base.create_iface(dev_type, iface_dict)
    libvirt.add_vm_device(vmxml, iface_dev)

    logging.info("Start a VM with a '%s' type interface.", dev_type)
    vm.start()
    vm.wait_for_serial_login(timeout=240).close()
    check_points.check_network_accessibility(vm, test_obj=test_obj, **params)

    logging.info("Destroy and start the VM.")
    virsh.destroy(vm.name, **VIRSH_ARGS)
    virsh.start(vm.name, **VIRSH_ARGS)
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=True, **params)

    logging.info("Save the VM.")
    save_error = "yes" == params.get("save_error", "no")
    save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
    # BUG FIX: the original passed the literal string 'sss' instead of
    # save_path, so the domain was saved to a bogus relative file while
    # the restore below used save_path, making restore always fail.
    res = virsh.save(vm.name, save_path, debug=True)
    libvirt.check_exit_status(res, expect_error=save_error)
    if not save_error:
        logging.info("Restore vm.")
        virsh.restore(save_path, **VIRSH_ARGS)
        check_points.check_network_accessibility(
            vm, test_obj=test_obj, config_vdpa=False, **params)

    logging.info("Suspend and resume the vm.")
    virsh.suspend(vm.name, **VIRSH_ARGS)
    if not libvirt.check_vm_state(vm_name, "paused"):
        test.fail("VM should be paused!")
    virsh.resume(vm.name, **VIRSH_ARGS)
    if not libvirt.check_vm_state(vm_name, "running"):
        test.fail("VM should be running!")
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=False, **params)

    logging.debug("Reboot VM and check network.")
    virsh.reboot(vm.name, **VIRSH_ARGS)
    check_points.check_network_accessibility(
        vm, test_obj=test_obj, config_vdpa=False, **params)
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")
        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            # Expected-success path: any non-zero exit is a failure.
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Make sure all of parameters are assigned a valid value
    # NOTE(review): check_parameters was already called above; the second
    # call looks redundant — confirm before removing.
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migration_test = migration.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            # Migration succeeded: verify the guest is reachable from the
            # destination host over ssh.
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True,
                ignore_status=True).stdout_text.strip())
            # Later check_* flags win if several are set simultaneously.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # NOTE(review): blk_source is only bound when guest_src_url was set
        # and the download ran; the guard below relies on short-circuiting.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of rng

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """

    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot
        """
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None and
                    address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        # Slots are formatted as zero-padded hex ("0x01"..) to match the
        # attribute format libvirt writes into the XML.
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None

    def get_free_root_port():
        """
        Get a free root port for rng device

        :return: The bus index of free root port
        """
        root_ports = set()
        other_ports = set()
        used_slot = set()
        # Record the bus indexes for all pci controllers
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-root-port':
                root_ports.add(controller.get('index'))
            else:
                other_ports.add(controller.get('index'))
        # Record the addresses being allocated for all pci devices
        pci_devices = vmxml.xmltreefile.find('devices').getchildren()
        for dev in pci_devices:
            address = dev.find('address')
            if address is not None:
                used_slot.add(address.get('bus'))
        # Find the bus address unused
        for bus_index in root_ports:
            bus = "%0#4x" % int(bus_index)
            if bus not in used_slot:
                return bus
        # Add a new pcie-root-port if no free one
        # NOTE(review): root_ports/other_ports hold the string values from
        # XML attributes while 'index' here is an int — confirm the
        # membership test compares like types as intended.
        for index in range(1, 30):
            if index not in (root_ports | other_ports):
                contr_dict = {'controller_type': 'pci',
                              'controller_index': index,
                              'controller_model': 'pcie-root-port'}
                cntl_add = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, cntl_add)
                return "%0#4x" % int(index)
        return None

    def check_plug_to(bus_type='pcie-to-pci-bridge'):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge

        :param bus_type: The bus type been expected to plug to
        :return True if plugged onto 'bus_type', otherwise False
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng = vmxml.xmltreefile.find('devices').find('rng')
        # eval() converts the hex string attribute (e.g. "0x03") to int.
        bus = int(eval(rng.find('address').get('bus')))
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            # NOTE(review): controller.get('index') is an XML attribute
            # (string) compared against the int 'bus' — verify this match
            # ever succeeds; callers currently ignore the return value.
            if controller.get('index') == bus:
                if controller.get('model') == bus_type:
                    return True
                break
        return False

    def check_rng_inside_guest():
        """
        check rng device inside guest
        """
        check_cmd = params['check_cmd']
        lspci_output = session.cmd_output(check_cmd)
        # Kill any hexdump left reading from the rng device by check_cmd.
        session.cmd_output('pkill -9 hexdump')
        if 'No such file or directory' in lspci_output and device_exists:
            test.fail('Can not detect device by %s.' % check_cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_rng = (params.get('boot_with_rng', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    device_exists = (params.get('device_exists', 'yes') == 'yes')
    plug_to = params.get('plug_to', '')

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download and update image if required
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add 'pcie-to-pci-bridge' if there is no one
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            pci_bridge = controller
            break
    else:
        contr_dict = {'controller_type': 'pci',
                      'controller_model': 'pcie-to-pci-bridge'}
        pci_bridge = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, pci_bridge)
    # NOTE(review): when the bridge was just created above, presumably
    # create_controller_xml assigns an 'index' attribute — confirm, or the
    # int() below raises.
    pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))

    try:
        # Update nic and vm disks
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml will not be updated since set_vm_disk
        # sync with another dumped xml inside the function
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Remove existed rng devices if there are
        rng_devs = vmxml.get_devices('rng')
        for rng in rng_devs:
            vmxml.del_device(rng)
        vmxml.xmltreefile.write()
        vmxml.sync()

        # General new rng xml per configurations
        rng_xml = libvirt.create_rng_xml({"rng_model": virtio_model})
        if params.get('specify_addr', 'no') == 'yes':
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            addr = rng_xml.new_rng_address()
            if plug_to == 'pcie-root-port':
                bus = get_free_root_port()
                addr.set_attrs({'bus': bus})
            else:
                slot = get_free_pci_slot()
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
            rng_xml.address = addr
        if boot_with_rng:  # Add to vm if required
            libvirt.add_vm_device(vmxml, rng_xml)
        if not vm.is_alive():
            vm.start()
        if hotplug:  # Hotplug rng if required
            file_arg = rng_xml.xml
            with open(file_arg) as rng_file:
                logging.debug("Attach rng by XML: %s", rng_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
            # NOTE(review): the boolean result of check_plug_to is ignored
            # here — confirm whether a failed plug check should fail the test.
            check_plug_to(plug_to)
        session = vm.wait_for_login()
        check_rng_inside_guest()
        if hotplug:  # Unplug rng if hotplugged previously
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            rng = vmxml.get_devices('rng')[0]
            file_arg = rng.xml
            with open(file_arg) as rng_file:
                logging.debug("Detach rng by XML: %s", rng_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
        if not hotplug:
            # Cold-plugged rng: additionally verify it survives
            # a save/restore cycle.
            session.close()
            save_path = os.path.join(
                data_dir.get_tmp_dir(), '%s.save' % params['os_variant'])
            ret = virsh.save(vm_name, save_path)
            libvirt.check_exit_status(ret)
            ret = virsh.restore(save_path)
            libvirt.check_exit_status(ret)
            session = vm.wait_for_login()
            check_rng_inside_guest()
            process.run('rm -f %s' % save_path, ignore_status=True)
    finally:
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of vsock

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_vsock = (params.get('boot_with_vsock', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    addr_pattern = params['addr_pattern']
    device_pattern = params['device_pattern']

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    def check_vsock_inside_guest():
        """
        check vsock device inside guest
        """
        lspci_cmd = 'lspci'
        lspci_output = session.cmd_output(lspci_cmd)
        # Match "<pci address> <device description>" in the lspci output.
        device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                                lspci_output)
        if not device_str:
            test.fail('lspci failed, no device "%s"' % device_pattern)

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        # for...else: the else arm only runs when no bridge was found.
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                break
        else:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            cntl_add = libvirt.create_controller_xml(contr_dict)
            libvirt.add_controller(vm_name, cntl_add)

    # Generate xml for device vsock
    vsock_xml = libvirt.create_vsock_xml(virtio_model)
    if boot_with_vsock:  # Add vsock xml to vm only when needed
        libvirt.add_vm_device(vmxml, vsock_xml)
    try:
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            # Update interface to virtio-transitional mode for
            # rhel6 guest to make it works for login
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if hotplug:
            file_arg = vsock_xml.xml
            with open(file_arg) as vsock_file:
                logging.debug("Attach vsock by XML: %s", vsock_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
        if add_pcie_to_pci_bridge:
            # Check device should be plug to right bus
            virtio_transitional_base.check_plug_to(vm_name, 'vsock')
        session = vm.wait_for_login()
        check_vsock_inside_guest()
        if hotplug:
            # file_arg is still the path saved during the attach above.
            with open(file_arg) as vsock_file:
                logging.debug("Detach vsock by XML: %s", vsock_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
    finally:
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test migration with memory related configuration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    local_ip = params.get("local_ip")
    local_pwd = params.get("local_pwd")
    ballooned_mem = params.get("ballooned_mem")
    check = params.get("check")
    remove_dict = {}
    src_libvirt_file = None

    remote_virsh_dargs = {'remote_ip': remote_ip, 'remote_user': remote_user,
                          'remote_pwd': remote_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirtd_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    # NOTE(review): eval() on a cartesian-config value — accepted pattern
    # for trusted test configs only.
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if check == "mem_balloon":
            # Update memory balloon device to correct model
            membal_dict = {'membal_model': 'virtio',
                           'membal_stats_period': '10'}
            libvirt.update_memballoon_xml(new_xml, membal_dict)

        if check == "mem_device":
            libvirt_cpu.add_cpu_settings(new_xml, params)

            # Collect 'memdev_*' params (prefix stripped) as the dimm
            # device attributes.
            dimm_params = {k.replace('memdev_', ''): v
                           for k, v in params.items()
                           if k.startswith('memdev_')}
            dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)
            libvirt.add_vm_device(new_xml, dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'",
                              log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = params.get("conf_type", "libvirtd")
            if conf_type == "libvirtd" and \
                    utils_split_daemons.is_modular_daemon():
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)

        try:
            if not vm.is_alive():
                vm.start()
        except virt_vm.VMStartError as e:
            logging.info("Failed to start VM")
            test.fail("Failed to start VM: %s" % vm_name)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

        if check == "mem_balloon":
            # Balloon the guest memory on the destination and poll the
            # remote dommemstat until the new size shows up.
            remote_virsh_session = virsh.VirshPersistent(
                **remote_virsh_dargs)
            remote_virsh_session.setmem(vm_name, ballooned_mem, None, None,
                                        False, "", **virsh_args)

            def check_mem_balloon():
                """Check if memory balloon worked"""
                memstat_ouput = remote_virsh_session.dommemstat(
                    vm_name, "", **virsh_args)
                memstat_after = memstat_ouput.stdout_text
                mem_after = memstat_after.splitlines()[0].split()[1]
                if mem_after != ballooned_mem:
                    logging.debug("Current memory size is: %s" % mem_after)
                    return False
                return True

            check_ret = utils_misc.wait_for(check_mem_balloon, timeout=20)
            if not check_ret:
                test.fail("Memory is not ballooned to the expected "
                          "size: %s" % ballooned_mem)

            remote_virsh_session.close_session()

        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=remote_ip,
                                               username=remote_user,
                                               password=remote_pwd)
        if check == "mem_device":
            qemu_checks = params.get('qemu_checks', '').split('`')
            logging.debug("qemu_checks:%s" % qemu_checks[0])
            for qemu_check in qemu_checks:
                libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                            runner_on_target)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(
                server_ip=remote_ip, server_pwd=remote_pwd,
                client_ip=local_ip, client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            src_full_uri = libvirt_vm.complete_uri(
                params.get("migrate_source_host"))

            migration_test.migrate_pre_setup(src_full_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, options,
                                              src_full_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                # Destroy the leftover domain on the remote side before
                # failing, so cleanup can proceed.
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(
                None, is_recover=True, config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Strip the ssh URI alias entry from the local libvirt.conf so the
        # modular daemon resolves the destination URI correctly.
        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra, **extra_args)
        mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, dest_uri)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True,
                ignore_status=True).stdout_text.strip())
            # Later check_* flags win if several are set simultaneously.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            # Same libvirt.conf fixup, but on the remote (new source) host.
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            # Dropping the last reference triggers the remote file's
            # cleanup/restore in its destructor.
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # NOTE(review): blk_source is only bound when guest_src_url was
        # set and the download ran; the guard relies on short-circuiting.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run_test_iface_acpi(case):
    """
    Test acpi setting of interface

    Creates a network-type interface carrying an <acpi index='N'/> element,
    adds/attaches it to the VM per *case*, then verifies the setting both in
    the live domain XML and inside the guest, and finally hot-unplugs it.

    :param case: test case variant:
                 'inplace'    - add iface to inactive XML, then start vm
                 'hotplug'    - start vm, then attach-device
                 'value_test' - boundary/negative acpi index values; only
                                checks attach-device exit status/result
    """
    # acpi-index on q35 machines needs qemu >= 6.1.0; cancel on older qemu
    if not utils_misc.compare_qemu_version(6, 1, 0, is_rhev=False) and \
            machine_type == 'q35':
        test.cancel('This test is not supported on q35 '
                    'by qemu until v6.1.0')

    acpi_index = params.get('acpi_index')
    iface_in_vm = params.get('iface_in_vm')
    # NOTE(review): eval() of a cfg-file value — trusted test config here,
    # but literal_eval would be safer if the format allows it.
    iface_attrs = eval(params.get('iface_attrs'))

    # Create interface with acpi setting
    iface_acpi = interface.Interface('network')
    iface_acpi.setup_attrs(**iface_attrs)
    logging.debug('New interface with acpi: %s', iface_acpi)

    # Setup vm features with acpi if there is not
    features = vmxml.features
    feature_acpi = 'acpi'
    if not features.has_feature(feature_acpi):
        features.add_feature(feature_acpi)
        vmxml.features = features

    # Prepare vmxml for further test: drop pre-existing interfaces so the
    # one under test is the only (first) interface device
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()

    # Test vm with iface with acpi
    if case == 'inplace':
        libvirt.add_vm_device(vmxml, iface_acpi)
        vm.start()
    # Test hotplug iface with acpi
    elif case == 'hotplug':
        vm.start()
        virsh.attach_device(vm_name, iface_acpi.xml, **VIRSH_ARGS)
    # Test boundary/negative values of acpi setting
    elif case == 'value_test':
        vm.start()
        # Wait until the guest is fully booted before attaching
        vm.wait_for_serial_login().close()
        at_result = virsh.attach_device(vm_name, iface_acpi.xml, debug=True)
        libvirt.check_exit_status(at_result, status_error)
        if status_error:
            libvirt.check_result(at_result, expect_str)
        # Negative scenario ends here; no in-guest checks
        return

    # Check acpi setting in iface after adding/attaching to vm
    new_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    new_iface = new_vmxml.get_devices('interface')[0]
    logging.debug('Interface with acpi in vm: %s', new_iface)
    if new_iface.xmltreefile.find('acpi') is None:
        test.fail('acpi not found in Interface')
    if new_iface.acpi.get('index') != acpi_index:
        test.fail('Index of acpi check failed, should be %s' % acpi_index)

    # Check acpi inside vm: acpi index dictates the predictable ifname
    session = vm.wait_for_serial_login()
    try:
        ip_output = session.cmd_output('ip l')
        logging.debug(ip_output)
        vm_ifaces = utils_net.get_linux_ifname(session)
        if iface_in_vm in vm_ifaces:
            logging.info('Found Interface %s in vm', iface_in_vm)
        else:
            test.fail('Interface %s not found in vm' % iface_in_vm)

        # Hotunplug iface and confirm it disappears from the guest
        virsh.detach_device(vm_name, new_iface.xml, **VIRSH_ARGS)
        vm_ifaces = utils_net.get_linux_ifname(session)
        if iface_in_vm in vm_ifaces:
            test.fail('Interface %s should be removed from vmxml'
                      % iface_in_vm)
    finally:
        # Fix: the serial session was previously leaked; always close it
        session.close()

    new_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    if new_vmxml.get_devices('interface'):
        test.fail('Interface should be removed.')