def run_migration_back(params, test):
    """
    Execute migration back from target host to source host.

    Reads 'migrate_vm_back' from params; when "yes", sets up passwordless
    ssh from the target back to the source, prepares the source for an
    incoming migration, and runs "virsh migrate" on the target host via a
    remote runner. On migration failure the VM is destroyed on the target
    before failing the test.

    :param params: dict, test parameters
    :param test: test object
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    vm_name = params.get("migrate_main_vm")
    options = params.get("virsh_migrate_options", "--live --verbose")
    if migrate_vm_back:
        # Reverse direction: the remote (server) side now acts as the ssh
        # client connecting back to the local (client) machine.
        ssh_connection = utils_conn.SSHConnection(
            server_ip=params.get("client_ip"),
            server_pwd=params.get("local_pwd"),
            client_ip=params.get("server_ip"),
            client_pwd=params.get("server_pwd"))
        try:
            ssh_connection.conn_check()
        except utils_conn.ConnectionError:
            # First check failed: create the key/known-hosts setup, then
            # verify the connection actually works before migrating.
            ssh_connection.conn_setup()
            ssh_connection.conn_check()

        # Pre migration setup for local machine
        src_full_uri = libvirt_vm.complete_uri(
            params.get("migrate_source_host"))

        migration_test = migration.MigrationTest()
        migration_test.migrate_pre_setup(src_full_uri, params)
        cmd = "virsh migrate %s %s %s" % (vm_name, options, src_full_uri)
        test.log.debug("Start migration: %s", cmd)
        runner_on_target = remote.RemoteRunner(
            host=params.get("remote_ip"),
            username=params.get("remote_user"),
            password=params.get("remote_pwd"))
        cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
        test.log.info(cmd_result)
        if cmd_result.exit_status:
            # Migration back failed: destroy the VM on the target so the
            # environment is clean, then fail with the command output.
            destroy_cmd = "virsh destroy %s" % vm_name
            remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                  ignore_status=False)
            test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
    else:
        test.log.debug("No need to migrate back")
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")

    # check the config
    # (fixed: the concatenated message fragments were missing a separating
    # space, producing "configuredin remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise error.TestNAError("Parameter local_ip is not configured "
                                "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise error.TestNAError("Parameter local_pwd is not configured "
                                "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Make sure passwordless ssh to the server works before
            # building the remote URI; set it up on first failure.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_ip)
        # NOTE(review): with connect_arg == "transport" and a transport
        # other than "ssh", connect_uri stays unbound and the maxvcpus
        # call below raises NameError — confirm only ssh is configured.
    else:
        connect_uri = connect_arg

    # Run test case
    result = virsh.maxvcpus(option, uri=connect_uri,
                            ignore_status=True, debug=True)
    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            # (fixed typo in the failure message: "successed" -> "succeeded")
            raise error.TestFail("Run succeeded with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if "kqemu" in option:
                # kqemu guests are limited to a single vcpu.
                if not maxvcpus_test == '1':
                    raise error.TestFail("Command output %s is not expected "
                                         "for %s " % (maxvcpus_test, option))
            elif option == 'qemu' or option == '--type qemu' or option == '':
                if not maxvcpus_test == '16':
                    raise error.TestFail("Command output %s is not expected "
                                         "for %s " % (maxvcpus_test, option))
            else:
                # No check with other types
                pass
        else:
            raise error.TestFail("Run command failed")
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    connect_uri = None

    # check the config
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Make sure passwordless ssh to the server works; set it up
            # on first failure and re-check.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_ip)
            virsh_dargs = {'remote_ip': server_ip, 'remote_user': '******',
                           'remote_pwd': server_pwd, 'ssh_remote_auth': True}
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
        # NOTE(review): for connect_arg == "transport" with a transport
        # other than "ssh", virsh_instance stays unbound and the
        # DomCapabilityXML call below raises NameError — confirm only the
        # ssh transport is ever configured for this case.
    else:
        connect_uri = connect_arg
        virsh_instance = virsh

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            maxvcpus_cap = None
            dom_capabilities = None
            # make sure we take maxvcpus from right host, helps in case remote
            try:
                dom_capabilities = domcap.DomCapabilityXML(
                    virsh_instance=virsh_instance)
                maxvcpus = dom_capabilities.max
                logging.debug("maxvcpus calculate from domcapabilities "
                              "is %s", maxvcpus)
            except Exception as details:
                # NOTE(review): 'details' is unused; the message reports
                # the (possibly None) xml object instead of the exception.
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "domcapabilities xml:\n%s"
                                          % dom_capabilities)

            try:
                # Cross-check against virsh capabilities for this machine
                # architecture; fall back to the domcapabilities value on
                # any failure.
                cap_xml = capability_xml.CapabilityXML()
                maxvcpus_cap = cap_xml.get_guest_capabilities()['hvm'][
                    platform.machine()]['maxcpus']
                logging.debug('maxvcpus_cap is %s', maxvcpus_cap)
            except Exception as details:
                logging.debug("Failed to get maxvcpu from virsh "
                              "capabilities: %s", details)
                # Let's fall back in case of failure
                maxvcpus_cap = maxvcpus
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get max value for vcpu"
                                          "from domcapabilities "
                                          "xml:\n%s" % dom_capabilities)
        except Exception as details:
            raise exceptions.TestFail("Failed get the virsh instance with uri: "
                                      "%s\n Details: %s"
                                      % (connect_uri, details))

    # On aarch64 the expected maxvcpus depends on the GIC version, so
    # extract it from the domcapabilities features.
    is_arm = "aarch" in platform.machine()
    gic_version = ''
    if is_arm:
        for gic_enum in domcap.DomCapabilityXML()['features']['gic_enums']:
            if gic_enum['name'] == "version":
                gic_version = gic_enum['values'][0].get_value()

    # Run test case
    result = virsh.maxvcpus(option, uri=connect_uri,
                            ignore_status=True, debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Run succeeded with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if not libvirt_version.version_compare(2, 3, 0):
                # Older libvirt: fixed expected values per driver type.
                if "kqemu" in option:
                    if not maxvcpus_test == '1':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s "
                                                  % (maxvcpus_test, option))
                elif option in ['qemu', '--type qemu', '']:
                    if not maxvcpus_test == '16':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s "
                                                  % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
            else:
                # It covers all possible combinations
                if option in ['qemu', 'kvm', '--type qemu', '--type kvm',
                              'kqemu', '--type kqemu', '']:
                    if (is_arm and gic_version == '2'
                            and option in ['kvm', '']):
                        if not maxvcpus_test == '8':
                            raise exceptions.TestFail("Command output %s is not "
                                                      "expected for %s "
                                                      % (maxvcpus_test, option))
                    elif not (maxvcpus_test == maxvcpus or
                              maxvcpus_test == maxvcpus_cap):
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s "
                                                  % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
        else:
            raise exceptions.TestFail("Run command failed")
def kdump_enable(vm, vm_name, crash_kernel_prob_cmd, kernel_param_cmd,
                 kdump_enable_cmd, timeout):
    """
    Check, configure and enable the kdump in guest.

    :param vm: VM object the kdump is configured in
    :param vm_name: vm name
    :param crash_kernel_prob_cmd: check kdump loaded
    :param kernel_param_cmd: the param add into kernel line for kdump
    :param kdump_enable_cmd: enable kdump command
    :param timeout: Timeout in seconds
    :return: the (possibly re-created) guest session after kdump is enabled
    """
    kdump_cfg_path = vm.params.get("kdump_cfg_path", "/etc/kdump.conf")
    kdump_config = vm.params.get("kdump_config")
    vmcore_path = vm.params.get("vmcore_path", "/var/crash")
    kdump_method = vm.params.get("kdump_method", "basic")
    kdump_propagate_cmd = vm.params.get("kdump_propagate_cmd",
                                        'kdumpctl propagate')
    kdump_enable_timeout = int(vm.params.get("kdump_enable_timeout", 360))

    # Fix: resolve the host IP up front. It is needed both when formatting
    # the ssh core-collector config AND when wiring up passwordless ssh
    # below; the original computed it only inside the "if kdump_config"
    # branch, raising NameError for kdump_method == "ssh" without a config.
    host_ip = None
    if kdump_method == "ssh":
        host_ip = utils_net.get_ip_address_by_interface(
            vm.params.get('netdst'))

    error_context.context("Try to log into guest '%s'." % vm_name,
                          LOG_JOB.info)
    session = vm.wait_for_login(timeout=timeout)

    error_context.context("Checking the existence of crash kernel in %s" %
                          vm_name, LOG_JOB.info)
    try:
        session.cmd(crash_kernel_prob_cmd)
    except Exception:
        # Crash kernel missing: add the kernel parameter and reboot so the
        # crashkernel reservation takes effect.
        error_context.context("Crash kernel is not loaded. Trying to load it",
                              LOG_JOB.info)
        session.cmd(kernel_param_cmd)
        session = vm.reboot(session, timeout=timeout)

    if kdump_config:
        if kdump_method == "ssh":
            # The config template expects the dump target host and path.
            kdump_config = kdump_config % (host_ip, vmcore_path)
        error_context.context("Configuring the Core Collector...",
                              LOG_JOB.info)
        # Rewrite kdump.conf from scratch with the makedumpfile collector,
        # then append each ';'-separated config line.
        session.cmd("cat /dev/null > %s" % kdump_cfg_path)
        session.cmd("echo 'core_collector makedumpfile -F -c -d 31' > %s" %
                    kdump_cfg_path)
        for config_line in kdump_config.split(";"):
            config_cmd = "echo -e '%s' >> %s "
            config_con = config_line.strip()
            session.cmd(config_cmd % (config_con, kdump_cfg_path))

    if kdump_method == "ssh":
        host_pwd = vm.params.get("host_pwd", "redhat")
        guest_pwd = vm.params.get("guest_pwd", "redhat")
        guest_ip = vm.get_address()
        error_context.context("Setup ssh login without password...",
                              LOG_JOB.info)
        # Start from a clean slate so stale keys cannot break propagation.
        session.cmd("rm -rf /root/.ssh/*")
        ssh_connection = utils_conn.SSHConnection(server_ip=host_ip,
                                                  server_pwd=host_pwd,
                                                  client_ip=guest_ip,
                                                  client_pwd=guest_pwd)
        try:
            ssh_connection.conn_check()
        except utils_conn.ConnectionError:
            ssh_connection.conn_setup()
            ssh_connection.conn_check()
        LOG_JOB.info("Trying to propagate with command '%s'",
                     kdump_propagate_cmd)
        session.cmd(kdump_propagate_cmd, timeout=120)

    error_context.context("Enabling kdump service...", LOG_JOB.info)
    # the initrd may be rebuilt here so we need to wait a little more
    session.cmd(kdump_enable_cmd, timeout=kdump_enable_timeout)

    return session
def run(test, params, env):
    """
    Test command: virsh connect.

    Exercises virsh connect over ssh/tls/tcp/unix transports (or a plain
    URI argument) and verifies the resulting URI, with cleanup of any
    transport-specific setup.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    def unix_transport_setup():
        """
        Setup a unix connect to local libvirtd.

        Backs up libvirtd.conf, forces auth_unix_rw="none" (replacing an
        existing setting or appending one), and restarts libvirtd.
        """
        shutil.copy(libvirtd_conf_path, libvirtd_conf_bak_path)

        with open(libvirtd_conf_path, 'r') as libvirtdconf_file:
            line_list = libvirtdconf_file.readlines()
        conf_dict = {r'auth_unix_rw\s*=': 'auth_unix_rw="none"\n', }
        for key in conf_dict:
            pattern = key
            conf_line = conf_dict[key]
            flag = False
            # Replace the first matching line in place; remember whether
            # a replacement happened.
            for index in range(len(line_list)):
                line = line_list[index]
                if not re.search(pattern, line):
                    continue
                else:
                    line_list[index] = conf_line
                    flag = True
                    break
            if not flag:
                # No existing setting found: append it.
                line_list.append(conf_line)

        with open(libvirtd_conf_path, 'w') as libvirtdconf_file:
            libvirtdconf_file.writelines(line_list)

        # restart libvirtd service
        utils_libvirtd.libvirtd_restart()

    def unix_transport_recover():
        """
        Recover the libvirtd on local.

        Restores the backed-up libvirtd.conf (if present) and restarts
        libvirtd.
        """
        if os.path.exists(libvirtd_conf_bak_path):
            shutil.copy(libvirtd_conf_bak_path, libvirtd_conf_path)
            utils_libvirtd.libvirtd_restart()

    # get the params from subtests.
    # params for general.
    connect_arg = params.get("connect_arg", "")
    connect_opt = params.get("connect_opt", "")
    status_error = params.get("status_error", "no")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    # Local-loop setup: client and server are the same machine here.
    client_ip = local_ip
    client_pwd = local_pwd
    server_ip = local_ip
    server_pwd = local_pwd

    # params special for tls connect.
    server_cn = params.get("connect_server_cn", "TLSServer")
    client_cn = params.get("connect_client_cn", "TLSClient")

    # params special for tcp connect.
    tcp_port = params.get("tcp_port", '16509')

    # params special for unix transport.
    libvirtd_conf_path = '/etc/libvirt/libvirtd.conf'
    libvirtd_conf_bak_path = '%s/libvirtd.conf.bak' % data_dir.get_tmp_dir()

    # special params for test connection alive
    alive = params.get('alive', None)

    if alive:
        check_virsh_connect_alive(test, params)
        return

    # check the config
    # NOTE(review): the concatenated message fragments below lack a
    # separating space ("configuredin", "suggestedon", "expectedto") —
    # cosmetic message defects worth fixing.
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        test.cancel("Parameter local_ip is not configured"
                    "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        test.cancel("Parameter local_pwd is not configured"
                    "in remote test.")

    # In Ubuntu libvirt_lxc available in /usr/lib/libvirt
    if (connect_arg.count("lxc") and
            (not (os.path.exists("/usr/libexec/libvirt_lxc") or
                  os.path.exists("/usr/lib/libvirt/libvirt_lxc")))):
        test.cancel("Connect test of lxc:/// is not suggested on "
                    "the host with no lxc driver.")
    if connect_arg.count("xen") and (not os.path.exists("/var/run/xend")):
        test.cancel("Connect test of xen:/// is not suggested on "
                    "the host with no xen driver.")
    if connect_arg.count("qemu") and (not os.path.exists("/dev/kvm")):
        test.cancel("Connect test of qemu:/// is not suggested"
                    "on the host with no qemu driver.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Ensure passwordless ssh works; set it up on first failure.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=client_ip,
                                                      client_pwd=client_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_ip)
        elif transport == "tls":
            # TLS needs certificates for both sides; the URI targets the
            # server CN rather than the IP.
            tls_connection = utils_conn.TLSConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=client_ip,
                                                      client_pwd=client_pwd,
                                                      server_cn=server_cn,
                                                      client_cn=client_cn,
                                                      special_cn='yes')
            tls_connection.conn_setup()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_cn)
        elif transport == "tcp":
            tcp_connection = utils_conn.TCPConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      tcp_port=tcp_port)
            tcp_connection.conn_setup()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip="%s:%s" % (server_ip, tcp_port))
        elif transport == "unix":
            unix_transport_setup()
            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip="")
        else:
            test.cancel("Configuration of transport=%s is "
                        "not recognized." % transport)
    else:
        connect_uri = connect_arg

    try:
        try:
            uri = do_virsh_connect(connect_uri, connect_opt)
            # connect successfully
            if status_error == "yes":
                test.fail("Connect successfully in the "
                          "case expected to fail.")
            # get the expect uri when connect argument is ""
            if connect_uri == "":
                connect_uri = virsh.canonical_uri().split()[-1]

            logging.debug("expected uri is: %s", connect_uri)
            logging.debug("actual uri after connect is: %s", uri)
            if not uri == connect_uri:
                test.fail("Command exit normally but the uri is "
                          "not set as expected.")
        except process.CmdError as detail:
            if status_error == "no":
                test.fail("Connect failed in the case expected"
                          "to success.\n"
                          "Error: %s" % detail)
    finally:
        # Undo whichever transport-specific setup was performed above.
        if transport == "unix":
            unix_transport_recover()
        if transport == "tcp":
            tcp_connection.conn_recover()
        if transport == "tls":
            tls_connection.conn_recover()
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection

    Optionally prepares a ceph (rbd) backed disk for the VM, migrates it to
    the remote host, migrates it back when requested, and restores the
    environment (VM XML, secrets, SELinux modes, known hosts) afterwards.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP: prefer the TLS CN, then the server IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
        else:
            target_ip = target_ip
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport,
                               target_ip, uri_port, uri_path)
    test_dict["desuri"] = uri

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, test_dict)

    # Set up SSH key: a throwaway login primes/validates key-based access
    # to the remote host.
    remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                           server_user, server_pwd,
                                           r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-direction migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)

    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Get initial Selinux config flex bit
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforce :%s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce :%s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            # Fix: both sides are meant to be switched to permissive mode
            # (as the log messages state); the original code passed
            # "enforcing" to set_status/setenforce instead.
            logging.info("Put local SELinux in permissive mode when test "
                         "ceph migrating")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                # Fix: test.Error does not exist; the API is test.error.
                test.error("Failed to set SELinux "
                           "in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name, disk_format, host_ip, disk_src_protocol,
                           vol_name, disk_image, auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s",
                          vm_xml_cxt)

        try:
            if vm.is_dead():
                vm.start()
        except virt_vm.VMStartError as e:
            logging.info("Failed to start VM")
            test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on remote host before migrating.
        # Fix: the original ran the stale 'cmd' variable here instead of the
        # destroy command it had just built.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            # Reverse direction ssh setup: remote host connects back here.
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure VM can be cleaned up on remote host even migrating fail.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on remote host if testing involve in ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    # Fix: test.Error does not exist; the API is test.error.
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
def run(test, params, env):
    """
    Test virsh migrate command.

    Updates the guest with virtio-transitional style devices (disk,
    interface, memballoon, rng as configured), migrates it to the remote
    host over NFS shared storage, verifies network access and the target
    XML, and optionally migrates back.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        # Fall back to the default guest image under the NFS mount.
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migration_test = migration.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            # Verify the migrated guest answers pings from the target host.
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True,
                ignore_status=True).stdout_text.strip())
            # Pick the model string to look for in the target XML,
            # depending on which device was under test.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # NOTE(review): 'blk_source' is only bound when the download above
        # succeeded; if an exception fires earlier this raises NameError
        # in cleanup — confirm guest_src_url runs imply blk_source exists.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    :raises: exceptions.TestSkipError when required remote parameters
             are left at their placeholder defaults
    :raises: exceptions.TestFail when maxvcpus cannot be read from the
             capabilities XML
    """
    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    # Initialize up front so a transport other than "ssh" cannot leave the
    # name unbound when it is used below (fix: previously only assigned on
    # the "ssh" path).
    connect_uri = None

    # check the config
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Make sure passwordless ssh is ready before building the
            # remote URI; set it up once if the first check fails.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            # Initialize so the error message below cannot hit a NameError
            # if CapabilityXML() itself raises (fix: 'capa' was previously
            # referenced before assignment on that path).
            capa = None
            # make sure we take maxvcpus from right host, helps incase remote
            virsh_dargs = {'uri': connect_uri}
            virsh_instance = virsh.Virsh(virsh_dargs)
            try:
                capa = capability_xml.CapabilityXML(virsh_instance)
                host_arch = capa.arch
                maxvcpus = capa.get_guest_capabilities(
                )['hvm'][host_arch]['maxcpus']
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "capabilities xml\n%s" % capa)
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get guest section for "
                                          "host arch: %s from capabilities "
                                          "xml\n%s" % (host_arch, capa))
        except Exception as details:
            # Python 3 compatible syntax (was 'except Exception, details',
            # a SyntaxError under Python 3).
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))
def run(test, params, env):
    """
    Test migration with glusterfs.

    Sets up a gluster-backed disk for the guest, migrates it to the remote
    host, optionally migrates it back, and restores the environment in the
    'finally' block.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked (shadows the outer
                        'params'; both are expected to carry the same
                        gluster_* keys)
        :params session: VM/remote session object
        :params is_clean: True for cleanup backend directory; False for
                          create one.
        :return: gluster_img path when is_clean is False, otherwise None
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            # Create the mount point if missing, then (re)mount the
            # gluster volume on it.
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)
            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)
            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                # NOTE(review): 'remote_session' is read from the enclosing
                # scope here — presumably this helper is only called with
                # is_symlink after remote_session exists; verify callers.
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            # Cleanup path: drop symlink, unmount and remove the mount dir.
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)
            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                # Uses the outer 'gluster_mount_dir' (same value as the
                # local mount_point read from params above).
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:
            # Migration should fail
            if err_msg:
                # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    virsh_options = params.get("virsh_options", "--verbose --live")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migr_vm_back = params.get("migrate_vm_back", "no") == "yes"
    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    # NOTE(review): these two are raw strings ("yes"/"no"), so the truth
    # test below is effectively always true for non-empty values — confirm
    # whether '== "yes"' was intended.
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        params['virsh_options'] = virsh_options

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migrate_setup = libvirt.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local or sebool_fusefs_remote:
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()

        # Setup glusterfs and disk xml.
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        libvirt.set_vm_disk(vm, params)

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        # Check if gluster server is deployed locally
        if not host_ip:
            # Local gluster server: open the gluster ports on this host.
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_setup.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # The package 'glusterfs-fuse' is not installed on target
            # which makes issue when trying to 'mount -t glusterfs'
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    # Remember to uninstall it again during cleanup.
                    remove_pkg = True

            # Mount the volume locally and on the remote host.
            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        mig_result = do_migration(vm, dest_uri, options, extra)
        check_migration_res(mig_result)

        if migr_vm_back:
            # Migrate the guest back to the source host; the SSH
            # connection runs in the reverse direction (remote -> local).
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                # Destroy the leftover guest on the remote side before
                # failing, so cleanup can proceed.
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.info("Recovery test environment")
        orig_config_xml.sync()
        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)

        # Cleanup selinu configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True,
                                            ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge
    2) migrate guest with direct type interface when a macvtap device name
       exists on dest host

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param ping_dest: The destination to be ping
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s"
                      % (status, output))

    def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
        """
        A wrapper to sync vm xml on localhost and remote host

        :param vmxml: domain VMXML instance
        :param vm_name: The name of VM
        :param virsh_instance: virsh instance object
        """
        if vm_name and virsh_instance != virsh:
            # Remote sync: copy the XML over, then destroy/undefine the
            # existing domain before redefining it from the new XML.
            remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
                                 vmxml.xml, vmxml.xml)
            if virsh_instance.domain_exists(vm_name):
                if virsh_instance.is_alive(vm_name):
                    virsh_instance.destroy(vm_name, ignore_status=True)
                # NOTE(review): undefine is given the XML path, not the
                # domain name — confirm the virsh wrapper accepts this.
                virsh_instance.undefine(vmxml.xml, ignore_status=True)
            virsh_instance.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

    def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        :param virsh_instance: virsh instance object
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        # Drop all existing interfaces, then add the one described by
        # iface_dict.
        vmxml.remove_all_device_by_type('interface')
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict,
                                            virsh_instance=virsh_instance)
        vmxml.add_device(iface)
        vmxml.xmltreefile.write()
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        logging.debug("VM XML after updating interface: %s" % vmxml)

    def update_net_dict(net_dict, runner=utils_net.local_runner):
        """
        Update network dict

        :param net_dict: The network dict to be updated
        :param runner: Command runner
        :return: Updated network dict
        """
        if net_dict.get("net_name", "") == "direct-macvtap":
            logging.info("Updating network iface name")
            # Use the first interface reported UP on the target host.
            iface_name = utils_net.get_net_if(runner=runner, state="UP")[0]
            net_dict.update({"forward_iface": iface_name})
        else:
            # TODO: support other types
            logging.info("No need to update net_dict. We only support to "
                         "update direct-macvtap type for now.")
        logging.debug("net_dict is %s" % net_dict)
        return net_dict

    def get_remote_direct_mode_vm_mac(vm_name, uri):
        """
        Get mac of remote direct mode VM

        :param vm_name: The name of VM
        :param uri: The uri on destination
        :return: mac
        :raise: test.fail when the result of virsh domiflist is incorrect
        """
        # NOTE(review): this helper appears unused in the visible body.
        vm_mac = None
        res = virsh.domiflist(
            vm_name, uri=uri,
            ignore_status=False).stdout_text.strip().split("\n")
        if len(res) < 2:
            test.fail("Unable to get remote VM's mac: %s" % res)
        else:
            # Last column of the last row is the MAC address.
            vm_mac = res[-1].split()[-1]
        return vm_mac

    def create_fake_tap(remote_session):
        """
        Create a fake macvtap on destination host.

        :param remote_session: The session to the destination host.
        :return: The new tap device
        """
        tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'"
        tap_idx = remote_session.cmd_output(tap_cmd).strip()
        if not tap_idx:
            test.fail("Unable to get tap index using %s."
                      % tap_cmd)
        # Occupy the next tap index with a plain file.
        fake_tap_dest = 'tap'+str(int(tap_idx)+1)
        logging.debug("creating a fake tap %s...", fake_tap_dest)
        cmd = "touch /dev/%s" % fake_tap_dest
        remote_session.cmd(cmd)
        return fake_tap_dest

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    libvirt_version.is_libvirt_feature_supported(params)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient")
    ping_dest = params.get("ping_dest", "www.baidu.com")
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")

    target_vm_name = params.get("target_vm_name")
    direct_mode = "yes" == params.get("direct_mode", "no")
    check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no")
    create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no")
    macvtap_cmd = params.get("macvtap_cmd")
    modify_target_vm = "yes" == params.get("modify_target_vm", "no")
    ovs_bridge_name = params.get("ovs_bridge_name")
    # NOTE: eval() of test parameters — trusted config input only.
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    # NOTE(review): cmd_parms appears unused in the visible body.
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}

    virsh_session_remote = None
    libvirtd_conf = None
    mig_result = None
    target_org_xml = None
    target_vm_session = None
    target_vm = None
    exp_macvtap = []
    fake_tap_dest = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    # Remember the original connect URI so it can be restored in cleanup.
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    action_during_mig = None
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        # Switch the migration to postcopy mode while it is running.
        action_during_mig = virsh.migrate_postcopy

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)

        if target_vm_name:
            # A pre-installed guest must already exist on the destination.
            target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir,
                                      vm.address_cache)
            target_vm.connect_uri = dest_uri
            if not virsh_session_remote.domain_exists(target_vm_name):
                test.error("VM %s should be installed on %s."
                           % (target_vm_name, server_ip))
            # Backup guest's xml on remote
            target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                target_vm_name, virsh_instance=virsh_session_remote)
            # Scp original xml to remote for restoration
            remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
                                 target_org_xml.xml, target_org_xml.xml)
            logging.debug("target xml is %s" % target_org_xml)

        if ovs_bridge_name:
            # Create the ovs bridge on both hosts.
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))

        if network_dict:
            # Define the network on the destination first, then locally.
            update_net_dict(network_dict, runner=remote_session.cmd)
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            logging.info("dest: network created")
            update_net_dict(network_dict)
            libvirt_network.create_or_del_network(network_dict)
            logging.info("localhost: network created")

        if target_vm_name:
            if modify_target_vm and iface_dict:
                logging.info("Updating remote VM's interface")
                update_iface_xml(target_vm_name, iface_dict,
                                 virsh_instance=virsh_session_remote)
            target_vm.start()
            target_vm_session = target_vm.wait_for_serial_login(timeout=240)
            check_vm_network_accessed(ping_dest, session=target_vm_session)
            if check_macvtap_exists and macvtap_cmd:
                # Get macvtap device's index on remote after target_vm started
                idx = remote_session.cmd_output(macvtap_cmd).strip()
                if not idx:
                    test.fail("Unable to get macvtap index using %s."
                              % macvtap_cmd)
                # Generate the expected macvtap devices' index list
                exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)]
                if create_fake_tap_dest:
                    fake_tap_dest = create_fake_tap(remote_session)

        remote_session.close()

        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)

        if not utils_package.package_install('dhcp-client',
                                             session=vm_session):
            test.error("Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
        # NOTE(review): 'mac' is only bound when iface_dict is non-empty —
        # presumably iface_dict is always provided; confirm in the cfg.
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        if direct_mode:
            check_vm_network_accessed(ping_dest, session=vm_session)
        else:
            check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        # Check network accessibility after migration
        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest,
                                      session=vm_session_after_mig)

            if check_macvtap_exists and macvtap_cmd:
                remote_session = remote.remote_login("ssh", server_ip, "22",
                                                     server_user, server_pwd,
                                                     r'[$#%]')
                # Check macvtap devices' index after migration
                idx = remote_session.cmd_output(macvtap_cmd)
                act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")]
                if act_macvtap != exp_macvtap:
                    test.fail("macvtap devices after migration are incorrect!"
                              " Actual: %s, Expected: %s. "
                              % (act_macvtap, exp_macvtap))
            else:
                if fake_tap_dest:
                    # The fake tap file must still exist on the destination.
                    res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest,
                                                params, runner_on_target)
                    libvirt.check_exit_status(res)

            if target_vm_session:
                check_vm_network_accessed(ping_dest,
                                          session=target_vm_session)

            # Execute migration from remote
            if migrate_vm_back:
                # SSH connection in the reverse direction (remote -> local).
                ssh_connection = utils_conn.SSHConnection(
                    server_ip=client_ip, server_pwd=client_pwd,
                    client_ip=server_ip, client_pwd=server_pwd)
                try:
                    ssh_connection.conn_check()
                except utils_conn.ConnectionError:
                    ssh_connection.conn_setup()
                    ssh_connection.conn_check()

                # Pre migration setup for local machine
                migration_test.migrate_pre_setup(src_uri, params)
                cmd = "virsh migrate %s %s %s" % (vm_name,
                                                  options, src_uri)
                logging.debug("Start migration: %s", cmd)
                cmd_result = remote.run_remote_cmd(cmd, params,
                                                   runner_on_target)
                logging.info(cmd_result)
                if cmd_result.exit_status:
                    test.fail("Failed to run '%s' on remote: %s"
                              % (cmd, cmd_result))
                logging.debug("VM is migrated back.")

                # Verify connectivity again after migrating back.
                vm.connect_uri = bk_uri
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                vm.create_serial_console()
                vm_session_after_mig_bak = vm.wait_for_serial_login(
                    timeout=240)
                vm_session_after_mig_bak.cmd(restart_dhclient)
                check_vm_network_accessed(ping_dest, vm_session_after_mig_bak)
    finally:
        logging.debug("Recover test environment")
        vm.connect_uri = bk_uri
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        if target_vm and target_vm.is_alive():
            target_vm.destroy(gracefully=False)

        if target_org_xml and target_vm_name:
            logging.info("Recovery XML configration for %s.", target_vm_name)
            virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
            vm_sync(target_org_xml, vm_name=target_vm_name,
                    virsh_instance=virsh_session_remote)
            virsh_session_remote.close_session()

        if fake_tap_dest:
            remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest)

        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name,
                                        session=remote_session)
        remote_session.close()

        if target_vm_session:
            target_vm_session.close()

        if virsh_session_remote:
            virsh_session_remote.close_session()

        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
def run(test, params, env):
    """
    Test migration with memory related configuration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    local_ip = params.get("local_ip")
    local_pwd = params.get("local_pwd")
    ballooned_mem = params.get("ballooned_mem")
    # 'check' selects the scenario: "mem_balloon" or "mem_device".
    check = params.get("check")
    remove_dict = {}
    src_libvirt_file = None

    remote_virsh_dargs = {'remote_ip': remote_ip, 'remote_user': remote_user,
                          'remote_pwd': remote_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirtd_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    # NOTE: eval() of a test parameter — trusted config input only.
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if check == "mem_balloon":
            # Update memory balloon device to correct model
            membal_dict = {'membal_model': 'virtio',
                           'membal_stats_period': '10'}
            libvirt.update_memballoon_xml(new_xml, membal_dict)

        if check == "mem_device":
            libvirt_cpu.add_cpu_settings(new_xml, params)

            # Collect 'memdev_*' params into kwargs for the dimm device XML.
            dimm_params = {k.replace('memdev_', ''): v
                           for k, v in params.items() if k.startswith('memdev_')}
            dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)

            libvirt.add_vm_device(new_xml, dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = params.get("conf_type", "libvirtd")
            if conf_type == "libvirtd" and utils_split_daemons.is_modular_daemon():
                # Modular daemons keep the qemu driver config in virtqemud.
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)

        try:
            if not vm.is_alive():
                vm.start()
        except virt_vm.VMStartError as e:
            logging.info("Failed to start VM")
            test.fail("Failed to start VM: %s" % vm_name)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, **extra_args)
        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

        if check == "mem_balloon":
            # Balloon the migrated guest on the destination and verify the
            # new size is reported by dommemstat.
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            remote_virsh_session.setmem(vm_name, ballooned_mem, None, None,
                                        False, "", **virsh_args)

            def check_mem_balloon():
                """Check if memory balloon worked"""
                memstat_ouput = remote_virsh_session.dommemstat(
                    vm_name, "", **virsh_args)
                memstat_after = memstat_ouput.stdout_text
                # First line of dommemstat, second column, is compared
                # against the requested balloon size.
                mem_after = memstat_after.splitlines()[0].split()[1]
                if mem_after != ballooned_mem:
                    logging.debug("Current memory size is: %s" % mem_after)
                    return False
                return True

            check_ret = utils_misc.wait_for(check_mem_balloon, timeout=20)
            if not check_ret:
                test.fail("Memory is not ballooned to the expected size: %s"
                          % ballooned_mem)

            remote_virsh_session.close_session()

        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=remote_ip,
                                               username=remote_user,
                                               password=remote_pwd)
        if check == "mem_device":
            # Backtick-separated patterns to look for on the remote
            # qemu command line.
            qemu_checks = params.get('qemu_checks', '').split('`')
            logging.debug("qemu_checks:%s" % qemu_checks[0])
            for qemu_check in qemu_checks:
                libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                            runner_on_target)

        if migrate_vm_back:
            # SSH connection in the reverse direction (remote -> local).
            ssh_connection = utils_conn.SSHConnection(server_ip=remote_ip,
                                                      server_pwd=remote_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            src_full_uri = libvirt_vm.complete_uri(
                params.get("migrate_source_host"))

            migration_test.migrate_pre_setup(src_full_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              options, src_full_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                # Destroy the leftover guest on the remote side before
                # failing, so cleanup can proceed.
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    connect_uri = None

    # Skip early when mandatory remote-connection parameters were left at
    # their "ENTER..." placeholder defaults.
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Ensure a working (password-less) ssh connection to the server
            # exists before building the remote URI.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type, transport=transport,
                dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            dom_capabilities = None
            # make sure we take maxvcpus from right host, helps incase remote
            try:
                dom_capabilities = domcap.DomCapabilityXML()
                maxvcpus = dom_capabilities.max
                logging.debug("maxvcpus calculate from domcapabilities "
                              "is %s", maxvcpus)
            except Exception as err:
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit are not swallowed; chain the original error for
                # easier debugging.
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "domcapabilities xml:\n%s"
                                          % dom_capabilities) from err

            if not maxvcpus:
                # Message fixed: a space was missing between "vcpu" and
                # "from" in the concatenated string.
                raise exceptions.TestFail("Failed to get max value for vcpu "
                                          "from domcapabilities "
                                          "xml:\n%s" % dom_capabilities)
        except Exception as details:
            # Python-3 compatible syntax (was the Python-2-only form
            # 'except Exception, details:', a SyntaxError on Python 3).
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))
def run(test, params, env):
    """
    Test virsh migrate command with virtio-transitional device models.

    Flow:
    1. Optionally update the guest's disk/interface/memballoon/rng device
       model and place the disk image on NFS shared storage.
    2. Migrate the VM to the destination host; on success, verify network
       reachability there and (optionally) the expected device model in the
       target guest XML.
    3. Optionally migrate the VM back from the destination host.
    4. Always restore the original guest XML and clean up both hosts.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        # Fall back to the default guest image located on the NFS mount.
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip,
                          'remote_user': server_user,
                          'remote_pwd': server_pwd,
                          'unprivileged_user': None,
                          'ssh_remote_auth': True}
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up the inactive guest XML so it can be restored in the finally
    # block regardless of how the test ends.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Drop the remote-daemon key from the local modular-daemon config so
        # the migration URI resolves; restored in the finally block.
        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra, **extra_args)
        mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            # Migration succeeded: the guest must be reachable on the target.
            migration_test.ping_vm(vm, params, dest_uri)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)
                .stdout_text.strip())
            # Pick the device model under test to search for in the XML.
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migrate_vm_back:
            # Set up ssh from the target host back to the source host so the
            # reverse migration can connect.
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        # NOTE(review): 'blk_source' is only bound inside the guest_src_url
        # branch above; if an exception fires before that assignment this
        # check would raise NameError — confirm intended.
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    def boot_time():
        """Return the guest boot timestamp reported by 'uptime --since'."""
        session = vm.wait_for_login()
        boot_time = session.cmd_output("uptime --since")
        session.close()
        return boot_time

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    local_pwd = params.get("local_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    wait_time = int(params.get('wait_time', 5))
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        # Resolve the domain reference that will be passed on the virsh
        # command line (id / name / uuid / hex id / deliberately invalid).
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            # Setup ssh connection
            ssh_connection = utils_conn.SSHConnection(server_ip=local_ip,
                                                      server_pwd=local_pwd,
                                                      client_ip=remote_ip,
                                                      client_pwd=remote_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # the operation before the end of reboot
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            if not status_error:
                # Not need to check the boot up time if it is a negative test
                first_boot_time = boot_time()
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                # Raw string: the pattern contains regex escapes (\s).
                if not virsh.has_command_help_match('reboot',
                                                    r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            # avoid the check if it is negative test
            if not status_error:
                # Holder for the last domstate result produced inside the
                # polling closure.  (A plain local was previously rebound
                # inside _wait_for_reboot_up(), so on timeout the failure
                # message dereferenced '' and crashed with AttributeError.)
                last_domstate = {'result': None}

                def _wait_for_reboot_up():
                    """Return True once guest runs with a newer boot time."""
                    second_boot_time = boot_time()
                    is_rebooted = second_boot_time > first_boot_time
                    cmdoutput = virsh.domstate(vm_ref, '--reason',
                                               ignore_status=True, debug=True)
                    last_domstate['result'] = cmdoutput
                    domstate_status = cmdoutput.exit_status
                    output = "running" in cmdoutput.stdout
                    return not domstate_status and output and is_rebooted

                if not wait.wait_for(_wait_for_reboot_up,
                                     timeout=wait_time, step=1):
                    # wait_for calls the closure at least once, so the
                    # holder is always populated here.
                    domstate_result = last_domstate['result']
                    test.fail("Cmd error: %s Error status: %s"
                              % (domstate_result.stderr,
                                 domstate_result.stdout))
        elif pre_domian_status != 'shutoff':
            vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref, ignore_status=True, debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()
        if 'ssh_connection' in locals():
            ssh_connection.auto_recover = True
def run(test, params, env):
    """
    Test virsh migrate command with net failover (teaming) devices.

    Flow:
    1. Optionally configure SR-IOV VFs, rp_filter, PF/bridge connections and
       hostdev/bridge networks on both source and destination hosts, then
       give the guest a persistent (virtio) + transient (hostdev) teaming
       interface pair.
    2. Migrate the VM; on success verify guest reachability, network
       connection counts, VF driver binding, and interface count.
    3. Optionally migrate the VM back and re-verify.
    4. Always restore the guest XML, host network configuration, VF counts
       and rp_filter settings.

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """

    def check_vm_network_accessed(session=None, ping_dest="www.baidu.com"):
        """
        The operations to the VM need to be done before or after migration happens

        :param session: The session object to the host
        :param ping_dest: The destination to be ping

        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_test.ping(ping_dest,
                                         count=10,
                                         timeout=20,
                                         output_func=logging.debug,
                                         session=session)
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    def get_vm_ifaces(session=None):
        """
        Get interfaces of vm

        :param session: The session object to the host
        :return: interfaces
        """
        p_iface, v_iface = utils_net.get_remote_host_net_ifs(session)

        return p_iface

    def check_vm_iface_num(iface_list, exp_num=3):
        """
        Check the number of interfaces

        :param iface_list: The interface list
        :param exp_num: The expected number
        :raise: test.fail when interfaces' number is not equal to exp_num
        """
        if len(iface_list) != exp_num:
            test.fail("%d interfaces should be found on the vm, "
                      "but find %s." % (exp_num, iface_list))

    def create_or_del_networks(pf_name, params, remote_virsh_session=None,
                               is_del=False):
        """
        Create or delete network on local or remote

        :param params: Dictionary with the test parameters
        :param pf_name: The name of PF
        :param remote_virsh_session: The virsh session object to the remote host
        :param is_del: Whether the networks should be deleted
        :raise: test.fail when fails to define/start network
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
        bridge_name = params.get("bridge_name", "br0")

        net_dict = {"net_name": net_hostdev_name,
                    "net_forward": net_hostdev_fwd,
                    "net_forward_pf": '{"dev": "%s"}' % pf_name}
        bridge_dict = {"net_name": net_bridge_name,
                       "net_forward": net_bridge_fwd,
                       "net_bridge": '{"name": "%s"}' % bridge_name}

        if not is_del:
            # Define and start both networks, either locally or (after
            # copying the XML over) through the remote virsh session.
            for net_params in (net_dict, bridge_dict):
                net_dev = libvirt.create_net_xml(net_params.get("net_name"),
                                                 net_params)
                if not remote_virsh_session:
                    if net_dev.get_active():
                        net_dev.undefine()
                    net_dev.define()
                    net_dev.start()
                else:
                    remote.scp_to_remote(server_ip, '22', server_user,
                                         server_pwd,
                                         net_dev.xml, net_dev.xml,
                                         limit="", log_filename=None,
                                         timeout=600, interface=None)
                    remote_virsh_session.net_define(net_dev.xml, **virsh_args)
                    remote_virsh_session.net_start(net_params.get("net_name"),
                                                   **virsh_args)
        else:
            # Destroy/undefine the networks on whichever host owns them.
            virsh_session = virsh
            if remote_virsh_session:
                virsh_session = remote_virsh_session
            for nname in (net_hostdev_name, net_bridge_name):
                if nname not in virsh_session.net_state_dict():
                    continue
                virsh_session.net_destroy(nname, debug=True,
                                          ignore_status=True)
                virsh_session.net_undefine(nname, debug=True,
                                           ignore_status=True)

    def check_vm_network_connection(net_name, expected_conn=0):
        """
        Check network connections in network xml

        :param net_name: The network to be checked
        :param expected_conn: The expected value
        :raise: test.fail when fails
        """
        output = virsh.net_dumpxml(net_name, debug=True).stdout_text
        if expected_conn == 0:
            reg_pattern = r"<network>"
        else:
            reg_pattern = r"<network connections='(\d)'>"
        res = re.findall(reg_pattern, output, re.I)
        if not res:
            test.fail("Unable to find expected connection in %s." % net_name)
        if expected_conn != 0:
            if expected_conn != int(res[0]):
                test.fail("Unable to get expected connection number."
                          "Expected: %s, Actual %s"
                          % (expected_conn, int(res[0])))

    def get_hostdev_addr_from_xml():
        """
        Get VM hostdev address

        :return: pci driver id
        """
        address_dict = {}
        for ifac in vm_xml.VMXML.new_from_dumpxml(vm_name).devices.by_device_tag("interface"):
            if ifac.type_name == "hostdev":
                address_dict = ifac.hostdev_address.attrs

        return libvirt.pci_info_from_address(address_dict, 16, "id")

    def check_vfio_pci(pci_path, status_error=False):
        """
        Check if vf driver is vfio-pci

        :param pci_path: The absolute path of pci device
        :param status_error: Whether the driver should be vfio-pci
        """
        cmd = "readlink %s/driver | awk -F '/' '{print $NF}'" % pci_path
        output = process.run(cmd, shell=True,
                             verbose=True).stdout_text.strip()
        if (output == "vfio-pci") == status_error:
            test.fail("Get incorrect dirver %s, it should%s be vfio-pci."
                      % (output, ' not' if status_error else ''))

    def update_iface_xml(vmxml):
        """
        Update interfaces for guest

        :param vmxml: vm_xml.VMXML object
        """
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        # Persistent virtio interface (the failover "backup" device) plus a
        # transient hostdev-backed interface teamed with it.
        iface_dict = {"type": "network",
                      "source": "{'network': 'host-bridge'}",
                      "mac": mac_addr, "model": "virtio",
                      "teaming": '{"type":"persistent"}',
                      "alias": '{"name": "ua-backup0"}',
                      "inbound": '{"average":"5"}',
                      "outbound": '{"average":"5"}'}

        iface_dict2 = {"type": "network",
                       "source": "{'network': 'hostdev-net'}",
                       "mac": mac_addr, "model": "virtio",
                       "teaming": '{"type":"transient", "persistent": "ua-backup0"}'}

        iface = interface.Interface('network')
        for ifc in (iface_dict, iface_dict2):
            iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", ifc)
            vmxml.add_device(iface)
        vmxml.sync()

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    bridge_name = params.get("bridge_name", "br0")
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    driver = params.get("driver", "ixgbe")
    vm_tmp_file = params.get("vm_tmp_file", "/tmp/test.txt")
    cmd_during_mig = params.get("cmd_during_mig")
    net_failover_test = "yes" == params.get("net_failover_test", "no")
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")

    err_msg = params.get("err_msg")
    status_error = "yes" == params.get("status_error", "no")
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    # Separate copy of params for the destination host (its own PF name).
    destparams_dict = copy.deepcopy(params)

    remote_virsh_session = None
    vm_session = None
    vm = None
    mig_result = None
    func_name = None
    extra_args = {}
    default_src_vf = 0
    default_dest_vf = 0
    default_src_rp_filter = 1
    default_dest_rp_filer = 1

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support migration with "
                    "net failover devices.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up the inactive guest XML so it can be restored during cleanup.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if net_failover_test:
            # Locate the PF on both hosts and remember the current VF counts
            # and rp_filter values so they can be restored on cleanup.
            src_pf, src_pf_pci = utils_sriov.find_pf(driver)
            logging.debug("src_pf is %s. src_pf_pci: %s", src_pf, src_pf_pci)
            params['pf_name'] = src_pf
            dest_pf, dest_pf_pci = utils_sriov.find_pf(driver, server_session)
            logging.debug("dest_pf is %s. dest_pf_pci: %s", dest_pf,
                          dest_pf_pci)
            destparams_dict['pf_name'] = dest_pf

            src_pf_pci_path = utils_misc.get_pci_path(src_pf_pci)
            dest_pf_pci_path = utils_misc.get_pci_path(dest_pf_pci,
                                                       server_session)

            cmd = "cat %s/sriov_numvfs" % (src_pf_pci_path)
            default_src_vf = process.run(cmd, shell=True,
                                         verbose=True).stdout_text

            cmd = "cat %s/sriov_numvfs" % (dest_pf_pci_path)
            status, default_dest_vf = utils_misc.cmd_status_output(
                cmd, shell=True, session=server_session)
            if status:
                test.error("Unable to get default sriov_numvfs on target!"
                           "status: %s, output: %s"
                           % (status, default_dest_vf))

            if not utils_sriov.set_vf(src_pf_pci_path, vf_no):
                test.error("Failed to set vf on source.")

            if not utils_sriov.set_vf(dest_pf_pci_path, vf_no,
                                      session=server_session):
                test.error("Failed to set vf on target.")

            # Create PF and bridge connection on source and target host
            cmd = 'cat /proc/sys/net/ipv4/conf/all/rp_filter'
            default_src_rp_filter = process.run(cmd, shell=True,
                                                verbose=True).stdout_text
            status, default_dest_rp_filter = utils_misc.cmd_status_output(
                cmd, shell=True, session=server_session)
            if status:
                test.error("Unable to get default rp_filter on target!"
                           "status: %s, output: %s"
                           % (status, default_dest_rp_filter))

            # Disable reverse-path filtering on both hosts for the bridge.
            cmd = 'echo 0 >/proc/sys/net/ipv4/conf/all/rp_filter'
            process.run(cmd, shell=True, verbose=True)
            utils_misc.cmd_status_output(cmd, shell=True,
                                         session=server_session)
            utils_sriov.add_or_del_connection(params, is_del=False)
            utils_sriov.add_or_del_connection(destparams_dict, is_del=False,
                                              session=server_session)

            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            create_or_del_networks(
                dest_pf, params, remote_virsh_session=remote_virsh_session)
            remote_virsh_session.close_session()
            create_or_del_networks(src_pf, params)

            # Change network interface xml
            mac_addr = utils_net.generate_mac_address_simple()
            update_iface_xml(new_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)

        if net_failover_test:
            utils_net.restart_guest_network(vm_session)
        iface_list = get_vm_ifaces(vm_session)

        vm_ipv4, vm_ipv6 = utils_net.get_linux_ipaddr(vm_session,
                                                      iface_list[0])
        check_vm_network_accessed(ping_dest=vm_ipv4)

        if net_failover_test:
            check_vm_iface_num(iface_list)
            check_vm_network_connection(net_hostdev_name, 1)
            check_vm_network_connection(net_bridge_name, 1)

            hostdev_pci_id = get_hostdev_addr_from_xml()
            vf_path = utils_misc.get_pci_path(hostdev_pci_id)
            check_vfio_pci(vf_path)

        if cmd_during_mig:
            s, o = utils_misc.cmd_status_output(cmd_during_mig, shell=True,
                                                session=vm_session)
            if s:
                test.fail("Failed to run %s in vm." % cmd_during_mig)

        if extra.count("--postcopy"):
            func_name = virsh.migrate_postcopy
            extra_args.update({'func_params': params})
        if cancel_migration:
            func_name = migration_test.do_cancel

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session, vm_ipv4)
            server_session.close()
            if net_failover_test:
                # Check network connection
                check_vm_network_connection(net_hostdev_name)
                check_vm_network_connection(net_bridge_name)
                # VF driver should not be vfio-pci
                check_vfio_pci(vf_path, True)

                cmd_parms.update({'vm_ip': vm_ipv4,
                                  'vm_pwd': params.get("password")})
                vm_after_mig = remote.VMManager(cmd_parms)
                vm_after_mig.setup_ssh_auth()
                cmd = "ip link"
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)
                p_iface = re.findall(r"\d+:\s+(\w+):\s+.*",
                                     cmd_result.stdout_text)
                p_iface = [x for x in p_iface if x != 'lo']
                check_vm_iface_num(p_iface)

                # Check the output of ping command
                # NOTE(review): relies on cmd_during_mig having written
                # vm_tmp_file — confirm against the test configuration.
                cmd = 'cat %s' % vm_tmp_file
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)
                if re.findall('Destination Host Unreachable',
                              cmd_result.stdout_text, re.M):
                    test.fail("The network does not work well during "
                              "the migration peirod. ping output: %s"
                              % cmd_result.stdout_text)

            # Execute migration from remote
            if migr_vm_back:
                # Set up ssh from the target host back to the source host.
                ssh_connection = utils_conn.SSHConnection(
                    server_ip=client_ip, server_pwd=client_pwd,
                    client_ip=server_ip, client_pwd=server_pwd)
                try:
                    ssh_connection.conn_check()
                except utils_conn.ConnectionError:
                    ssh_connection.conn_setup()
                    ssh_connection.conn_check()

                # Pre migration setup for local machine
                migration_test.migrate_pre_setup(src_uri, params)
                cmd = "virsh migrate %s %s %s" % (vm_name,
                                                  virsh_options, src_uri)
                logging.debug("Start migration: %s", cmd)
                cmd_result = remote.run_remote_cmd(cmd, params,
                                                   runner_on_target)
                logging.info(cmd_result)
                if cmd_result.exit_status:
                    test.fail("Failed to run '%s' on remote: %s"
                              % (cmd, cmd_result))

                logging.debug("migration back done")
                check_vm_network_accessed(ping_dest=vm_ipv4)
                if net_failover_test:
                    if vm_session:
                        vm_session.close()
                    vm_session = vm.wait_for_login()
                    iface_list = get_vm_ifaces(vm_session)
                    check_vm_iface_num(iface_list)
            else:
                check_vm_network_accessed(ping_dest=vm_ipv4)
                if net_failover_test:
                    iface_list = get_vm_ifaces(vm_session)
                    check_vm_iface_num(iface_list)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        # Only undo SR-IOV/network setup that actually happened ('src_pf' /
        # 'dest_pf' are bound only inside the net_failover_test branch).
        if 'src_pf' in locals():
            cmd = 'echo %s >/proc/sys/net/ipv4/conf/all/rp_filter' % default_src_rp_filter
            process.run(cmd, shell=True, verbose=True)
            utils_sriov.add_or_del_connection(params, is_del=True)
            create_or_del_networks(src_pf, params, is_del=True)
        if 'dest_pf' in locals():
            cmd = 'echo %s >/proc/sys/net/ipv4/conf/all/rp_filter' % default_dest_rp_filter
            utils_misc.cmd_status_output(cmd, shell=True,
                                         session=server_session)
            utils_sriov.add_or_del_connection(destparams_dict,
                                              session=server_session,
                                              is_del=True)
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params, remote_virsh_session,
                                   is_del=True)
            remote_virsh_session.close_session()

        if 'dest_pf_pci_path' in locals() and default_dest_vf != vf_no:
            utils_sriov.set_vf(dest_pf_pci_path, default_dest_vf,
                               server_session)
        if 'src_pf_pci_path' in locals() and default_src_vf != vf_no:
            utils_sriov.set_vf(src_pf_pci_path, default_src_vf)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)

        server_session.close()
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
def run(test, params, env):
    """
    Test live migration of a VM whose disk image is served from glusterfs.

    Flow as implemented below:
    1) Optionally enable SELinux booleans on source/target
    2) Set up a gluster volume (locally or on a configured gluster server)
    3) Mount the volume (optionally via a symlink), copy or convert the VM
       image onto it, and point the VM's disk XML at the gluster-backed file
    4) Migrate the VM to the destination and ping-check it there
    5) Optionally migrate back by running 'virsh migrate' on the target
    6) Restore the environment in the ``finally`` block

    :param test: test object
    :param params: dict, test parameters
    :param env: test environment object
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create or clean up the glusterfs backend directory.

        :param g_uri: glusterfs uri ("<host>:<volume>")
        :param params: dict, test parameters
        :param session: remote session object, or None for the local host
        :param is_clean: False to mount and return the backend image path;
                         True to unmount and remove the mount point
        :return: path of the gluster-backed image when is_clean is False,
                 otherwise None
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            # Create the mount point if needed and (re)mount the volume.
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                # Access the image through a symlink on both local and
                # remote hosts; the returned path then goes via the link.
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            # Teardown: drop the symlink, unmount, remove the directory.
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    # Local variables; remote_* params are mirrored into server_*/client_*
    # keys because the helpers below read those names from params.
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    virsh_options = params.get("virsh_options", "")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migrate_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    # NOTE(review): unlike selinux_local/selinux_remote above, these keep the
    # raw 'yes'/'no' strings, which are always truthy — the
    # "if sebool_fusefs_local or sebool_fusefs_remote" check below is
    # therefore always True; presumably '== "yes"' was intended. TODO confirm.
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    # Make sure all of the parameters are assigned valid values
    migrate_test = migration.MigrationTest()
    migrate_test.check_parameters(params)
    extra_args = migrate_test.update_virsh_migrate_extra_args(params)

    # Params for the migration connection URIs
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (virsh_options, postcopy_options)
        # NOTE(review): func_name is assigned here but never passed to
        # do_migration() below, so the postcopy trigger appears unused in
        # this test — confirm against migration.MigrationTest usage.
        func_name = virsh.migrate_postcopy

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up the inactive domain XML so it can be restored in finally.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure SELinux booleans on source/target as requested.
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
        if sebool_fusefs_local or sebool_fusefs_remote:
            seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
            seLinuxfusefs.setup()

        # Setup glusterfs; host_ip is empty when the gluster server is
        # deployed locally by setup_or_cleanup_gluster.
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params)
        logging.debug("host ip: %s ", host_ip)

        # Check if gluster server is deployed locally
        if not host_ip:
            # Open the gluster daemon/brick ports on the local firewall.
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_test.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # The package 'glusterfs-fuse' is not installed on target
            # which makes issue when trying to 'mount -t glusterfs'
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    # Remember to uninstall it again during cleanup.
                    remove_pkg = True

            # Mount the gluster backend on both local and remote hosts.
            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            # Get the image path
            image_source = vm.get_first_disk_devices()['source']
            image_info = utils_misc.get_image_info(image_source)
            if image_info["format"] == disk_format:
                disk_cmd = "cp -f %s %s" % (image_source, gluster_img)
            else:
                # Convert the disk format
                disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                            (image_info["format"], disk_format,
                             image_source, gluster_img))
            process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir),
                        shell=True)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        # Confirm the guest boots and is reachable before migrating.
        vm.wait_for_login().close()
        migrate_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        vms = [vm]
        migrate_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                  thread_timeout=900, ignore_status=True,
                                  virsh_opt=virsh_options, extra_opts=extra,
                                  **extra_args)
        # Verify the guest is reachable on the destination.
        migrate_test.ping_vm(vm, params, dest_uri)

        if migrate_vm_back:
            # The returning migration runs on the target host, so the SSH
            # connection is set up in the reverse direction.
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                # Make sure the guest is not left running on the target.
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.info("Recover test environment")
        # Clean VM on destination
        migrate_test.cleanup_vm(vm, dest_uri)

        # Restore the original domain XML backed up above.
        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
        if seLinuxfusefs:
            seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                # Uninstall glusterfs-fuse only if this run installed it.
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge

    The guest disk is switched to NFS shared storage, the interface is
    rewritten from iface_dict, migration runs source -> destination, and
    network connectivity to the guest IP is ping-checked before and after.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        Ping the guest to confirm it is reachable over the network.

        :param ping_dest: The destination (guest IP) to be pinged
        :param session: The session object to the host to ping from
                        (None means ping from the local host)
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    def update_iface_xml(vm_name, iface_dict):
        """
        Replace the guest's interfaces with one built from iface_dict.

        :param vm_name: The name of VM
        :param iface_dict: The interface configuration params
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        libvirt.add_vm_device(vmxml, iface)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        # Fall back to the default guest asset on the NFS mount.
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    migr_vm_back = "yes" == params.get("migr_vm_back", "no")

    ovs_bridge_name = params.get("ovs_bridge_name")
    # NOTE(review): eval() on test parameters — acceptable only because
    # params come from trusted test configuration, never external input.
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For --postcopy: append the option and run the postcopy trigger during
    # migration via do_migration's func argument.
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        func_name = virsh.migrate_postcopy

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        # Create the OVS bridge on both source and destination hosts.
        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        # Define the libvirt network on both hosts.
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict)
        remote_session.close()

        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # NOTE(review): 'mac' is bound only when iface_dict is non-empty;
        # get_guest_ip_addr(vm_session, mac) below would raise NameError
        # otherwise — presumably iface_dict is always provided by the test
        # configuration. TODO confirm.

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)
        check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        # On success, ping-check the guest from the destination host.
        if int(mig_result.exit_status) == 0:
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            check_vm_network_accessed(vm_ip, session=remote_session)
            remote_session.close()

        # Execute migration from remote
        if migr_vm_back:
            # Migrating back runs on the target host, so the SSH connection
            # is set up in the reverse direction.
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
            logging.debug("VM is migrated back.")
            check_vm_network_accessed(vm_ip)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        except Exception as err:
            logging.error(err)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restore the domain XML backed up before the test.
        logging.info("Recovery VM XML configuration")
        orig_config_xml.sync()

        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        # Remove the libvirt network from both hosts.
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        # Remove the OVS bridge from both hosts.
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name,
                                        session=remote_session)
        remote_session.close()

        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)