def run(test, params, env): """ qemu should be terminated when launching an L1 guest with "cap-nested-hv=on,max-cpu-compat=power8". 1) Launch a guest with "cap-nested-hv=on,max-cpu-compat=power8". 2) Check whether qemu terminates. 3) Check whether the qemu output is as expected. :param test: Qemu test object. :param params: the test params. :param env: test environment. """ params['start_vm'] = 'yes' error_msg = params['error_msg'] vm = env.get_vm(params['main_vm']) error_context.base_context('Try to create a qemu instance...', logging.info) try: vm.create(params=params) except VMCreateError as e: if not re.search(error_msg, e.output): logging.error(e.output) test.error('The error message could not be searched at qemu ' 'outputs.') logging.info('qemu terminated with the expected error message.') else: test.fail('The qemu instance should not be launched with ' '"cap-nested-hv=on" and "max-cpu-compat=power8".')
def run(test, params, env): """ vCPU offline/online test: 1) Launch a guest with many CPU. 2) Offline all CPUs except 0. 3) Online them again. """ host_cpu = cpu.online_count() cpu_range = range(host_cpu) cpu_list = "{}-{}".format(cpu_range[1], cpu_range[-1]) params["smp"] = params["vcpu_maxcpus"] = host_cpu params["start_vm"] = "yes" vm = env.get_vm(params['main_vm']) vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() error_context.base_context("Offline CPUs: {}".format(cpu_list), logging.info) session.cmd("chcpu -d {}".format(cpu_list), timeout=len(cpu_range)) if not check_if_vm_vcpu_match(1, vm): test.fail("CPU quantity on guest mismatch after offline") logging.info("{} have been offline.".format(cpu_list)) error_context.context("Online CPUs: {}".format(cpu_list), logging.info) session.cmd("chcpu -e {}".format(cpu_list), timeout=len(cpu_range)) if not check_if_vm_vcpu_match(host_cpu, vm): test.fail("CPU quantity on guest mismatch after online again") logging.info("{} have been online.".format(cpu_list))
def run(test, params, env): """ KVM -no-shutdown flag test: 1. Boot a guest, with -no-shutdown flag on command line 2. Run 'system_powerdown' command in monitor 3. Wait for guest OS to shutdown down and issue power off to the VM 4. Run 'system_reset' qemu monitor command 5. Run 'cont' qemu monitor command 6. Wait for guest OS to boot up 7. Repeat step 2-6 for 5 times. :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ timeout = int(params.get("login_timeout", 360)) repeat_times = int(params.get("repeat_times", 5)) error_context.base_context("Qemu -no-shutdown test") vm = env.get_vm(params["main_vm"]) vm.verify_alive() qemu_process_id = vm.get_pid() session = vm.wait_for_login(timeout=timeout) logging.info("The guest bootup successfully.") for i in range(repeat_times): error_context.context( "Round %s : Send monitor cmd system_powerdown." % str(i + 1), logging.info) # Send a system_powerdown monitor command vm.monitor.system_powerdown() # Wait for the session to become unresponsive and close it if not utils_misc.wait_for(lambda: not session.is_responsive(), timeout, 0, 1): test.fail("Oops, Guest refuses to go down!") if session: session.close() # Check the qemu id is not change if not utils_misc.wait_for(lambda: vm.is_alive(), 5, 0, 1): test.fail("VM not responsive after system_powerdown " "with -no-shutdown!") if vm.get_pid() != qemu_process_id: test.fail("Qemu pid changed after system_powerdown!") logging.info("Round %s -> System_powerdown successfully.", str(i + 1)) # Send monitor command system_reset and cont error_context.context( "Round %s : Send monitor command system_reset " "and cont." % str(i + 1), logging.info) vm.monitor.cmd("system_reset") vm.resume() session = vm.wait_for_login(timeout=timeout) logging.info("Round %s -> Guest is up successfully." % str(i + 1)) if vm.get_pid() != qemu_process_id: test.fail("Qemu pid changed after system_reset & cont!") if session: session.close()
def run(test, params, env): """ Base test for vnc, mainly focus on handshaking during vnc connection setup. This case check following point: 1) VNC server support different rfb protocol version. Now it is 3.3, 3.7 and 3.8. 2) Connection could be setup with password enable. 3) Change and __com.redhat_set_password monitor command could work. This case will do following step: 1) Start VM with VNC password enable. 2) Handshaking after vnc password set by change. 3) Handshaking after vnc password set by __com.redhat_set_password. 4) Handshaking again after vnc password timeout. :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environmen. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() port = vm.get_vnc_port() default_cmd = "__com.redhat_set_password protocol=vnc," default_cmd += "password=%s,expiration=%s" change_passwd_cmd = params.get("change_passwd_cmd", default_cmd) rfb_version_list = params.get("rfb_version").strip().split() for rfb_version in rfb_version_list: error_context.base_context("Test with guest RFB version %s" % rfb_version) rand = random.SystemRandom() rand.seed() password = utils_misc.generate_random_string(rand.randint(1, 8)) logging.info("Set VNC password to: %s", password) timeout = rand.randint(10, 100) logging.info("VNC password timeout is: %s", timeout) vm.monitor.send_args_cmd(change_passwd_cmd % (password, timeout)) error_context.context("Connect to VNC server after setting password" " to '%s'" % password) vnc = VNC(port=port, rfb_version=rfb_version) status = vnc.hand_shake(password) vnc.initialize() vnc.close() if not status: test.fail("VNC Authentication failed.") logging.info("VNC Authentication pass") logging.info("Waiting for vnc password timeout.") time.sleep(timeout + 5) error_context.context("Connect to VNC server after password expires") vnc = VNC(port=port, rfb_version=rfb_version) status = vnc.hand_shake(password) vnc.close() if status: # Should not handshake succeffully. test.fail("VNC connected with Timeout password, The" " cmd of setting expire time doesn't work.")
def run(test, params, env): """ KVM -no-shutdown flag test: 1. Boot a guest, with -no-shutdown flag on command line 2. Run 'system_powerdown' command in monitor 3. Wait for guest OS to shutdown down and issue power off to the VM 4. Run 'system_reset' qemu monitor command 5. Run 'cont' qemu monitor command 6. Wait for guest OS to boot up 7. Repeat step 2-6 for 5 times. :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ timeout = int(params.get("login_timeout", 360)) repeat_times = int(params.get("repeat_times", 5)) error_context.base_context("Qemu -no-shutdown test") vm = env.get_vm(params["main_vm"]) vm.verify_alive() qemu_process_id = vm.get_pid() session = vm.wait_for_login(timeout=timeout) logging.info("The guest bootup successfully.") for i in range(repeat_times): error_context.context("Round %s : Send monitor cmd system_powerdown." % str(i + 1), logging.info) # Send a system_powerdown monitor command vm.monitor.system_powerdown() # Wait for the session to become unresponsive and close it if not utils_misc.wait_for(lambda: not session.is_responsive(), timeout, 0, 1): test.fail("Oops, Guest refuses to go down!") if session: session.close() # Check the qemu id is not change if not utils_misc.wait_for(lambda: vm.is_alive(), 5, 0, 1): test.fail("VM not responsive after system_powerdown " "with -no-shutdown!") if vm.get_pid() != qemu_process_id: test.fail("Qemu pid changed after system_powerdown!") logging.info("Round %s -> System_powerdown successfully.", str(i + 1)) # Send monitor command system_reset and cont error_context.context("Round %s : Send monitor command system_reset " "and cont." % str(i + 1), logging.info) vm.monitor.cmd("system_reset") vm.resume() session = vm.wait_for_login(timeout=timeout) logging.info("Round %s -> Guest is up successfully." % str(i + 1)) if vm.get_pid() != qemu_process_id: test.fail("Qemu pid changed after system_reset & cont!") if session: session.close()
def run(test, params, env): """ Boots VMs until one of them becomes unresponsive, and records the maximum number of VMs successfully started: 1) boot the first vm 2) boot the second vm cloned from the first vm, check whether it boots up and all booted vms respond to shell commands 3) go on until cannot create VM anymore or cannot allocate memory for VM :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ error_context.base_context("waiting for the first guest to be up", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=login_timeout) num = 2 sessions = [session] # Boot the VMs try: try: while num <= int(params.get("max_vms")): # Clone vm according to the first one error_context.base_context("booting guest #%d" % num, logging.info) vm_name = "vm%d" % num vm_params = vm.params.copy() curr_vm = vm.clone(vm_name, vm_params) env.register_vm(vm_name, curr_vm) env_process.preprocess_vm(test, vm_params, env, vm_name) params["vms"] += " " + vm_name session = curr_vm.wait_for_login(timeout=login_timeout) sessions.append(session) logging.info("Guest #%d booted up successfully", num) # Check whether all previous shell sessions are responsive for i, se in enumerate(sessions): error_context.context( "checking responsiveness of guest" " #%d" % (i + 1), logging.debug) se.cmd(params.get("alive_test_cmd")) num += 1 except Exception as emsg: test.fail("Expect to boot up %s guests." "Failed to boot up #%d guest with " "error: %s." % (params["max_vms"], num, emsg)) finally: for se in sessions: se.close() logging.info("Total number booted: %d", (num - 1))
def run(test, params, env): """ Boots VMs until one of them becomes unresponsive, and records the maximum number of VMs successfully started: 1) boot the first vm 2) boot the second vm cloned from the first vm, check whether it boots up and all booted vms respond to shell commands 3) go on until cannot create VM anymore or cannot allocate memory for VM :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ error_context.base_context("waiting for the first guest to be up", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=login_timeout) num = 2 sessions = [session] # Boot the VMs try: try: while num <= int(params.get("max_vms")): # Clone vm according to the first one error_context.base_context("booting guest #%d" % num, logging.info) vm_name = "vm%d" % num vm_params = vm.params.copy() curr_vm = vm.clone(vm_name, vm_params) env.register_vm(vm_name, curr_vm) env_process.preprocess_vm(test, vm_params, env, vm_name) params["vms"] += " " + vm_name session = curr_vm.wait_for_login(timeout=login_timeout) sessions.append(session) logging.info("Guest #%d booted up successfully", num) # Check whether all previous shell sessions are responsive for i, se in enumerate(sessions): error_context.context("checking responsiveness of guest" " #%d" % (i + 1), logging.debug) se.cmd(params.get("alive_test_cmd")) num += 1 except Exception as emsg: test.fail("Expect to boot up %s guests." "Failed to boot up #%d guest with " "error: %s." % (params["max_vms"], num, emsg)) finally: for se in sessions: se.close() logging.info("Total number booted: %d" % (num - 1))
def run(test, params, env): """ Qemu numa basic test: 1) Get host numa topological structure 2) Start a guest and bind it on the cpus of one node 3) Check the memory status of qemu process. It should mainly use the memory in the same node. 4) Destroy the guest 5) Repeat step 2 ~ 4 on every node in host :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ error_context.context("Get host numa topological structure", logging.info) timeout = float(params.get("login_timeout", 240)) host_numa_node = utils_misc.NumaInfo() node_list = host_numa_node.online_nodes for node_id in node_list: error_context.base_context("Bind qemu process to numa node %s" % node_id, logging.info) vm = "vm_bind_to_%s" % node_id params['qemu_command_prefix'] = "numactl --cpunodebind=%s" % node_id utils_memory.drop_caches() node_MemFree = int(host_numa_node.read_from_node_meminfo(node_id, "MemFree")) if node_MemFree < int(params["mem"]) * 1024: test.cancel("No enough free memory in node %d." % node_id) env_process.preprocess_vm(test, params, env, vm) vm = env.get_vm(vm) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) session.close() error_context.context("Check the memory use status of qemu process", logging.info) memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node, vm.get_pid()) node_used_most = 0 memory_sz_used_most = 0 for index in range(len(node_list)): if memory_sz_used_most < memory_status[index]: memory_sz_used_most = memory_status[index] node_used_most = node_list[index] logging.debug("Qemu used %s pages in node" " %s" % (memory_status[index], node_list[index])) if node_used_most != node_id: test.fail("Qemu still use memory from other node. " "Expect: %s, used: %s" % (node_id, node_used_most)) error_context.context("Destroy guest.", logging.info) vm.destroy()
def run(test, params, env): """ KVM shutdown test: For a test with two VMs: client & guest 1) Log into the VMS(guests) that represent the client &guest 2) Send a shutdown command to the guest, or issue a system_powerdown monitor command (depending on the value of shutdown_method) 3) Wait until the guest is down :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() guest_vm = env.get_vm(params["guest_vm"]) guest_vm.verify_alive() timeout = int(params.get("login_timeout", 360)) # shutdown both of the sessions for vm in [client_vm, guest_vm]: vm_session = vm.wait_for_login(timeout=timeout, username="******", password="******") try: error_context.base_context("shutting down the VM") if params.get("shutdown_method") == "shell": # Send a shutdown command to the guest's shell vm_session.sendline(vm.get_params().get("shutdown_command")) error_context.context("waiting VM to go down " "(shutdown shell cmd)") elif params.get("shutdown_method") == "system_powerdown": # Sleep for a while -- give the guest a chance to finish # booting time.sleep(float(params.get("sleep_before_powerdown", 10))) # Send a system_powerdown monitor command vm.monitor.system_powerdown() error_context.context("waiting VM to go down " "(system_powerdown monitor cmd)") if not utils_misc.wait_for(vm.is_dead, 240, 0, 1): vm.destroy(gracefully=False, free_mac_addresses=True) test.fail("Guest refuses to go down") finally: vm_session.close()
def run(test, params, env): """ Check the interrupt controller mode. 1) Launch a guest with kernel-irqchip=on/off and ic-mode=xics/xive. 2) Get pic info from human monitor and get interrupts info inside guest. 3) Check whether irqchip and ic-mode match what we set. :param test: the test object. :param params: the test params. :param env: test environment. """ ic_mode = params["ic_mode"] kernel_irqchip = params["kernel_irqchip"] params["start_vm"] = "yes" vm = env.get_vm(params["main_vm"]) error_context.base_context("Try to create a qemu instance...", logging.info) try: vm.create(params=params) except VMCreateError as e: if re.search( r"kernel_irqchip requested but unavailable|" r"XIVE-only machines", e.output): test.cancel(e.output) raise else: vm.verify_alive() session = vm.wait_for_login() error_context.context("Get irqchip and ic-mode information.", logging.info) pic_o = vm.monitor.info("pic") irqchip_match = re.search(r"^irqchip: %s" % kernel_irqchip, pic_o, re.M) ic_mode_match = session.cmd_status("grep %s /proc/interrupts" % ic_mode.upper()) == 0 error_context.context("Check wherever irqchip/ic-mode match.", logging.info) if not irqchip_match: test.fail("irqchip does not match to '%s'." % kernel_irqchip) elif not ic_mode_match: test.fail("ic-mode does not match to '%s'." % ic_mode)
def run(test, params, env): def compile_kernel_selftests(): git_cmd = 'git clone --depth=1 {} {} 2>/dev/null'.format(git_repo, dst_dir) if os.path.exists(dst_dir): shutil.rmtree(dst_dir) process.run(git_cmd, timeout=360, shell=True) s, o = process.getstatusoutput(compile_cmd, timeout=180) if s: logging.error('Compile output: %s', o) test.error('Failed to compile the test suite.') dst_dir = params['dst_dir'] git_repo = params['git_repo'] compile_cmd = params['compile_cmd'] execute_suite_cmd = params['execute_suite_cmd'] required_pkgs = params.objects('required_pkgs') suite_timeout = params.get_numeric('suite_timeout') if not utils_package.package_install(required_pkgs): test.error("Failed to install required packages in host") error_context.base_context('Check if the CPU of host supports SVE', logging.info) cpu_utils.check_cpu_flags(params, 'sve', test) try: compile_kernel_selftests() s, o = process.getstatusoutput(execute_suite_cmd, timeout=suite_timeout) if s: test.fail('The exit code of "get-reg-list" test suite is not 0.') elif not all([result == "PASS" for result in re.findall(r'^sve\S*: (\w+)$', o, re.M)]): logging.error('Test result: %s', o) test.fail('The sve part of the "get-reg-list" test failed') logging.info('get-reg-list test passed') finally: shutil.rmtree(dst_dir, ignore_errors=True)
def run(test, params, env): """ Save VM while it's running, and then load it again. 1) Launch a VM. 2) Save VM via human monitor while VM is running. (unsafe) 3) Check if it exists in snapshots. 4) Load VM via human monitor. 5) Verify kernel and dmesg. 6) Delete snapshot after testing. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ snapshot_tag = "vm_" + utils_misc.generate_random_string(8) os_type = params["os_type"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login().close() try: error_context.base_context("Saving VM to %s" % snapshot_tag, logging.info) vm.monitor.human_monitor_cmd("savevm %s" % snapshot_tag) vm_snapshots = vm.monitor.info("snapshots") if snapshot_tag not in vm_snapshots: test.fail("Failed to save VM to %s" % snapshot_tag) error_context.context("Loading VM from %s" % snapshot_tag, logging.info) vm.monitor.human_monitor_cmd("loadvm %s" % snapshot_tag) if os_type == "linux": vm.verify_kernel_crash() vm.verify_dmesg() finally: if snapshot_tag in vm.monitor.info("snapshots"): vm.monitor.human_monitor_cmd("delvm %s" % snapshot_tag)
def run(test, params, env): """ Check the CPU model and MMU mode of host and guest are matched. 1) Launch a guest. 2) Get CPU information both of host and guest. 3) Assert that CPU model and MMU node are matched. :param test: the test object. :param params: the test params. :param env: test environment. """ def get_cpu_mmu(session=None): cmd_func = session.cmd if session else process.getoutput cpu_info = cmd_func("tail -n 11 /proc/cpuinfo") cpu_info = re.findall(r"(?:cpu\s+:\s+(\w+\d+)).*(?:MMU\s+:\s+(\w+))", cpu_info, re.S) if cpu_info: return cpu_info[0] test.error("Unable to get the CPU information of this system.") vm = env.get_vm(params["main_vm"]) vm.verify_alive() guest_session = vm.wait_for_login() error_context.base_context("Get CPU information of host and guest.", logging.info) host_cpu_model, host_mmu_mode = get_cpu_mmu() guest_cpu_model, guest_mmu_mode = get_cpu_mmu(guest_session) error_context.context("Assert CPU model and MMU mode of host and guest.", logging.info) assert (guest_cpu_model == host_cpu_model), ("The CPU model of the host " "and guest do not match") assert (guest_mmu_mode == host_mmu_mode), ("The MMU mode of the host and " "guest do not match") logging.info("CPU model and MMU mode of host and guest are matched.")
def run(test, params, env): """ KVM migration test: 1) Start a guest. 2) Start netperf server in guest. 3) Start multi netperf clients in host. 4) Migrate the guest in local during netperf clients working. 5) Repeatedly migrate VM and wait until netperf clients stopped. :param test: QEMU test object. :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ login_timeout = int(params.get("login_timeout", 360)) mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 netperf_timeout = int(params.get("netperf_timeout", "300")) client_num = int(params.get("client_num", "100")) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) guest_address = vm.get_address() host_address = utils_net.get_host_ip_address(params) remote_ip = params.get("remote_host", host_address) netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), params.get("netperf_link")) md5sum = params.get("pkg_md5sum") netperf_server_link = params.get("netperf_server_link_win") if netperf_server_link: netperf_server_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), netperf_server_link) server_md5sum_win = params.get("server_md5sum") netperf_client_link = params.get("netperf_client_link_win", netperf_link) client_md5sum_win = params.get("client_md5sum", md5sum) netperf_client_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), netperf_client_link) server_path = params.get("server_path", "/var/tmp/") client_path = params.get("client_path", "/var/tmp/") server_path_win = params.get("server_path_win") client_path_win = params.get("client_path_win") username = params.get("username", "root") password = params.get("password", "redhat") passwd = params.get("hostpassword", "redhat") client = params.get("shell_client", "ssh") port = params.get("shell_port", "22") prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") linesep = params.get( "shell_linesep", "\n").encode().decode('unicode_escape') status_test_command = params.get("status_test_command", "echo $?") compile_option_client_h = params.get("compile_option_client_h", "") compile_option_server_h = params.get("compile_option_server_h", "") compile_option_client_g = params.get("compile_option_client_g", "") compile_option_server_g = params.get("compile_option_server_g", "") if params.get("os_type") == "linux": session.cmd("iptables -F", ignore_all_errors=True) g_client_link = netperf_link g_server_link = netperf_link g_server_path = server_path g_client_path = client_path g_server_md5sum = md5sum g_client_md5sum = md5sum elif params.get("os_type") == "windows": g_client_link = netperf_client_link g_server_link = netperf_server_link g_server_path = server_path_win g_client_path = client_path_win g_server_md5sum = server_md5sum_win g_client_md5sum = client_md5sum_win netperf_client_g = None netperf_client_h = None netperf_server_g = None netperf_server_h = None try: netperf_client_g = utils_netperf.NetperfClient(guest_address, g_client_path, g_client_md5sum, g_client_link, client=client, port=port, username=username, password=password, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_client_g) netperf_server_h = utils_netperf.NetperfServer(remote_ip, server_path, md5sum, netperf_link, password=passwd, prompt=prompt, linesep=linesep, status_test_command=status_test_command, install=False, 
compile_option=compile_option_server_h) netperf_client_h = utils_netperf.NetperfClient(remote_ip, client_path, md5sum, netperf_link, password=passwd, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_client_h) netperf_server_g = utils_netperf.NetperfServer(guest_address, g_server_path, g_server_md5sum, g_server_link, client=client, port=port, username=username, password=password, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_server_g) error_context.base_context("Run netperf test between host and guest") error_context.context("Start netserver in guest.", logging.info) netperf_server_g.start() if netperf_server_h: error_context.context("Start netserver in host.", logging.info) netperf_server_h.start() error_context.context("Start Netperf in host", logging.info) test_option = "-l %s" % netperf_timeout netperf_client_h.bg_start(guest_address, test_option, client_num) if netperf_client_g: error_context.context("Start Netperf in guest", logging.info) netperf_client_g.bg_start(host_address, test_option, client_num) m_count = 0 while netperf_client_h.is_netperf_running(): m_count += 1 error_context.context("Start migration iterations: %s " % m_count, logging.info) vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env) finally: if netperf_server_g: if netperf_server_g.is_server_running(): netperf_server_g.stop() netperf_server_g.package.env_cleanup(True) if netperf_server_h: if netperf_server_h.is_server_running(): netperf_server_h.stop() netperf_server_h.package.env_cleanup(True) if netperf_client_h: if netperf_client_h.is_netperf_running(): netperf_client_h.stop() netperf_client_h.package.env_cleanup(True) if netperf_client_g: if netperf_client_g.is_netperf_running(): netperf_client_g.stop() netperf_client_g.package.env_cleanup(True) if session: session.close()
def run(test, params, env): """ QEMU flow caches stress test test 1) Make sure nf_conntrack is disabled in host and guest. If nf_conntrack is enabled in host, skip this case. 2) Boot guest with vhost=on/off. 3) Enable multi queues support in guest (optional). 4) After installation of netperf, run netserver in host. 5) Run netperf TCP_CRR protocal test in guest. 6) Transfer file between guest and host. 7) Check the md5 of copied file. This is a sample QEMU test, so people can get used to some of the test APIs. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ msg = "Make sure nf_conntrack is disabled in host and guest." error_context.context(msg, logging.info) if "nf_conntrack" in process.system_output("lsmod"): err = "nf_conntrack load in host, skip this case" test.cancel(err) params["start_vm"] = "yes" error_context.context("Boot up guest", logging.info) env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) if "nf_conntrack" in session.cmd_output("lsmod"): msg = "Unload nf_conntrack module in guest." error_context.context(msg, logging.info) black_str = "#disable nf_conntrack\\nblacklist nf_conntrack\\n" \ "blacklist nf_conntrack_ipv6\\nblacklist xt_conntrack\\n" \ "blacklist nf_conntrack_ftp\\nblacklist xt_state\\n" \ "blacklist iptable_nat\\nblacklist ipt_REDIRECT\\n" \ "blacklist nf_nat\\nblacklist nf_conntrack_ipv4" cmd = "echo -e '%s' >> /etc/modprobe.d/blacklist.conf" % black_str session.cmd(cmd) session = vm.reboot(session, timeout=timeout) if "nf_conntrack" in session.cmd_output("lsmod"): err = "Fail to unload nf_conntrack module in guest." test.error(err) netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), params["netperf_link"]) md5sum = params.get("pkg_md5sum") win_netperf_link = params.get("win_netperf_link") if win_netperf_link: win_netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"), win_netperf_link) win_netperf_md5sum = params.get("win_netperf_md5sum") server_path = params.get("server_path", "/var/tmp/") client_path = params.get("client_path", "/var/tmp/") win_netperf_path = params.get("win_netperf_path", "c:\\") client_num = params.get("netperf_client_num", 520) netperf_timeout = int(params.get("netperf_timeout", 600)) netperf_client_ip = vm.get_address() host_ip = utils_net.get_host_ip_address(params) netperf_server_ip = params.get("netperf_server_ip", host_ip) username = params.get("username", "root") password = params.get("password", "123456") passwd = params.get("hostpasswd", "123456") client = params.get("shell_client", "ssh") port = params.get("shell_port", "22") prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") linesep = params.get( "shell_linesep", "\n").encode().decode('unicode_escape') status_test_command = params.get("status_test_command", "echo $?") compile_option_client = params.get("compile_option_client", "") compile_option_server = params.get("compile_option_server", "") if int(params.get("queues", 1)) > 1 and params.get("os_type") == "linux": error_context.context("Enable multi queues support in guest.", logging.info) guest_mac = vm.get_mac_address() ifname = utils_net.get_linux_ifname(session, guest_mac) cmd = "ethtool -L %s combined %s" % (ifname, params.get("queues")) status, out = session.cmd_status_output(cmd) msg = "Fail to enable multi queues support in guest." 
msg += "Command %s fail output: %s" % (cmd, out) test.error(msg) if params.get("os_type") == "linux": session.cmd("iptables -F", ignore_all_errors=True) g_client_link = netperf_link g_client_path = client_path g_md5sum = md5sum elif params.get("os_type") == "windows": g_client_link = win_netperf_link g_client_path = win_netperf_path g_md5sum = win_netperf_md5sum error_context.context("Setup netperf in guest and host", logging.info) netperf_client = utils_netperf.NetperfClient(netperf_client_ip, g_client_path, g_md5sum, g_client_link, username=username, password=password, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_client) netperf_server = utils_netperf.NetperfServer(netperf_server_ip, server_path, md5sum, netperf_link, client, port, password=passwd, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_server) try: error_context.base_context("Run netperf test between host and guest.") error_context.context("Start netserver in host.", logging.info) netperf_server.start() error_context.context("Start Netperf in guest for %ss." % netperf_timeout, logging.info) test_option = "-t TCP_CRR -l %s -- -b 10 -D" % netperf_timeout netperf_client.bg_start(netperf_server_ip, test_option, client_num) utils_misc.wait_for(lambda: not netperf_client.is_netperf_running(), timeout=netperf_timeout, first=590, step=2) utils_test.run_file_transfer(test, params, env) finally: netperf_server.stop() netperf_client.package.env_cleanup(True) if session: session.close()
def run(test, params, env): """ Test hotplug vcpu devices with specified numa nodes. 1) Boot up guest without vcpu device and with multi numa nodes. 2) Hotplug vcpu devices and check successfully or not. (qemu side) 3) Check if the number of CPUs in guest changes accordingly. (guest side) 4) Check numa info in guest 5) Hotunplug vcpu devices 6) Recheck the numa info in guest :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def assign_numa_cpus(nodes, count): """Average allocation of cpu to each node.""" cpus = list(map(str, range(maxcpus))) avg_count = maxcpus / float(len(nodes)) if avg_count % count != 0: avg_count = round(avg_count / count) * count numa_cpus_list = [] last = 0.0 while last < maxcpus: numa_cpus_list.append(cpus[int(last):int(last + avg_count)]) last += avg_count return dict(zip(nodes, numa_cpus_list)) def get_guest_numa_cpus_info(): """Get guest numa information via numactl""" # Skip this step on windows guest if os_type == "windows": return numa_out = session.cmd_output("numactl -H | grep cpus") numa_cpus_info = re.findall(r"^node (\d+) cpus:([\d| ]*)$", numa_out, re.M) return dict(map(lambda x: (x[0], x[1].split()), numa_cpus_info)) os_type = params["os_type"] machine = params["machine_type"] login_timeout = params.get_numeric("login_timeout", 360) vm = env.get_vm(params["main_vm"]) maxcpus = vm.cpuinfo.maxcpus alignment = vm.cpuinfo.threads if machine.startswith("pseries") else 1 if not params.objects("vcpu_devices"): vcpus_count = (vm.cpuinfo.threads if machine.startswith("pseries") else 1) pluggable_cpus = vm.cpuinfo.maxcpus // vcpus_count // 2 params["vcpu_devices"] = " ".join( ["vcpu%d" % (count + 1) for count in range(pluggable_cpus)]) vm.destroy() if len(params.objects("vcpu_devices")) < 2: test.cancel("Insufficient maxcpus for multi-CPU hotplug") params["paused_after_start_vm"] = "no" error_context.base_context("Define the cpu list for each numa node", logging.info) numa_nodes = params.objects("guest_numa_nodes") node_ids = [params["numa_nodeid_%s" % node] for node in numa_nodes] node_cpus_mapping = assign_numa_cpus(node_ids, alignment) for node in numa_nodes: params["numa_cpus_%s" % node] = ",".join( node_cpus_mapping[params["numa_nodeid_%s" % node]]) error_context.context("Launch the guest with our assigned numa node", logging.info) vcpu_devices = params.objects("vcpu_devices") vm.create(params=params) if vm.is_paused(): vm.resume() session = vm.wait_for_login(timeout=login_timeout) if params.get_boolean("workaround_need"): win_wora.modify_driver(params, session) error_context.context("Check the number of guest CPUs after startup", logging.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): test.error("The number of guest CPUs is not equal to the qemu command " "line configuration") if os_type == "linux" and not utils_package.package_install( "numactl", session): test.cancel("Please install numactl to proceed") numa_before_plug = get_guest_numa_cpus_info() for vcpu_dev in vcpu_devices: error_context.context("hotplug vcpu device: %s" % vcpu_dev, logging.info) vm.hotplug_vcpu_device(vcpu_dev) if not utils_misc.wait_for( lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): test.fail("Actual number of guest CPUs is not equal to expected") if os_type == "linux": error_context.context("Check the CPU information of each numa node", logging.info) guest_numa_cpus = get_guest_numa_cpus_info() for node_id, node_cpus in node_cpus_mapping.items(): try: if guest_numa_cpus[node_id] 
!= node_cpus: logging.debug("Current guest numa info:\n%s", session.cmd_output("numactl -H")) test.fail("The cpu obtained by guest is inconsistent with " "we assigned.") except KeyError: test.error("Could not find node %s in guest." % node_id) logging.info("Number of each CPU in guest matches what we assign.") for vcpu_dev in vcpu_devices[::-1]: error_context.context("hotunplug vcpu device: %s" % vcpu_dev, logging.info) vm.hotunplug_vcpu_device(vcpu_dev) if not utils_misc.wait_for( lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): test.fail("Actual number of guest CPUs is not equal to expected") if get_guest_numa_cpus_info() != numa_before_plug: logging.debug("Current guest numa info:\n%s", session.cmd_output("numactl -H")) test.fail("Numa info of guest is incorrect after vcpu hotunplug.")
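
# Illustration only (not part of the test above): how assign_numa_cpus()
# spreads the vCPU list across the guest numa nodes.  This is a standalone
# copy of the same rounding logic, for demonstration; the test itself uses
# the nested helper defined above.
def _assign_numa_cpus(maxcpus, nodes, alignment):
    """Average allocation of cpus to each node in `alignment`-sized groups."""
    cpus = list(map(str, range(maxcpus)))
    avg_count = maxcpus / float(len(nodes))
    if avg_count % alignment != 0:
        avg_count = round(avg_count / alignment) * alignment
    numa_cpus_list = []
    last = 0.0
    while last < maxcpus:
        numa_cpus_list.append(cpus[int(last):int(last + avg_count)])
        last += avg_count
    return dict(zip(nodes, numa_cpus_list))


print(_assign_numa_cpus(8, ["0", "1"], 1))
# -> {'0': ['0', '1', '2', '3'], '1': ['4', '5', '6', '7']}
# With alignment=8 (a pseries guest with threads=8) the per-node share is
# rounded to whole 8-thread cores before slicing.
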
def run(test, params, env): """ Test 802.1Q vlan of NIC. For Linux guest: 1) Create two VMs. 2) load 8021q module in guest. 3) Setup vlans by ip in guest and using hard-coded ip address. 4) Enable arp_ignore for all ipv4 device in guest. 5) Repeat steps 2 - 4 in every guest. 6) Test by ping between same and different vlans of two VMs. 7) Test by flood ping between same vlan of two VMs. 8) Test by TCP data transfer between same vlan of two VMs. 9) Remove the named vlan-device. 10) Test maximal plumb/unplumb vlans. For Windows guest: 1) Create two VMs. 2) Set vlan tag in every guest and guest will get subnet ip(169.254) automatically. 3) Test by ping between same vlan of two VMs. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def add_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Creates a vlan-device on iface by cmd that assigned by cmd_type now only support 'ip' and 'vconfig' """ vlan_if = '%s.%s' % (iface, v_id) txt = "Create vlan interface '%s' on %s" % (vlan_if, iface) error_context.context(txt, logging.info) if cmd_type == "vconfig": cmd = "vconfig add %s %s" % (iface, v_id) elif cmd_type == "ip": v_name = "%s.%s" % (iface, v_id) cmd = "ip link add link %s %s type vlan id %s " % (iface, v_name, v_id) else: err_msg = "Unexpected vlan operation command: %s, " % cmd_type err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) session.cmd(cmd) def set_ip_vlan(session, v_id, vlan_ip, iface="eth0"): """ Set ip address of vlan interface """ iface = "%s.%s" % (iface, v_id) txt = "Assign IP '%s' to vlan interface '%s'" % (vlan_ip, iface) error_context.context(txt, logging.info) session.cmd("ifconfig %s %s" % (iface, vlan_ip)) def set_arp_ignore(session): """ Enable arp_ignore for all ipv4 device in guest """ error_context.context("Enable arp_ignore for all ipv4 device in guest", logging.info) ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore" session.cmd(ignore_cmd) def rem_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Removes the named vlan interface(iface+v_id) """ v_iface = '%s.%s' % (iface, v_id) if cmd_type == "vconfig": rem_vlan_cmd = "vconfig rem %s" % v_iface elif cmd_type == "ip": rem_vlan_cmd = "ip link delete %s" % v_iface else: err_msg = "Unexpected vlan operation command: %s, " % cmd_type err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) error_context.context("Remove vlan interface '%s'." 
% v_iface, logging.info) return session.cmd_status(rem_vlan_cmd) def nc_transfer(test, src, dst): """ Transfer file by netcat """ nc_port = utils_misc.find_free_port(1025, 5334, vm_ip[dst]) listen_cmd = params.get("listen_cmd") send_cmd = params.get("send_cmd") # listen in dst listen_cmd = listen_cmd % (nc_port, "receive") sessions[dst].sendline(listen_cmd) time.sleep(2) # send file from src to dst send_cmd = send_cmd % (vlan_ip[dst], str(nc_port), "file") sessions[src].cmd(send_cmd, timeout=60) try: sessions[dst].read_up_to_prompt(timeout=60) except aexpect.ExpectError: # kill server session_ctl[dst].cmd_output_safe("killall -9 nc") test.fail("Fail to receive file" " from vm%s to vm%s" % (src + 1, dst + 1)) # check MD5 message digest of receive file in dst output = sessions[dst].cmd_output("md5sum receive").strip() digest_receive = re.findall(r'(\w+)', output)[0] if digest_receive == digest_origin[src]: logging.info("File succeed received in vm %s", vlan_ip[dst]) else: logging.info("Digest_origin is %s", digest_origin[src]) logging.info("Digest_receive is %s", digest_receive) test.fail("File transferred differ from origin") sessions[dst].cmd("rm -f receive") def flood_ping(src, dst): """ Flood ping test # we must use a dedicated session because the aexpect # does not have the other method to interrupt the process in # the guest rather than close the session. """ txt = "Flood ping from %s interface %s to %s" % (vms[src].name, ifname[src], vlan_ip[dst]) error_context.context(txt, logging.info) session_flood = vms[src].wait_for_login(timeout=60) utils_test.ping(vlan_ip[dst], flood=True, interface=ifname[src], session=session_flood, timeout=10) session_flood.close() vms = [] sessions = [] session_ctl = [] ifname = [] vm_ip = [] digest_origin = [] vlan_ip = ['', ''] ip_unit = ['1', '2'] subnet = params.get("subnet", "192.168") vlan_num = int(params.get("vlan_num", 5)) maximal = int(params.get("maximal", 4094)) file_size = params.get("file_size", 4096) cmd_type = params.get("cmd_type", "ip") login_timeout = int(params.get("login_timeout", 360)) vms.append(env.get_vm(params["main_vm"])) vms.append(env.get_vm("vm2")) for vm_ in vms: vm_.verify_alive() for vm_index, vm in enumerate(vms): if params["os_type"] == "windows": session = vm.wait_for_serial_login(timeout=login_timeout) set_vlan_cmd = params.get("set_vlan_cmd") nicid = utils_net.get_windows_nic_attribute(session=session, key="netenabled", value=True, target="netconnectionID") set_vlan_cmd = set_vlan_cmd % nicid session.cmd(set_vlan_cmd, timeout=240) time.sleep(10) ifname.append(nicid) dev_mac = vm.virtnet[0].mac vm_ip.append(utils_net.get_guest_ip_addr(session, dev_mac, os_type="windows", linklocal=True)) logging.debug("IP address is %s in %s" % (vm_ip, vm.name)) session_ctl.append(session) continue error_context.base_context("Prepare test env on %s" % vm.name) session = vm.wait_for_login(timeout=login_timeout) if not session: err_msg = "Could not log into guest %s" % vm.name test.error(err_msg) sessions.append(session) logging.info("Logged in %s successful" % vm.name) session_ctl.append(vm.wait_for_login(timeout=login_timeout)) ifname.append(utils_net.get_linux_ifname(session, vm.get_mac_address())) # get guest ip vm_ip.append(vm.get_address()) logging.debug("IP address is %s in %s" % (vm_ip, vm.name)) # produce sized file in vm dd_cmd = "dd if=/dev/urandom of=file bs=1M count=%s" session.cmd(dd_cmd % file_size) # record MD5 message digest of file md5sum_output = session.cmd("md5sum file", timeout=60) 
digest_origin.append(re.findall(r'(\w+)', md5sum_output)[0]) # stop firewall in vm stop_firewall_cmd = "systemctl stop firewalld||service firewalld stop" session.cmd_output_safe(stop_firewall_cmd) error_context.context("Load 8021q module in guest %s" % vm.name, logging.info) session.cmd_output_safe("modprobe 8021q") error_context.context("Setup vlan environment in guest %s" % vm.name, logging.info) for vlan_i in range(1, vlan_num + 1): add_vlan(test, session, vlan_i, ifname[vm_index], cmd_type) v_ip = "%s.%s.%s" % (subnet, vlan_i, ip_unit[vm_index]) set_ip_vlan(session, vlan_i, v_ip, ifname[vm_index]) set_arp_ignore(session) if params["os_type"] == "windows": for vm_index, vm in enumerate(vms): status, output = utils_test.ping(dest=vm_ip[(vm_index + 1) % 2], count=10, session=session_ctl[vm_index], timeout=30) loss = utils_test.get_loss_ratio(output) if not loss and ("TTL=" in output): pass # window get loss=0 when ping fail sometimes, need further check else: test.fail("Guests ping test hit unexpected loss, error info: %s" % output) for sess in session_ctl: if sess: sess.close() return try: for vlan in range(1, vlan_num + 1): error_context.base_context("Test for vlan %s" % vlan, logging.info) error_context.context("Ping test between vlans", logging.info) interface = ifname[0] + '.' + str(vlan) for vm_index, vm in enumerate(vms): for vlan2 in range(1, vlan_num + 1): interface = ifname[vm_index] + '.' + str(vlan) dest = ".".join((subnet, str(vlan2), ip_unit[(vm_index + 1) % 2])) status, output = utils_test.ping(dest, count=2, interface=interface, session=sessions[vm_index], timeout=30) if ((vlan == vlan2) ^ (status == 0)): err_msg = "%s ping %s unexpected, " % (interface, dest) err_msg += "error info: %s" % output test.fail(err_msg) error_context.context("Flood ping between vlans", logging.info) vlan_ip[0] = ".".join((subnet, str(vlan), ip_unit[0])) vlan_ip[1] = ".".join((subnet, str(vlan), ip_unit[1])) flood_ping(0, 1) flood_ping(1, 0) error_context.context("Transferring data between vlans by nc", logging.info) nc_transfer(test, 0, 1) nc_transfer(test, 1, 0) finally: # If client can not connect the nc server, need kill the server. for session in session_ctl: session.cmd_output_safe("killall -9 nc") error_context.base_context("Remove vlan") for vm_index, vm in enumerate(vms): for vlan in range(1, vlan_num + 1): status = rem_vlan(test, sessions[vm_index], vlan, ifname[vm_index], cmd_type) if status: logging.error("Remove vlan %s failed" % vlan) # Plumb/unplumb maximal number of vlan interfaces if params.get("do_maximal_test", "no") == "yes": bound = maximal + 1 try: error_context.base_context("Vlan scalability test") error_context.context("Testing the plumb of vlan interface", logging.info) for vlan_index in range(1, bound): add_vlan(test, sessions[0], vlan_index, ifname[0], cmd_type) vlan_added = vlan_index if vlan_added != maximal: test.fail("Maximal interface plumb test failed") finally: for vlan_index in range(1, vlan_added + 1): if rem_vlan(test, sessions[0], vlan_index, ifname[0], cmd_type): logging.error("Remove vlan %s failed" % vlan_index) error_context.base_context("Vlan negative test") error_context.context("Create vlan with ID %s in guest" % bound, logging.info) try: add_vlan(test, sessions[0], bound, ifname[0], cmd_type) test.fail("Maximal ID allow to vlan is %s" % maximal) except aexpect.ShellCmdError as detail: pattern = params["msg_pattern"] if not re.search(pattern, detail.output, re.M | re.I): raise sessions.extend(session_ctl) for sess in sessions: if sess: sess.close()
def run(test, params, env): """ Verify that next-entry will get unset after reboot. Step: 1) Check if any error info in output of SLOF during booting. 2) Ensure the guest has at least two kernel versions. 3) Set a boot next entry and check it. 4) Reboot guest, check the kernel version and value of next_entry. 5) Reboot guest again, continue to check the kernel version. :param test: Qemu test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def get_kernels_info(): """ Get detailed information about each kernel version in the guest. """ kernels_info = {} for kernel in kernel_list: grubby_info = session.cmd_output("grubby --info=%s" % kernel, print_func=logging.info) entry_dict = dict((item.replace('"', '').split("=", 1) for item in grubby_info.splitlines())) kernels_info[int(entry_dict.pop("index"))] = entry_dict return kernels_info def check_kernel_version(k_index): """ Check whether the kernel version matches the kernel index. """ current_kernel = session.cmd_output("uname -r").strip() if guest_kernels[k_index]["kernel"].split("-", 1)[1] != current_kernel: logging.debug("The current kernel version is: %s", current_kernel) test.fail("The current kernel version is different from expected") logging.info("The kernel version matches the kernel index") get_kernel_list_cmd = params["get_kernel_list_cmd"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() error_context.base_context("Check the output of SLOF.", logging.info) content = slof.wait_for_loaded(vm, test)[0] slof.check_error(test, content) session = vm.wait_for_login() logging.info("Ensure the guest has at least two kernel versions") kernel_list = session.cmd_output(get_kernel_list_cmd).splitlines() if len(kernel_list) < 2: test.cancel("This test requires at least two kernel versions in the " "guest") if session.cmd_output("grubby --default-index").strip() != "0": logging.info("Ensure that the default kernel index of the guest is 0.") session.cmd("grubby --set-default-index=0") session = vm.reboot() guest_kernels = get_kernels_info() error_context.context( "Set a next boot entry other than the default one and" " check it", logging.info) next_entry = guest_kernels[1]["title"] session.cmd("grub2-reboot '%s'" % next_entry) grub_env = dict( (item.split("=", 1) for item in session.cmd_output("grub2-editenv list").splitlines())) grub_next_entry = grub_env["next_entry"] if grub_next_entry != next_entry: logging.debug("The 'next_entry' is: %s", grub_next_entry) test.fail("The next boot entry is not expected as we set") error_context.base_context( "Reboot guest, check the kernel version and " "'next_entry'", logging.info) session = vm.reboot(session) grub_env = dict( (item.split("=", 1) for item in session.cmd_output("grub2-editenv list").splitlines())) check_kernel_version(1) grub_next_entry = grub_env["next_entry"] if grub_next_entry: logging.debug("The 'next_entry' is: %s", grub_next_entry) test.fail("The 'next_entry' did not return to empty after reboot") error_context.context("Reboot guest again to check the kernel version") session = vm.reboot(session) check_kernel_version(0) session.close() vm.destroy(gracefully=True)
def run(test, params, env): """ KVM migration test: 1) Start a guest. 2) Start netperf server in guest. 3) Start multi netperf clients in host. 4) Migrate the guest in local during netperf clients working. 5) Repeatedly migrate VM and wait until netperf clients stopped. :param test: QEMU test object. :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ login_timeout = int(params.get("login_timeout", 360)) mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 netperf_timeout = int(params.get("netperf_timeout", "300")) client_num = int(params.get("client_num", "100")) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) guest_address = vm.get_address() host_address = utils_net.get_host_ip_address(params) remote_ip = params.get("remote_host", host_address) netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), params.get("netperf_link")) netperf_server_link = params.get("netperf_server_link_win") if netperf_server_link: netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_server_link) netperf_client_link = params.get("netperf_client_link_win", netperf_link) netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_client_link) server_path = params.get("server_path", "/var/tmp/") client_path = params.get("client_path", "/var/tmp/") server_path_win = params.get("server_path_win") client_path_win = params.get("client_path_win") if params.get("os_type") == "linux": session.cmd("iptables -F", ignore_all_errors=True) g_client_link = netperf_link g_server_link = netperf_link g_server_path = server_path g_client_path = client_path elif params.get("os_type") == "windows": g_client_link = netperf_client_link g_server_link = netperf_server_link g_server_path = server_path_win g_client_path = client_path_win netperf_client_g = None netperf_client_h = None netperf_server_g = None netperf_server_h = None try: netperf_client_g = utils_netperf.NetperfClient( guest_address, g_client_path, netperf_source=g_client_link, client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), username=params.get("username"), password=params.get("password"), linesep=params.get("shell_linesep", "\n").encode().decode('unicode_escape'), status_test_command=params.get("status_test_command", ""), compile_option=params.get("compile_option_client_g", "")) netperf_server_h = utils_netperf.NetperfServer( remote_ip, server_path, netperf_source=netperf_link, password=params.get("hostpassword"), compile_option=params.get("compile_option", "")) netperf_client_h = utils_netperf.NetperfClient( remote_ip, client_path, netperf_source=netperf_link, password=params.get("hostpassword"), compile_option=params.get("compile_option", "")) netperf_server_g = utils_netperf.NetperfServer( guest_address, g_server_path, netperf_source=g_server_link, username=params.get("username"), password=params.get("password"), client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), linesep=params.get("shell_linesep", "\n").encode().decode('unicode_escape'), status_test_command=params.get("status_test_command", "echo $?"), compile_option=params.get("compile_option_server_g", "")) error_context.base_context("Run netperf test between host and guest") 
error_context.context("Start netserver in guest.", logging.info) netperf_server_g.start() if netperf_server_h: error_context.context("Start netserver in host.", logging.info) netperf_server_h.start() error_context.context("Start Netperf in host", logging.info) test_option = "-l %s" % netperf_timeout netperf_client_h.bg_start(guest_address, test_option, client_num) if netperf_client_g: error_context.context("Start Netperf in guest", logging.info) netperf_client_g.bg_start(host_address, test_option, client_num) m_count = 0 while netperf_client_h.is_netperf_running(): m_count += 1 error_context.context("Start migration iterations: %s " % m_count, logging.info) vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env) finally: if netperf_server_g: if netperf_server_g.is_server_running(): netperf_server_g.stop() netperf_server_g.package.env_cleanup(True) if netperf_server_h: if netperf_server_h.is_server_running(): netperf_server_h.stop() netperf_server_h.package.env_cleanup(True) if netperf_client_h: if netperf_client_h.is_netperf_running(): netperf_client_h.stop() netperf_client_h.package.env_cleanup(True) if netperf_client_g: if netperf_client_g.is_netperf_running(): netperf_client_g.stop() netperf_client_g.package.env_cleanup(True) if session: session.close()
def run(test, params, env): """ Run qemu as an unprivileged user: 1) Run the qemu command as an unprivileged user (nobody by default). 2) Check that the qemu process runs with that user's uid/gid. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def get_user_ugid(username): """ return user uid and gid as a list """ user_uid = process.getoutput("id -u %s" % username).split() user_gid = process.getoutput("id -g %s" % username).split() return (user_uid, user_gid) def get_ugid_from_processid(pid): """ return a list[uid,euid,suid,fsuid,gid,egid,sgid,fsgid] of pid """ grep_ugid_cmd = "cat /proc/%s/status | grep -iE '^(U|G)id'" o = process.getoutput(grep_ugid_cmd % pid, shell=True) ugid = re.findall(r"(\d+)", o) # real UID, effective UID, saved set UID, and file system UID if ugid: return ugid else: test.error("Could not find the correct UID for process %s" % pid) exec_username = params.get("user_runas", "nobody") error_context.base_context("Run QEMU %s test:" % exec_username) error_context.context("Get the user uid and gid, using 'id -u/g username'") (exec_uid, exec_gid) = get_user_ugid(exec_username) error_context.context("Run the qemu as user '%s'" % exec_username) logging.info("The user %s: uid='%s', gid='%s'" % (exec_username, exec_uid, exec_gid)) params["extra_params"] = " -runas %s" % exec_username params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) failures = [] for pid in process.get_children_pids(vm.get_shell_pid()): error_context.context( "Get the process '%s' u/gid, using 'cat " "/proc/%s/status'" % (pid, pid), logging.info) qemu_ugid = get_ugid_from_processid(pid) logging.info("Process run as uid=%s,euid=%s,suid=%s,fsuid=%s" % tuple(qemu_ugid[0:4])) logging.info("Process run as gid=%s,egid=%s,sgid=%s,fsgid=%s" % tuple(qemu_ugid[4:])) error_context.context("Check if the user %s ugid is equal to the " "process %s" % (exec_username, pid)) # expected order: uid, euid, suid, fsuid, gid, egid, sgid, fsgid user_ugid_extend = exec_uid * 4 + exec_gid * 4 if user_ugid_extend != qemu_ugid: e_msg = ("Process %s error, expected ugid is %s, real is %s" % (pid, user_ugid_extend, qemu_ugid)) failures.append(e_msg) if failures: test.fail("FAIL: Test reported %s failures:\n%s" % (len(failures), "\n".join(failures)))
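The expected id list the test builds ("user_ugid_extend") is simply the user's uid repeated four times followed by the gid repeated four times, matching the eight fields on the Uid:/Gid: lines of /proc/<pid>/status (real, effective, saved and filesystem ids). A minimal standalone sketch of that parsing, outside the test framework and fed a made-up status snippet:

import re

def ugid_from_status(status_text):
    """Return [uid, euid, suid, fsuid, gid, egid, sgid, fsgid] as strings,
    pulled from the Uid:/Gid: lines of a /proc/<pid>/status dump."""
    ids = []
    for line in status_text.splitlines():
        if re.match(r"^[UG]id:", line, re.I):
            ids.extend(re.findall(r"\d+", line))
    return ids

# Illustrative snippet only; a real status file has many more lines.
sample = "Uid:\t99\t99\t99\t99\nGid:\t99\t99\t99\t99"
print(ugid_from_status(sample))  # ['99', '99', '99', '99', '99', '99', '99', '99']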
def run(test, params, env): """ Suspend a running Virtual Machine and verify its state. 1) Boot the vm 2) Do preparation operation (Optional) 3) Start a background process (Optional) 4) Stop the VM 5) Verify the status of VM is 'paused' 6) Verify the session has no response 7) Resume the VM 8) Verify the status of VM is 'running' 9) Re-login the guest 10) Do check operation (Optional) 11) Do clean operation (Optional) :param test: Kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=login_timeout) session_bg = None start_bg_process = params.get("start_bg_process") try: prepare_op = params.get("prepare_op") if prepare_op: error_context.context("Do preparation operation: '%s'" % prepare_op, logging.info) op_timeout = float(params.get("prepare_op_timeout", 60)) session.cmd(prepare_op, timeout=op_timeout) if start_bg_process: bg_cmd = params.get("bg_cmd") error_context.context("Start a background process: '%s'" % bg_cmd, logging.info) session_bg = vm.wait_for_login(timeout=login_timeout) bg_cmd_timeout = float(params.get("bg_cmd_timeout", 240)) args = (bg_cmd, bg_cmd_timeout) bg = utils_test.BackgroundTest(session_bg.cmd, args) bg.start() error_context.base_context("Stop the VM", logging.info) vm.pause() error_context.context("Verify the status of VM is 'paused'", logging.info) vm.verify_status("paused") error_context.context("Verify the session has no response", logging.info) if session.is_responsive(): msg = "Session is still responsive after stop" logging.error(msg) test.fail(msg) session.close() time.sleep(float(params.get("pause_time", 0))) error_context.base_context("Resume the VM", logging.info) vm.resume() error_context.context("Verify the status of VM is 'running'", logging.info) vm.verify_status("running") error_context.context("Re-login the guest", logging.info) session = vm.wait_for_login(timeout=login_timeout) if start_bg_process: if bg: bg.join() check_op = params.get("check_op") if check_op: error_context.context("Do check operation: '%s'" % check_op, logging.info) op_timeout = float(params.get("check_op_timeout", 60)) s, o = session.cmd_status_output(check_op, timeout=op_timeout) if s != 0: test.fail("Something wrong after stop continue, " "check command report: %s" % o) finally: try: clean_op = params.get("clean_op") if clean_op: error_context.context( "Do clean operation: '%s'" % clean_op, logging.info) # session close if exception raised, so get renew a session # to do cleanup step. session = vm.wait_for_login(timeout=login_timeout) op_timeout = float(params.get("clean_op_timeout", 60)) session.cmd(clean_op, timeout=op_timeout, ignore_all_errors=True) session.close() if session_bg: session_bg.close() except Exception as details: logging.warn( "Exception occur when clean test environment: %s" % details)
def run(test, params, env): """ Ansible playbook basic test: 1) Check ansible package exists 2) Launch the guest 3) Clone an ansible playbook repo 4) Generate the ansible-playbook command 5) Execute the playbook and verify the return status :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ guest_user = params["username"] guest_passwd = params["password"] step_time = params.get_numeric("step_time", 60) ansible_callback_plugin = params.get("ansible_callback_plugin") ansible_addl_opts = params.get("ansible_addl_opts", "") ansible_ssh_extra_args = params["ansible_ssh_extra_args"] ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") playbook_dir = params.get("playbook_dir", os.path.join(test.workdir, "ansible_playbook")) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir vms = env.get_all_vms() guest_ip_list = [] for vm in vms: vm.verify_alive() vm.wait_for_login() guest_ip_list.append(vm.get_address()) logging.info("Cloning %s", playbook_repo) process.run("git clone {src} {dst}".format(src=playbook_repo, dst=playbook_dir), verbose=False) error_context.base_context("Generate playbook related options.", logging.info) extra_vars = { "ansible_ssh_extra_args": ansible_ssh_extra_args, "ansible_ssh_pass": guest_passwd, "test_harness_log_dir": test_harness_log_dir } extra_vars.update(json.loads(ansible_extra_vars)) error_context.context("Execute the ansible playbook.", logging.info) playbook_executor = ansible.PlaybookExecutor( inventory="{},".format(",".join(guest_ip_list)), site_yml=toplevel_playbook, remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, addl_opts=ansible_addl_opts) ansible_log = "ansible_playbook.log" try: playbook_executor.wait_for_completed(playbook_timeout, step_time) except ansible.ExecutorTimeoutError as err: test.error(str(err)) else: if playbook_executor.get_status() != 0: test.fail("Ansible playbook execution failed, please check the {} " "for details.".format(ansible_log)) logging.info("Ansible playbook execution passed.") finally: playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close()
def run(test, params, env): """ Ansible playbook basic test: 1) Check ansible package exists 2) Launch the guest 3) Clone an ansible playbook repo 4) Generate the ansible-playbook command 5) Execute the playbook and verify the return status :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ sm = software_manager.SoftwareManager() if not (sm.check_installed("ansible") or sm.install("ansible")): test.cancel("ansible package install failed") guest_user = params["username"] guest_passwd = params["password"] ansible_callback_plugin = params.get("ansible_callback_plugin") ansible_addl_opts = params.get("ansible_addl_opts", "") ansible_ssh_extra_args = params["ansible_ssh_extra_args"] ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") playbook_dir = params.get("playbook_dir", os.path.join(test.workdir, "ansible_playbook")) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir guest_ip_list = [] for vm in env.get_all_vms(): vm.verify_alive() vm.wait_for_login() guest_ip_list.append(vm.get_address()) logging.info("Cloning %s", playbook_repo) process.run("git clone {src} {dst}".format(src=playbook_repo, dst=playbook_dir), verbose=False) error_context.base_context("Generate playbook related options.", logging.info) extra_vars = { "ansible_ssh_extra_args": ansible_ssh_extra_args, "ansible_ssh_pass": guest_passwd, "test_harness_log_dir": test_harness_log_dir } extra_vars.update(json.loads(ansible_extra_vars)) ansible_cmd_options = [ "ansible-playbook", "-u {}".format(guest_user), "-i {},".format(",".join(guest_ip_list)), "-e '{}'".format(json.dumps(extra_vars)), ansible_addl_opts, toplevel_playbook ] ansible_cmd = r" ".join(ansible_cmd_options) error_context.context("Execute the ansible playbook.", logging.info) env_vars = ({ "ANSIBLE_STDOUT_CALLBACK": ansible_callback_plugin } if ansible_callback_plugin else None) logging.info("Command of ansible playbook: '%s'", ansible_cmd) play_s, play_o = process.getstatusoutput(ansible_cmd, timeout=playbook_timeout, shell=False, env=env_vars) ansible_log = "ansible_playbook.log" with open(os.path.join(test_harness_log_dir, ansible_log), "w") as log_file: log_file.write(play_o) log_file.flush() if play_s != 0: test.fail("Ansible playbook execution failed, please check the {} " "for details.".format(ansible_log)) logging.info("Ansible playbook execution passed.")
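The command-line variant above assembles the ansible-playbook invocation by hand. The detail worth noticing is the trailing comma in the -i argument: ansible treats a comma-separated string ending in a comma as an inline host list rather than a path to an inventory file. A minimal sketch of the same assembly; the host addresses, credentials and playbook name are made up for illustration:

import json

guest_user = "root"                                  # hypothetical values
guest_ips = ["192.168.122.10", "192.168.122.11"]
extra_vars = {"ansible_ssh_pass": "123456",
              "test_harness_log_dir": "/tmp/harness_logs"}

ansible_cmd = " ".join([
    "ansible-playbook",
    "-u {}".format(guest_user),
    # the trailing comma makes ansible read this as an inline host list
    "-i {},".format(",".join(guest_ips)),
    "-e '{}'".format(json.dumps(extra_vars)),
    "site.yml",                                      # hypothetical playbook
])
print(ansible_cmd)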
def run(test, params, env): """ Qemu allocates hugepages from a specified node. Steps: 1) Set up 4G of hugepages on the specified node. 2) Set up 1G of hugepages on each idle node. 3) Mount the hugepages to /mnt/kvm_hugepage. 4) Boot a guest that only allocates hugepages from the specified node. 5) Check the hugepages used on every node. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ memory.drop_caches() hugepage_size = memory.get_huge_page_size() mem_size = int(normalize_data_size("%sM" % params["mem"], "K")) idle_node_mem = int(normalize_data_size("%sM" % params["idle_node_mem"], "K")) error_context.context("Get host numa topological structure.", logging.info) host_numa_node = utils_misc.NumaInfo() node_list = host_numa_node.get_online_nodes_withmem() idle_node_list = node_list.copy() node_meminfo = host_numa_node.get_all_node_meminfo() # iterate over a snapshot so the removals below do not skip nodes for node_id in list(node_list): error_context.base_context("Check MemFree on host numa node %s " "before setup." % node_id, logging.info) node_memfree = int(node_meminfo[node_id]["MemFree"]) if node_memfree < idle_node_mem: idle_node_list.remove(node_id) if node_memfree < mem_size: node_list.remove(node_id) if len(idle_node_list) < 2 or not node_list: test.cancel("Host does not have enough NUMA nodes with free memory " "to run the test, skipping test...") for node_id in node_list: error_context.base_context("Make the qemu process allocate HugePages " "only from node%s." % node_id, logging.info) params["target_nodes"] = "%s" % node_id params["target_num_node%s" % node_id] = math.ceil(mem_size / hugepage_size) error_context.context("Set up huge pages for the specified node%s." % node_id, logging.info) check_list = [_ for _ in idle_node_list if _ != node_id] for idle_node in check_list: params["target_nodes"] += " %s" % idle_node params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem / hugepage_size) error_context.context("Set up huge pages for idle node%s." % idle_node, logging.info) params["setup_hugepages"] = "yes" hp_config = test_setup.HugePageConfig(params) hp_config.setup() params["qemu_command_prefix"] = "numactl --membind=%s" % node_id params["start_vm"] = "yes" params["hugepage_path"] = hp_config.hugepage_path env_process.preprocess_vm(test, params, env, params["main_vm"]) try: vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login() meminfo = host_numa_node.get_all_node_meminfo() for index in check_list: error_context.base_context("Check HugePages_Free on host numa node " "%s after boot." % index, logging.info) hugepages_free = int(meminfo[index]["HugePages_Free"]) if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free: test.fail("Qemu still uses HugePages from another node. " "Expected: node%s, used: node%s." % (node_id, index)) finally: vm.destroy() hp_config.cleanup()
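The per-node page counts written into params are plain unit arithmetic: both the requested memory (via normalize_data_size(..., "K")) and the hugepage size (memory.get_huge_page_size()) are expressed in KiB, so the number of pages to reserve is their ratio rounded up. A small sketch with assumed values (4 GiB of guest memory, 2 MiB hugepages):

import math

# Assumed values, both in KiB, mirroring the units used by the test above.
mem_size_kb = 4 * 1024 * 1024      # 4 GiB guest memory
hugepage_size_kb = 2048            # 2 MiB hugepages

pages_for_guest = math.ceil(mem_size_kb / hugepage_size_kb)
print(pages_for_guest)             # 2048 hugepages reserved on the target node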
def run(test, params, env): """ Ansible playbook basic test: 1) Check ansible package exists 2) Launch the guest 3) Clone an ansible playbook repo 4) Generate the ansible-playbook command 5) Execute the playbook and verify the return status :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ guest_user = params["username"] guest_passwd = params["password"] step_time = params.get_numeric("step_time", 60) ansible_callback_plugin = params.get("ansible_callback_plugin") ansible_addl_opts = params.get("ansible_addl_opts", "") ansible_ssh_extra_args = params["ansible_ssh_extra_args"] ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") playbook_dir = params.get("playbook_dir", os.path.join(test.workdir, "ansible_playbook")) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir # Responsive migration specific parameters mq_listen_port = params.get_numeric("mq_listen_port", find_free_port()) wait_response_timeout = params.get_numeric("wait_response_timeout", 600) vms = env.get_all_vms() guest_ip_list = [] for vm in vms: vm.verify_alive() vm.wait_for_login() guest_ip_list.append(vm.get_address()) logging.info("Cloning %s", playbook_repo) process.run("git clone {src} {dst}".format(src=playbook_repo, dst=playbook_dir), verbose=False) error_context.base_context("Generate playbook related options.", logging.info) extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, "ansible_ssh_pass": guest_passwd, "mq_port": mq_listen_port, "test_harness_log_dir": test_harness_log_dir} extra_vars.update(json.loads(ansible_extra_vars)) error_context.context("Execute the ansible playbook.", logging.info) playbook_executor = ansible.PlaybookExecutor( inventory="{},".format(",".join(guest_ip_list)), site_yml=toplevel_playbook, remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, addl_opts=ansible_addl_opts ) mq_publisher = message_queuing.MQPublisher(mq_listen_port) try: error_context.base_context('Confirm remote subscriber has accessed to ' 'activate migrating guests.', logging.info) try: mq_publisher.confirm_access(wait_response_timeout) except message_queuing.MessageNotFoundError as err: logging.error(err) test.fail("Failed to capture the 'ACCESS' message.") logging.info("Already captured the 'ACCESS' message.") error_context.context("Migrate guests after subscriber accessed.", logging.info) for vm in vms: vm.migrate() except VMMigrateFailedError: error_context.context("Send the 'ALERT' message to notify the remote " "subscriber to stop the test.", logging.info) mq_publisher.alert() raise else: error_context.context("Send the 'APPROVE' message to notify the remote " "subscriber to continue the test.", logging.info) mq_publisher.approve() finally: ansible_log = "ansible_playbook.log" try: playbook_executor.wait_for_completed(playbook_timeout, step_time) except ansible.ExecutorTimeoutError as err: test.error(str(err)) else: if playbook_executor.get_status() != 0: test.fail("Ansible playbook execution failed, please check the " "{} for details.".format(ansible_log)) logging.info("Ansible playbook execution passed.") finally: playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close() mq_publisher.close()
def run(test, params, env): """ Test nic driver load/unload. 1) Boot a VM. 2) Get the NIC driver name. 3) Multi-session TCP transfer on test interface. 4) Repeatedly unload/load NIC driver during file transfer. 5) Check that the test interface still works. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def reset_guest_udevrules(session, rules_file, rules_content): """ Write guest udev rules, then reboot the guest and return the new session """ set_cmd = "echo '%s' > %s" % (rules_content, rules_file) session.cmd_output_safe(set_cmd) return vm.reboot() def all_threads_done(threads): """ Check whether all threads have finished """ for thread in threads: if thread.is_alive(): return False else: continue return True def all_threads_alive(threads): """ Check whether all threads are alive """ for thread in threads: if not thread.is_alive(): return False else: continue return True timeout = int(params.get("login_timeout", 360)) transfer_timeout = int(params.get("transfer_timeout", 1000)) filesize = int(params.get("filesize", 512)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) vm_mac_address = vm.get_mac_address() udev_rules_file = "/etc/udev/rules.d/70-persistent-net.rules" rules = params.get("rules") if not session.cmd_status("[ -e %s ]" % udev_rules_file): if not rules: test.cancel("You must set udev rules before the test") rules = rules % vm_mac_address session = reset_guest_udevrules(session, udev_rules_file, rules) error_context.base_context("Test env prepare") error_context.context("Get NIC interface name in guest.", logging.info) ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0)) # get ethernet driver from '/sys' directory. # ethtool can do the same thing and doesn't care about os type. # if we make sure all guests have ethtool, we can make a change here. sys_path = params.get("sys_path") % (ethname) # readlink in RHEL4.8 doesn't have '-e' param, should use '-f' in RHEL4.8.
readlink_cmd = params.get("readlink_command", "readlink -e") driver = os.path.basename( session.cmd("%s %s" % (readlink_cmd, sys_path)).strip()) logging.info("The guest interface %s using driver %s" % (ethname, driver)) error_context.context( "Host test file prepare, create %dMB file on host" % filesize, logging.info) tmp_dir = data_dir.get_tmp_dir() host_path = os.path.join( tmp_dir, "host_file_%s" % utils_misc.generate_random_string(8)) guest_path = os.path.join( "/home", "guest_file_%s" % utils_misc.generate_random_string(8)) cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, filesize) process.run(cmd) file_checksum = crypto.hash_file(host_path, algorithm="md5") error_context.context( "Guest test file prepare, Copy file %s from host to " "guest" % host_path, logging.info) vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout) if session.cmd_status("md5sum %s | grep %s" % (guest_path, file_checksum)): test.cancel("File MD5SUMs changed after copy to guest") logging.info("Test env prepare successfully") error_context.base_context("Nic driver load/unload testing", logging.info) session_serial = vm.wait_for_serial_login(timeout=timeout) try: error_context.context("Transfer file between host and guest", logging.info) threads = [] file_paths = [] host_file_paths = [] for sess_index in range(int(params.get("sessions_num", "10"))): sess_path = os.path.join("/home", "dst-%s" % sess_index) host_sess_path = os.path.join(tmp_dir, "dst-%s" % sess_index) thread1 = utils_misc.InterruptedThread( vm.copy_files_to, (host_path, sess_path), {"timeout": transfer_timeout}) thread2 = utils_misc.InterruptedThread( vm.copy_files_from, (guest_path, host_sess_path), {"timeout": transfer_timeout}) thread1.start() threads.append(thread1) thread2.start() threads.append(thread2) file_paths.append(sess_path) host_file_paths.append(host_sess_path) utils_misc.wait_for(lambda: all_threads_alive(threads), 60, 10, 1) time.sleep(5) error_context.context( "Repeatedly unload/load NIC driver during file " "transfer", logging.info) while not all_threads_done(threads): error_context.context("Shutdown the driver for NIC interface.", logging.info) session_serial.cmd_output_safe("ifconfig %s down" % ethname) error_context.context("Unload NIC driver.", logging.info) session_serial.cmd_output_safe("modprobe -r %s" % driver) error_context.context("Load NIC driver.", logging.info) session_serial.cmd_output_safe("modprobe %s" % driver) error_context.context("Activate NIC driver.", logging.info) session_serial.cmd_output_safe("ifconfig %s up" % ethname) session_serial.cmd_output_safe("sleep %s" % random.randint(10, 60)) # files md5sums check error_context.context("File transfer finished, checking files md5sums", logging.info) err_info = [] for copied_file in file_paths: if session_serial.cmd_status("md5sum %s | grep %s" % (copied_file, file_checksum)): err_msg = "Guest file %s md5sum changed" err_info.append(err_msg % copied_file) for copied_file in host_file_paths: if process.system("md5sum %s | grep %s" % (copied_file, file_checksum), shell=True): err_msg = "Host file %s md5sum changed" err_info.append(err_msg % copied_file) if err_info: test.error("files MD5SUMs changed after copying %s" % err_info) except Exception: for thread in threads: thread.join(suppress_exception=True) raise else: for thread in threads: thread.join() for copied_file in file_paths: session_serial.cmd("rm -rf %s" % copied_file) for copied_file in host_file_paths: process.system("rm -rf %s" % copied_file) session_serial.cmd("%s %s" % ("rm -rf", 
guest_path)) os.remove(host_path) session.close() session_serial.close()
def run(test, params, env): """ KVM shutdown test: 1) Log into a guest 2) Send a shutdown command to the guest, or issue a system_powerdown monitor command (depending on the value of shutdown_method) 3) Wait until the guest is down :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ timeout = int(params.get("login_timeout", 360)) shutdown_count = int(params.get("shutdown_count", 1)) shutdown_method = params.get("shutdown_method", "shell") sleep_time = float(params.get("sleep_before_powerdown", 10)) shutdown_command = params.get("shutdown_command") check_from_monitor = params.get("check_from_monitor", "no") == "yes" for i in range(shutdown_count): vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) error_context.base_context("shutting down the VM %s/%s" % (i + 1, shutdown_count), logging.info) if params.get("setup_runlevel") == "yes": error_context.context("Setup the runlevel for guest", logging.info) utils_test.qemu.setup_runlevel(params, session) if shutdown_method == "shell": # Send a shutdown command to the guest's shell session.sendline(shutdown_command) error_context.context("waiting VM to go down (shutdown shell cmd)", logging.info) elif shutdown_method == "system_powerdown": # Sleep for a while -- give the guest a chance to finish booting time.sleep(sleep_time) # Send a system_powerdown monitor command vm.monitor.system_powerdown() error_context.context("waiting VM to go down " "(system_powerdown monitor cmd)", logging.info) if not vm.wait_for_shutdown(360): test.fail("Guest refuses to go down") if check_from_monitor and params.get("disable_shutdown") == "yes": check_failed = False vm_status = vm.monitor.get_status() if vm.monitor.protocol == "qmp": if vm_status['status'] != "shutdown": check_failed = True else: if not re.findall(r"paused\s+\(shutdown\)", vm_status): check_failed = True if check_failed: test.fail("Status check from monitor is: %s" % str(vm_status)) if params.get("disable_shutdown") == "yes": # Quit the qemu process vm.destroy(gracefully=False) if i < shutdown_count - 1: session.close() env_process.preprocess_vm(test, params, env, params["main_vm"])
def run(test, params, env): """ QEMU flow caches stress test case, only for Linux. 1) Make sure nf_conntrack is disabled in host and guest. If nf_conntrack is enabled in the host, raise nf_conntrack_max instead. 2) Boot guest with vhost=on/off. 3) Enable multi queues support in guest (optional). 4) After installation of netperf, run netserver in host. 5) Run netperf TCP_CRR protocol test in guest. 6) Transfer file between guest and host. 7) Check the md5 of copied file. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def get_if_queues(ifname): """ Query interface queues with 'ethtool -l' :param ifname: interface name """ cmd = "ethtool -l %s" % ifname out = session.cmd_output(cmd) logging.info(out) nf_conntrack_max_set_cmd = params.get("nf_conntrack_max_set") logging.info("nf_conntrack_max_set_cmd is %s", nf_conntrack_max_set_cmd) msg = "Make sure nf_conntrack is disabled in host and guest." error_context.context(msg, logging.info) if str.encode("nf_conntrack") in process.system_output("lsmod"): process.system_output(nf_conntrack_max_set_cmd) params["start_vm"] = "yes" error_context.context("Boot up guest", logging.info) env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) if "nf_conntrack" in session.cmd_output("lsmod"): msg = "Unload nf_conntrack module in guest." error_context.context(msg, logging.info) black_str = "#disable nf_conntrack\\nblacklist nf_conntrack\\n" \ "blacklist nf_conntrack_ipv6\\nblacklist xt_conntrack\\n" \ "blacklist nf_conntrack_ftp\\nblacklist xt_state\\n" \ "blacklist iptable_nat\\nblacklist ipt_REDIRECT\\n" \ "blacklist nf_nat\\nblacklist nf_conntrack_ipv4" cmd = "echo -e '%s' >> /etc/modprobe.d/blacklist.conf" % black_str session.cmd(cmd) session = vm.reboot(session, timeout=timeout) if "nf_conntrack" in session.cmd_output("lsmod"): err = "nf_conntrack module still running in guest, " err += "set nf_conntrack_max instead." error_context.context(err, logging.info) session.cmd(nf_conntrack_max_set_cmd) netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), params.get("netperf_link")) md5sum = params.get("pkg_md5sum") client_num = params.get("netperf_client_num", 520) netperf_timeout = int(params.get("netperf_timeout", 600)) disable_firewall = params.get("disable_firewall", "") if int(params.get("queues", 1)) > 1 and params.get("os_type") == "linux": error_context.context("Enable multi queues support in guest.", logging.info) guest_mac = vm.get_mac_address() ifname = utils_net.get_linux_ifname(session, guest_mac) get_if_queues(ifname) try: cmd = "ethtool -L %s combined %s" % (ifname, params.get("queues")) status, out = session.cmd_status_output(cmd) except Exception: get_if_queues(ifname) msg = "Failed to enable multi queues support in guest."
msg += " Command '%s' failed." % cmd test.error(msg) if status: test.error("Failed to set queues with command '%s', output: %s" % (cmd, out)) logging.info("Command %s set queues succeeded", cmd) error_context.context("Setup netperf in guest", logging.info) if params.get("os_type") == "linux": session.cmd(disable_firewall, ignore_all_errors=True) g_client_link = netperf_link g_client_path = params.get("client_path", "/var/tmp/") netperf_client_ip = vm.get_address() username = params.get("username", "root") password = params.get("password", "123456") client = params.get("shell_client", "ssh") port = params.get("shell_port", "22") prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") linesep = params.get("shell_linesep", "\n").encode().decode('unicode_escape') status_test_command = params.get("status_test_command", "echo $?") compile_option_client = params.get("compile_option_client", "") netperf_client = utils_netperf.NetperfClient( netperf_client_ip, g_client_path, md5sum, g_client_link, client, port, username=username, password=password, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_client) error_context.context("Setup netperf in host", logging.info) host_ip = utils_net.get_host_ip_address(params) server_path = params.get("server_path", "/var/tmp/") server_shell_client = params.get("server_shell_client", "ssh") server_shell_port = params.get("server_shell_port", "22") server_passwd = params["hostpasswd"] server_username = params.get("host_username", "root") compile_option_server = params.get("compile_option_server", "") netperf_server = utils_netperf.NetperfServer( host_ip, server_path, md5sum, netperf_link, server_shell_client, server_shell_port, username=server_username, password=server_passwd, prompt=prompt, linesep=linesep, status_test_command=status_test_command, compile_option=compile_option_server) try: error_context.base_context("Run netperf test between host and guest.") error_context.context("Start netserver in host.", logging.info) netperf_server.start() error_context.context( "Start Netperf in guest for %ss." % netperf_timeout, logging.info) test_option = "-t TCP_CRR -l %s -- -b 10 -D" % netperf_timeout netperf_client.bg_start(host_ip, test_option, client_num) start_time = time.time() deviation_time = params.get_numeric("deviation_time") duration = time.time() - start_time max_run_time = netperf_timeout + deviation_time while duration < max_run_time: time.sleep(10) duration = time.time() - start_time status = netperf_client.is_netperf_running() if not status and duration < netperf_timeout - 10: test.fail("netperf terminated unexpectedly") logging.info("Waiting for the netperf test to finish: %ss elapsed", duration) if netperf_client.is_netperf_running(): test.fail("netperf still running, netperf hangs") else: logging.info("netperf ran successfully") finally: netperf_server.stop() netperf_client.cleanup(True) netperf_server.cleanup(True) if session: session.close()
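The end of the netperf run above is decided by a polling watchdog with two failure modes: the client disappearing well before the requested runtime means it crashed, while the client still running after the runtime plus an allowed deviation means it hangs. A condensed, framework-free sketch of that decision logic, with is_running standing in for the netperf client check:

import time

def watch_background_job(is_running, expected_runtime, deviation, poll=10):
    """Poll a background job and classify early exit vs. hang.

    is_running is a zero-argument callable returning True while the job
    is still running (a hypothetical stand-in for the real check).
    """
    start = time.monotonic()
    while time.monotonic() - start < expected_runtime + deviation:
        time.sleep(poll)
        elapsed = time.monotonic() - start
        if not is_running() and elapsed < expected_runtime - poll:
            return "terminated unexpectedly"
    return "hang" if is_running() else "finished"

# Quick demo with a job that is already gone: reports an early termination.
print(watch_background_job(lambda: False, expected_runtime=1, deviation=0, poll=0.2))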
def run(test, params, env): """ Test 802.1Q vlan of NIC. For Linux guest: 1) Create two VMs. 2) load 8021q module in guest. 3) Setup vlans by ip in guest and using hard-coded ip address. 4) Enable arp_ignore for all ipv4 device in guest. 5) Repeat steps 2 - 4 in every guest. 6) Test by ping between same and different vlans of two VMs. 7) Test by flood ping between same vlan of two VMs. 8) Test by TCP data transfer between same vlan of two VMs. 9) Remove the named vlan-device. 10) Test maximal plumb/unplumb vlans. For Windows guest: 1) Create two VMs. 2) Set vlan tag in every guest and guest will get subnet ip(169.254) automatically. 3) Test by ping between same vlan of two VMs. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def add_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Creates a vlan-device on iface by cmd that assigned by cmd_type now only support 'ip' and 'vconfig' """ vlan_if = '%s.%s' % (iface, v_id) txt = "Create vlan interface '%s' on %s" % (vlan_if, iface) error_context.context(txt, logging.info) if cmd_type == "vconfig": cmd = "vconfig add %s %s" % (iface, v_id) elif cmd_type == "ip": v_name = "%s.%s" % (iface, v_id) cmd = "ip link add link %s %s type vlan id %s " % (iface, v_name, v_id) else: err_msg = "Unexpected vlan operation command: %s, " % cmd_type err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) session.cmd(cmd) def set_ip_vlan(session, v_id, vlan_ip, iface="eth0"): """ Set ip address of vlan interface """ iface = "%s.%s" % (iface, v_id) txt = "Assign IP '%s' to vlan interface '%s'" % (vlan_ip, iface) error_context.context(txt, logging.info) session.cmd("ifconfig %s %s" % (iface, vlan_ip)) def set_arp_ignore(session): """ Enable arp_ignore for all ipv4 device in guest """ error_context.context("Enable arp_ignore for all ipv4 device in guest", logging.info) ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore" session.cmd(ignore_cmd) def rem_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Removes the named vlan interface(iface+v_id) """ v_iface = '%s.%s' % (iface, v_id) if cmd_type == "vconfig": rem_vlan_cmd = "vconfig rem %s" % v_iface elif cmd_type == "ip": rem_vlan_cmd = "ip link delete %s" % v_iface else: err_msg = "Unexpected vlan operation command: %s, " % cmd_type err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) error_context.context("Remove vlan interface '%s'." 
% v_iface, logging.info) return session.cmd_status(rem_vlan_cmd) def nc_transfer(test, src, dst): """ Transfer file by netcat """ nc_port = utils_misc.find_free_port(1025, 5334, vm_ip[dst]) listen_cmd = params.get("listen_cmd") send_cmd = params.get("send_cmd") # listen in dst listen_cmd = listen_cmd % (nc_port, "receive") sessions[dst].sendline(listen_cmd) time.sleep(2) # send file from src to dst send_cmd = send_cmd % (vlan_ip[dst], str(nc_port), "file") sessions[src].cmd(send_cmd, timeout=60) try: sessions[dst].read_up_to_prompt(timeout=60) except aexpect.ExpectError: # kill server session_ctl[dst].cmd_output_safe("killall -9 nc") test.fail("Fail to receive file" " from vm%s to vm%s" % (src + 1, dst + 1)) # check MD5 message digest of receive file in dst output = sessions[dst].cmd_output("md5sum receive").strip() digest_receive = re.findall(r'(\w+)', output)[0] if digest_receive == digest_origin[src]: logging.info("File succeed received in vm %s", vlan_ip[dst]) else: logging.info("Digest_origin is %s", digest_origin[src]) logging.info("Digest_receive is %s", digest_receive) test.fail("File transferred differ from origin") sessions[dst].cmd("rm -f receive") def flood_ping(src, dst): """ Flood ping test # we must use a dedicated session because the aexpect # does not have the other method to interrupt the process in # the guest rather than close the session. """ txt = "Flood ping from %s interface %s to %s" % ( vms[src].name, ifname[src], vlan_ip[dst]) error_context.context(txt, logging.info) session_flood = vms[src].wait_for_login(timeout=60) utils_test.ping(vlan_ip[dst], flood=True, interface=ifname[src], session=session_flood, timeout=10) session_flood.close() def get_netkvmco_path(session): """ Get the proper netkvmco.dll path from iso. :param session: a session to send cmd :return: the proper netkvmco.dll path """ viowin_ltr = virtio_win.drive_letter_iso(session) if not viowin_ltr: err = "Could not find virtio-win drive in guest" test.error(err) guest_name = virtio_win.product_dirname_iso(session) if not guest_name: err = "Could not get product dirname of the vm" test.error(err) guest_arch = virtio_win.arch_dirname_iso(session) if not guest_arch: err = "Could not get architecture dirname of the vm" test.error(err) middle_path = "%s\\%s" % (guest_name, guest_arch) find_cmd = 'dir /b /s %s\\netkvmco.dll | findstr "\\%s\\\\"' find_cmd %= (viowin_ltr, middle_path) netkvmco_path = session.cmd(find_cmd).strip() logging.info("Found netkvmco.dll file at %s", netkvmco_path) return netkvmco_path vms = [] sessions = [] session_ctl = [] ifname = [] vm_ip = [] digest_origin = [] vlan_ip = ['', ''] ip_unit = ['1', '2'] subnet = params.get("subnet", "192.168") vlan_num = int(params.get("vlan_num", 5)) maximal = int(params.get("maximal", 4094)) file_size = params.get("file_size", 4096) cmd_type = params.get("cmd_type", "ip") login_timeout = int(params.get("login_timeout", 360)) prepare_netkvmco_cmd = params.get("prepare_netkvmco_cmd") set_vlan_cmd = params.get("set_vlan_cmd") vms.append(env.get_vm(params["main_vm"])) vms.append(env.get_vm("vm2")) for vm_ in vms: vm_.verify_alive() for vm_index, vm in enumerate(vms): if params["os_type"] == "windows": session = vm.wait_for_login(timeout=login_timeout) session = utils_test.qemu.windrv_check_running_verifier( session, vm, test, "netkvm") netkvmco_path = get_netkvmco_path(session) session.cmd(prepare_netkvmco_cmd % netkvmco_path, timeout=240) session.close() session = vm.wait_for_serial_login(timeout=login_timeout) session.cmd(set_vlan_cmd) dev_mac = 
vm.virtnet[0].mac connection_id = utils_net.get_windows_nic_attribute( session, "macaddress", dev_mac, "netconnectionid") utils_net.restart_windows_guest_network(session, connection_id) time.sleep(10) nicid = utils_net.get_windows_nic_attribute( session=session, key="netenabled", value=True, target="netconnectionID") ifname.append(nicid) vm_ip.append( utils_net.get_guest_ip_addr(session, dev_mac, os_type="windows", linklocal=True)) logging.debug("IP address is %s in %s", vm_ip, vm.name) session_ctl.append(session) continue error_context.base_context("Prepare test env on %s" % vm.name) session = vm.wait_for_login(timeout=login_timeout) if not session: err_msg = "Could not log into guest %s" % vm.name test.error(err_msg) sessions.append(session) logging.info("Logged in %s successful", vm.name) session_ctl.append(vm.wait_for_login(timeout=login_timeout)) ifname.append(utils_net.get_linux_ifname(session, vm.get_mac_address())) # get guest ip vm_ip.append(vm.get_address()) logging.debug("IP address is %s in %s", vm_ip, vm.name) # produce sized file in vm dd_cmd = "dd if=/dev/urandom of=file bs=1M count=%s" session.cmd(dd_cmd % file_size) # record MD5 message digest of file md5sum_output = session.cmd("md5sum file", timeout=60) digest_origin.append(re.findall(r'(\w+)', md5sum_output)[0]) # stop firewall in vm stop_firewall_cmd = "systemctl stop firewalld||service firewalld stop" session.cmd_output_safe(stop_firewall_cmd) error_context.context("Load 8021q module in guest %s" % vm.name, logging.info) session.cmd_output_safe("modprobe 8021q") error_context.context("Setup vlan environment in guest %s" % vm.name, logging.info) for vlan_i in range(1, vlan_num + 1): add_vlan(test, session, vlan_i, ifname[vm_index], cmd_type) v_ip = "%s.%s.%s" % (subnet, vlan_i, ip_unit[vm_index]) set_ip_vlan(session, vlan_i, v_ip, ifname[vm_index]) set_arp_ignore(session) if params["os_type"] == "windows": for vm_index, vm in enumerate(vms): status, output = utils_test.ping(dest=vm_ip[(vm_index + 1) % 2], count=10, session=session_ctl[vm_index], timeout=30) loss = utils_test.get_loss_ratio(output) if not loss and ("TTL=" in output): pass # window get loss=0 when ping fail sometimes, need further check else: test.fail( "Guests ping test hit unexpected loss, error info: %s" % output) for sess in session_ctl: if sess: sess.close() return try: for vlan in range(1, vlan_num + 1): error_context.base_context("Test for vlan %s" % vlan, logging.info) error_context.context("Ping test between vlans", logging.info) interface = ifname[0] + '.' + str(vlan) for vm_index, vm in enumerate(vms): for vlan2 in range(1, vlan_num + 1): interface = ifname[vm_index] + '.' + str(vlan) dest = ".".join( (subnet, str(vlan2), ip_unit[(vm_index + 1) % 2])) status, output = utils_test.ping( dest, count=2, interface=interface, session=sessions[vm_index], timeout=30) if ((vlan == vlan2) ^ (status == 0)): err_msg = "%s ping %s unexpected, " % (interface, dest) err_msg += "error info: %s" % output test.fail(err_msg) error_context.context("Flood ping between vlans", logging.info) vlan_ip[0] = ".".join((subnet, str(vlan), ip_unit[0])) vlan_ip[1] = ".".join((subnet, str(vlan), ip_unit[1])) flood_ping(0, 1) flood_ping(1, 0) error_context.context("Transferring data between vlans by nc", logging.info) nc_transfer(test, 0, 1) nc_transfer(test, 1, 0) finally: # If client can not connect the nc server, need kill the server. 
for session in session_ctl: session.cmd_output_safe("killall -9 nc") error_context.base_context("Remove vlan") for vm_index, vm in enumerate(vms): for vlan in range(1, vlan_num + 1): status = rem_vlan(test, sessions[vm_index], vlan, ifname[vm_index], cmd_type) if status: logging.error("Remove vlan %s failed", vlan) # Plumb/unplumb maximal number of vlan interfaces if params.get("do_maximal_test", "no") == "yes": bound = maximal + 1 try: error_context.base_context("Vlan scalability test") error_context.context("Testing the plumb of vlan interface", logging.info) for vlan_index in range(1, bound): add_vlan(test, sessions[0], vlan_index, ifname[0], cmd_type) vlan_added = vlan_index if vlan_added != maximal: test.fail("Maximal interface plumb test failed") finally: for vlan_index in range(1, vlan_added + 1): if rem_vlan(test, sessions[0], vlan_index, ifname[0], cmd_type): logging.error("Remove vlan %s failed", vlan_index) error_context.base_context("Vlan negative test") error_context.context("Create vlan with ID %s in guest" % bound, logging.info) try: add_vlan(test, sessions[0], bound, ifname[0], cmd_type) test.fail("Maximal ID allow to vlan is %s" % maximal) except aexpect.ShellCmdError as detail: pattern = params["msg_pattern"] if not re.search(pattern, detail.output, re.M | re.I): raise sessions.extend(session_ctl) for sess in sessions: if sess: sess.close()
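The Linux part of the VLAN test relies on a simple naming and addressing convention: the 802.1Q subinterface is '<iface>.<vlan id>', and each guest's address on that subinterface uses the VLAN id as the third octet with ip_unit ('1' or '2') as the host part, so only interfaces on the same VLAN share a subnet. A small sketch of that convention with the test's default 192.168 prefix; the interface name is illustrative:

def vlan_ifname(iface, vlan_id):
    """802.1Q subinterface name used by the test, e.g. 'eth0.5'."""
    return "%s.%s" % (iface, vlan_id)

def vlan_addr(subnet, vlan_id, host_unit):
    """Per-VLAN IPv4 address: third octet is the VLAN id, last octet
    tells the two guests apart (ip_unit '1' or '2')."""
    return "%s.%s.%s" % (subnet, vlan_id, host_unit)

# VLAN 3 on the first guest -> interface 'eth0.3' with address 192.168.3.1
print(vlan_ifname("eth0", 3), vlan_addr("192.168", 3, "1"))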
def run(test, params, env): """ Test steps: 1) Exec the stap script on the host. 2) Boot the guest, and do some operations (if needed). 3) Check the output of the stap script. :param test: kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def create_patterns_reg(trace_key): """ Create a regular expression from the tracing keys; it is used to check that the systemtap output matches what is expected. """ pattern_reg = r"" for tracing_key in trace_key.split(): pattern_reg += r"%s=\d+," % tracing_key return pattern_reg.rstrip(",") error_context.base_context("Qemu_Tracing Test") error_context.context("Test start ...") probe_var_key = params.get("probe_var_key") checking_pattern_re = create_patterns_reg(probe_var_key) capdata_timeout = int(params.get("capdata_timeout", "360")) timeout = int(params.get("login_timeout", "360")) time_inter = int(params.get("time_inter", "1")) if params.get("extra_params"): params["extra_params"] = params.get("extra_params") if params.get("boot_with_cdrom") == 'yes': iso_path = "%s/test.iso" % data_dir.get_tmp_dir() create_cmd = "dd if=/dev/zero of=%s bs=1M count=10" % iso_path if process.system(create_cmd, ignore_status=True) != 0: test.cancel("Create test iso failed") params["cdrom_cd1"] = iso_path if params.get("start_vm", "yes") == "no": params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) vm.verify_alive() _params = params.object_params(vm.monitor.protocol) if _params.get("cmds_exec"): for cmd in _params.get("cmds_exec").split(","): if re.findall(":", cmd): cmd_type = cmd.split(":")[0] exec_cmds = cmd.split(":")[1] else: cmd_type = "bash" exec_cmds = cmd for cmd_exec in exec_cmds.split(";"): msg = "Execute %s cmd '%s'" % (cmd_type, cmd_exec) error_context.context(msg, logging.info) if cmd_type == "monitor": vm.monitor.send_args_cmd(cmd_exec) elif cmd_type == "bash": guest_session = vm.wait_for_login(timeout=timeout) guest_session.cmd(cmd_exec) error_context.context("Get the output of stap script", logging.info) stap_log_file = utils_misc.get_path(test.profdir, "systemtap.log") start_time = time.time() while (time.time() - start_time) < capdata_timeout: if os.path.isfile(stap_log_file): fd = open(stap_log_file, 'r') data = fd.read() if (not data) or (not re.findall(checking_pattern_re, data)): time.sleep(time_inter) fd.close() continue elif data and re.findall(checking_pattern_re, data): logging.info("Captured the data successfully") logging.info("The captured data looks like: %s" % re.findall(checking_pattern_re, data)[-1]) fd.close() break else: time.sleep(time_inter) else: test.error("Timed out capturing the stap log data")
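The pass criterion of the tracing test is purely textual: create_patterns_reg() turns the whitespace-separated probe keys into a regex of 'key=<digits>' pairs joined by commas, and the stap log is polled until a line matches. A standalone usage example of the same construction, with "pid len" as a made-up probe_var_key value:

import re

def create_patterns_reg(trace_key):
    """Build the same 'key=\\d+,key=\\d+' pattern as the helper above."""
    return ",".join(r"%s=\d+" % key for key in trace_key.split())

pattern = create_patterns_reg("pid len")             # hypothetical probe keys
print(pattern)                                       # pid=\d+,len=\d+
print(bool(re.search(pattern, "pid=1234,len=512")))  # True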
def run(test, params, env): """ Test Step: 1. Boot up two virtual machine 2. Set openflow rules 3. Run ping test, nc(tcp, udp) test, check whether openflow rules take effect. Params: :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def run_tcpdump_bg(vm, addresses, dump_protocol): """ Run tcpdump in background, tcpdump will exit once catch a packet match the rules. """ bg_session = vm.wait_for_login() if tcpdump_is_alive(bg_session): bg_session.cmd("killall -9 tcpdump") tcpdump_cmd = ("setsid tcpdump -iany -n -v %s and 'src %s and dst %s'" " -c 1 >/dev/null 2>&1") bg_session.sendline(tcpdump_cmd % (dump_protocol, addresses[0], addresses[1])) if not utils_misc.wait_for(lambda: tcpdump_is_alive(bg_session), 30, 0, 1, "Waiting tcpdump start..."): test.cancel("Error, can not run tcpdump") bg_session.close() def dump_catch_data(session, dump_log, catch_reg): """ Search data from dump_log """ dump_info = session.cmd_output("cat %s" % dump_log) if re.findall(catch_reg, dump_info, re.I): return True return False def tcpdump_is_alive(session): """ Check whether tcpdump is alive """ if session.cmd_status("pidof tcpdump"): return False return True def tcpdump_catch_packet_test(session, drop_flow=False): """ Check whether tcpdump catch match rules packets, once catch a packet match rules tcpdump will exit. when drop_flow is 'True', tcpdump couldn't catch any packets. """ packet_receive = not tcpdump_is_alive(session) if packet_receive == drop_flow: err_msg = "Error, flow %s" % (drop_flow and "was" or "wasn't") err_msg += " dropped, tcpdump " err_msg += "%s " % (packet_receive and "can" or "can not") err_msg += "receive the packets" test.error(err_msg) logging.info("Correct, flow %s dropped, tcpdump %s receive the packet" % ((drop_flow and "was" or "was not"), (packet_receive and "can" or "can not"))) def arp_entry_clean(entry=None): """ Clean arp catch in guest """ if not entry: arp_clean_cmd = "arp -n | awk '/^[1-2]/{print \"arp -d \" $1}'|sh" else: arp_clean_cmd = "arp -d %s" % entry for session in sessions: session.cmd_output_safe(arp_clean_cmd) def check_arp_info(session, entry, vm, match_mac=None): arp_info = session.cmd_output("arp -n") arp_entries = [_ for _ in arp_info.splitlines() if re.match(entry, _)] match_string = match_mac or "incomplete" if not arp_entries: test.error("Can not find arp entry in %s: %s" % (vm.name, arp_info)) if not re.findall(match_string, arp_entries[0], re.I): test.fail("Can not find the mac address" " %s of %s in arp" " entry %s" % (match_mac, vm.name, arp_entries[0])) def ping_test(session, dst, drop_flow=False): """ Ping test, check icmp """ ping_status, ping_output = utils_test.ping(dest=dst, count=10, timeout=20, session=session) # when drop_flow is true, ping should failed(return not zero) # drop_flow is false, ping should success packets_lost = 100 if ping_status and not drop_flow: test.error("Ping should success when not drop_icmp") elif not ping_status: packets_lost = utils_test.get_loss_ratio(ping_output) if drop_flow and packets_lost != 100: test.error("When drop_icmp, ping shouldn't works") if not drop_flow and packets_lost == 100: test.error("When not drop_icmp, ping should works") info_msg = "Correct, icmp flow %s dropped, ping '%s', " info_msg += "packets lost rate is: '%s'" logging.info(info_msg % ((drop_flow and "was" or "was not"), (ping_status and "failed" or "success"), packets_lost)) def run_ping_bg(vm, dst): """ Run ping in background """ ping_cmd = "ping %s" % dst 
session = vm.wait_for_login() logging.info("Ping %s in background" % dst) session.sendline(ping_cmd) return session def check_bg_ping(session): ping_pattern = r"\d+ bytes from \d+.\d+.\d+.\d+:" ping_pattern += r" icmp_seq=\d+ ttl=\d+ time=.*? ms" ping_failed_pattern = r"From .*? icmp_seq=\d+ Destination" ping_failed_pattern += r" Host Unreachable" try: out = session.read_until_output_matches([ping_pattern, ping_failed_pattern]) if re.search(ping_failed_pattern, out[1]): return False, out[1] else: return True, out[1] except Exception as msg: return False, msg def file_transfer(sessions, addresses, timeout): prepare_cmd = "dd if=/dev/zero of=/tmp/copy_file count=1024 bs=1M" md5_cmd = "md5sum /tmp/copy_file" port = params.get("shell_port") prompt = params.get("shell_prompt") username = params.get("username") password = params.get("password") sessions[0].cmd(prepare_cmd, timeout=timeout) ori_md5 = sessions[0].cmd_output(md5_cmd) scp_cmd = (r"scp -v -o UserKnownHostsFile=/dev/null " r"-o StrictHostKeyChecking=no " r"-o PreferredAuthentications=password -r " r"-P %s /tmp/copy_file %s@\[%s\]:/tmp/copy_file" % (port, username, addresses[1])) sessions[0].sendline(scp_cmd) remote.handle_prompts(sessions[0], username, password, prompt, 600) new_md5 = sessions[1].cmd_output(md5_cmd) for session in sessions: session.cmd("rm -f /tmp/copy_file") if new_md5 != ori_md5: test.fail("Md5 value changed after file transfer, " "original is %s and the new file" " is: %s" % (ori_md5, new_md5)) def nc_connect_test(sessions, addresses, drop_flow=False, nc_port="8899", udp_model=False): """ Nc connect test, check tcp and udp """ nc_log = "/tmp/nc_log" server_cmd = "nc -l %s" client_cmd = "echo client | nc %s %s" if udp_model: server_cmd += " -u -w 3" client_cmd += " -u -w 3" server_cmd += " > %s &" client_cmd += " &" try: sessions[1].cmd_output_safe(server_cmd % (nc_port, nc_log)) sessions[0].cmd_output_safe(client_cmd % (addresses[1], nc_port)) nc_protocol = udp_model and "UDP" or "TCP" nc_connect = False if utils_misc.wait_for( lambda: dump_catch_data(sessions[1], nc_log, "client"), 10, 0, 2, text="Wait '%s' connect" % nc_protocol): nc_connect = True if nc_connect == drop_flow: err_msg = "Error, '%s' " % nc_protocol err_msg += "flow %s " % (drop_flow and "was" or "was not") err_msg += "dropped, nc connect should" err_msg += " '%s'" % (nc_connect and "failed" or "success") test.error(err_msg) logging.info("Correct, '%s' flow %s dropped, and nc connect %s" % (nc_protocol, (drop_flow and "was" or "was not"), (nc_connect and "success" or "failed"))) finally: for session in sessions: session.cmd_output_safe("killall nc || killall ncat") session.cmd("%s %s" % (clean_cmd, nc_log), ignore_all_errors=True) def acl_rules_check(acl_rules, flow_options): flow_options = re.sub("action=", "actions=", flow_options) if "arp" in flow_options: flow_options = re.sub("nw_src=", "arp_spa=", flow_options) flow_options = re.sub("nw_dst=", "arp_tpa=", flow_options) acl_options = re.split(",", flow_options) for line in acl_rules.splitlines(): rule = [_.lower() for _ in re.split("[ ,]", line) if _] item_in_rule = 0 for acl_item in acl_options: if acl_item.lower() in rule: item_in_rule += 1 if item_in_rule == len(acl_options): return True return False def remove_plus_items(open_flow_rules): plus_items = ["duration", "n_packets", "n_bytes", "idle_age", "hard_age"] for plus_item in plus_items: open_flow_rules = re.sub("%s=.*?," % plus_item, "", open_flow_rules) return open_flow_rules br_name = params.get("netdst", "ovs0") timeout = 
int(params.get("login_timeout", '360')) prepare_timeout = int(params.get("prepare_timeout", '360')) clean_cmd = params.get("clean_cmd", "rm -f") sessions = [] addresses = [] vms = [] bg_ping_session = None if not utils_net.ovs_br_exists(br_name): test.cancel("%s isn't an openvswith bridge" % br_name) error_context.context("Init boot the vms") for vm_name in params.objects("vms"): vms.append(env.get_vm(vm_name)) for vm in vms: vm.verify_alive() sessions.append(vm.wait_for_login(timeout=timeout)) addresses.append(vm.get_address()) # set openflow rules: f_protocol = params.get("flow", "arp") f_base_options = "%s,nw_src=%s,nw_dst=%s" % (f_protocol, addresses[0], addresses[1]) for session in sessions: session.cmd("systemctl stop firewalld || service firewalld stop", ignore_all_errors=True) try: for drop_flow in [True, False]: if drop_flow: f_command = "add-flow" f_options = f_base_options + ",action=drop" drop_icmp = eval(params.get("drop_icmp", 'True')) drop_tcp = eval(params.get("drop_tcp", 'True')) drop_udp = eval(params.get("drop_udp", 'True')) else: f_command = "mod-flows" f_options = f_base_options + ",action=normal" drop_icmp = False drop_tcp = False drop_udp = False error_context.base_context("Test prepare") error_context.context("Do %s %s on %s" % (f_command, f_options, br_name)) utils_net.openflow_manager(br_name, f_command, f_options) acl_rules = utils_net.openflow_manager( br_name, "dump-flows").stdout.decode() if not acl_rules_check(acl_rules, f_options): test.fail("Can not find the rules from" " ovs-ofctl: %s" % acl_rules) error_context.context("Run tcpdump in guest %s" % vms[1].name, logging.info) run_tcpdump_bg(vms[1], addresses, f_protocol) if drop_flow or f_protocol is not "arp": error_context.context("Clean arp cache in both guest", logging.info) arp_entry_clean(addresses[1]) error_context.base_context( "Exec '%s' flow '%s' test" % (f_protocol, drop_flow and "drop" or "normal")) if drop_flow: error_context.context("Ping test form %s to %s" % (vms[0].name, vms[1].name), logging.info) ping_test(sessions[0], addresses[1], drop_icmp) if params.get("run_file_transfer") == "yes": error_context.context("Transfer file form %s to %s" % (vms[0].name, vms[1].name), logging.info) file_transfer(sessions, addresses, prepare_timeout) else: error_context.context("Ping test form %s to %s in background" % (vms[0].name, vms[1].name), logging.info) bg_ping_session = run_ping_bg(vms[0], addresses[1]) if f_protocol == 'arp' and drop_flow: error_context.context("Check arp inside %s" % vms[0].name, logging.info) check_arp_info(sessions[0], addresses[1], vms[0]) elif f_protocol == 'arp' or params.get("check_arp") == "yes": time.sleep(2) error_context.context("Check arp inside guests.", logging.info) for index, address in enumerate(addresses): sess_index = (index + 1) % 2 mac = vms[index].virtnet.get_mac_address(0) check_arp_info(sessions[sess_index], address, vms[index], mac) error_context.context("Run nc connect test via tcp", logging.info) nc_connect_test(sessions, addresses, drop_tcp) error_context.context("Run nc connect test via udp", logging.info) nc_connect_test(sessions, addresses, drop_udp, udp_model=True) error_context.context("Check tcpdump data catch", logging.info) tcpdump_catch_packet_test(sessions[1], drop_flow) finally: openflow_rules_ori = utils_net.openflow_manager( br_name, "dump-flows").stdout.decode() openflow_rules_ori = remove_plus_items(openflow_rules_ori) utils_net.openflow_manager(br_name, "del-flows", f_protocol) openflow_rules = utils_net.openflow_manager( br_name, 
"dump-flows").stdout.decode() openflow_rules = remove_plus_items(openflow_rules) removed_rule = list(set(openflow_rules_ori.splitlines()) - set(openflow_rules.splitlines())) if f_protocol == "tcp": error_context.context("Run nc connect test via tcp", logging.info) nc_connect_test(sessions, addresses) elif f_protocol == "udp": error_context.context("Run nc connect test via udp", logging.info) nc_connect_test(sessions, addresses, udp_model=True) for session in sessions: session.close() failed_msg = [] if (not removed_rule or not acl_rules_check(removed_rule[0], f_options)): failed_msg.append("Failed to delete %s" % f_options) if bg_ping_session: bg_ping_ok = check_bg_ping(bg_ping_session) bg_ping_session.close() if not bg_ping_ok[0]: failed_msg.append("There is something wrong happen in " "background ping: %s" % bg_ping_ok[1]) if failed_msg: test.fail(failed_msg)
def run(test, params, env): """ Suspend a running Virtual Machine and verify its state. 1) Boot the vm 2) Do preparation operation (Optional) 3) Start a background process (Optional) 4) Stop the VM 5) Verify the status of VM is 'paused' 6) Verify the session has no response 7) Resume the VM 8) Verify the status of VM is 'running' 9) Re-login the guest 10) Do check operation (Optional) 11) Do clean operation (Optional) :param test: Kvm test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=login_timeout) session_bg = None start_bg_process = params.get("start_bg_process") try: prepare_op = params.get("prepare_op") if prepare_op: error_context.context( "Do preparation operation: '%s'" % prepare_op, logging.info) op_timeout = float(params.get("prepare_op_timeout", 60)) session.cmd(prepare_op, timeout=op_timeout) if start_bg_process: bg_cmd = params.get("bg_cmd") error_context.context("Start a background process: '%s'" % bg_cmd, logging.info) session_bg = vm.wait_for_login(timeout=login_timeout) bg_cmd_timeout = float(params.get("bg_cmd_timeout", 240)) args = (bg_cmd, bg_cmd_timeout) bg = utils_test.BackgroundTest(session_bg.cmd, args) bg.start() error_context.base_context("Stop the VM", logging.info) vm.pause() error_context.context("Verify the status of VM is 'paused'", logging.info) vm.verify_status("paused") error_context.context("Verify the session has no response", logging.info) if session.is_responsive(): msg = "Session is still responsive after stop" logging.error(msg) test.fail(msg) session.close() time.sleep(float(params.get("pause_time", 0))) error_context.base_context("Resume the VM", logging.info) vm.resume() error_context.context("Verify the status of VM is 'running'", logging.info) vm.verify_status("running") error_context.context("Re-login the guest", logging.info) session = vm.wait_for_login(timeout=login_timeout) if start_bg_process: if bg: bg.join() check_op = params.get("check_op") if check_op: error_context.context("Do check operation: '%s'" % check_op, logging.info) op_timeout = float(params.get("check_op_timeout", 60)) s, o = session.cmd_status_output(check_op, timeout=op_timeout) if s != 0: test.fail("Something wrong after stop continue, " "check command report: %s" % o) finally: try: clean_op = params.get("clean_op") if clean_op: error_context.context("Do clean operation: '%s'" % clean_op, logging.info) # session close if exception raised, so get renew a session # to do cleanup step. session = vm.wait_for_login(timeout=login_timeout) op_timeout = float(params.get("clean_op_timeout", 60)) session.cmd(clean_op, timeout=op_timeout, ignore_all_errors=True) session.close() if session_bg: session_bg.close() except Exception as details: logging.warn("Exception occur when clean test environment: %s", details)
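# --- Illustrative sketch (not part of the test above) ---
# The stop/continue test above runs a guest command through
# utils_test.BackgroundTest and joins it only after the VM has been resumed.
# The tiny stand-in below (plain threading, no Avocado dependencies, made-up
# class name) shows the pattern being relied on: run a callable in a thread,
# remember its first exception, and re-raise it when join() is called.
import threading

class SimpleBackgroundTask(object):
    """Minimal stand-in for a background test helper (illustrative only)."""

    def __init__(self, func, args=()):
        self._func = func
        self._args = args
        self._error = None
        self._thread = threading.Thread(target=self._run, daemon=True)

    def _run(self):
        try:
            self._func(*self._args)
        except Exception as exc:      # keep the failure for join() to re-raise
            self._error = exc

    def start(self):
        self._thread.start()

    def join(self, timeout=None):
        self._thread.join(timeout)
        if self._error is not None:
            raise self._error

if __name__ == "__main__":
    import time
    task = SimpleBackgroundTask(time.sleep, args=(1,))
    task.start()
    # ... the foreground flow would pause/resume the VM here ...
    task.join()
    print("background task finished without errors")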
def run(test, params, env): """ QEMU nested block resize test 1) Boot the main vm as L1, attach scsi data disk 2) Pass-through L1 data disk to L2 3) Run io on the data disk in L2. 4) Execute block_resize for data disk image on host. 5) Check L1 status should keep running. 6) Check L2 status should keep running. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def _on_exit(obj, msg): logging.info("Receive exit msg:%s", msg) obj.set_msg_loop(False) status = msg.split(":")[1] if status != "0": test.fail("Get L2 guest unexpected exit message") def _on_resize(obj, msg): logging.info("Receive resize msg:%s", msg) data_image_params = params.object_params("stg0") data_image_size = params.get_numeric("new_image_size_stg0") data_image_filename = storage.get_image_filename( data_image_params, data_dir.get_data_dir()) data_image_dev = vm.get_block({'file': data_image_filename}) args = (None, data_image_size, data_image_dev) vm.monitor.block_resize(*args) time.sleep(2) vm.verify_status("running") guest_cmd_output = session.cmd("lsblk -dn", timeout=60).strip() logging.debug("Guest cmd output: '%s'", guest_cmd_output) obj.send_message("status-req") logging.info("Finish handle on_resize") def _on_status(obj, msg): logging.info("Receive status msg:%s", msg) status = msg.split(":")[1] # Notify L2 exit obj.send_message("exit") if status != "running": test.fail("Get unexpected status of L2 guest " + status) logging.info("Finish handle on_status") # Error contexts are used to give more info on what was # going on when one exception happened executing test code. error_context.context("Get the main VM", logging.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() # Handle nested ENV guest_user = params["username"] guest_passwd = params["password"] step_time = params.get_numeric("step_time", 60) ansible_callback_plugin = params.get("ansible_callback_plugin") ansible_addl_opts = params.get("ansible_addl_opts", "") ansible_ssh_extra_args = params["ansible_ssh_extra_args"] ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") playbook_dir = params.get("playbook_dir", os.path.join(test.workdir, "ansible_playbook")) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir mq_listen_port = params.get_numeric("mq_listen_port", 5000) guest_ip_list = [vm.get_address()] logging.info("Cloning %s", playbook_repo) process.run("git clone {src} {dst}".format(src=playbook_repo, dst=playbook_dir), verbose=False) error_context.base_context("Generate playbook related options.", logging.info) extra_vars = { "ansible_ssh_extra_args": ansible_ssh_extra_args, "ansible_ssh_pass": guest_passwd, "mq_port": mq_listen_port, "test_harness_log_dir": test_harness_log_dir } extra_vars.update(json.loads(ansible_extra_vars)) error_context.context("Execute the ansible playbook.", logging.info) playbook_executor = ansible.PlaybookExecutor( inventory="{},".format(",".join(guest_ip_list)), site_yml=toplevel_playbook, remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, addl_opts=ansible_addl_opts) # Handle cases mq_port = params.get("mq_port", 5000) wait_response_timeout = params.get_numeric("wait_response_timeout", 1800) mq_publisher = message_queuing.MQPublisher(mq_port, 
other_options="--broker") host = "127.0.0.1" logging.info("host:{} port:{}".format(host, mq_port)) client = message_queuing.MQClient(host, mq_port) time.sleep(2) client.register_msg("resize", _on_resize) client.register_msg("status-rsp:", _on_status) client.register_msg("exit:", _on_exit) try: client.msg_loop(timeout=wait_response_timeout) logging.debug("Finish msg_loop") finally: ansible_log = "ansible_playbook.log" try: playbook_executor.wait_for_completed(playbook_timeout, step_time) except ansible.ExecutorTimeoutError as err: test.error(str(err)) else: if playbook_executor.get_status() != 0: test.fail( "Ansible playbook execution failed, please check the " "{} for details.".format(ansible_log)) logging.info("Ansible playbook execution passed.") finally: playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close() client.close() mq_publisher.close() logging.debug("MQ closed")
def run(test, params, env):
    """
    QEMU flow caches stress test:
    1) Make sure nf_conntrack is disabled in host and guest.
       If nf_conntrack is enabled in host, skip this case.
    2) Boot guest with vhost=on/off.
    3) Enable multi queues support in guest (optional).
    4) After installation of netperf, run netserver in host.
    5) Run netperf TCP_CRR protocol test in guest.
    6) Transfer file between guest and host.
    7) Check the md5 of copied file.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    msg = "Make sure nf_conntrack is disabled in host and guest."
    error_context.context(msg, logging.info)
    if "nf_conntrack" in process.system_output("lsmod"):
        err = "nf_conntrack is loaded in host, skip this case"
        test.cancel(err)

    params["start_vm"] = "yes"
    error_context.context("Boot up guest", logging.info)
    env_process.preprocess_vm(test, params, env, params["main_vm"])
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    if "nf_conntrack" in session.cmd_output("lsmod"):
        msg = "Unload nf_conntrack module in guest."
        error_context.context(msg, logging.info)
        black_str = "#disable nf_conntrack\\nblacklist nf_conntrack\\n" \
                    "blacklist nf_conntrack_ipv6\\nblacklist xt_conntrack\\n" \
                    "blacklist nf_conntrack_ftp\\nblacklist xt_state\\n" \
                    "blacklist iptable_nat\\nblacklist ipt_REDIRECT\\n" \
                    "blacklist nf_nat\\nblacklist nf_conntrack_ipv4"
        cmd = "echo -e '%s' >> /etc/modprobe.d/blacklist.conf" % black_str
        session.cmd(cmd)
        session = vm.reboot(session, timeout=timeout)
        if "nf_conntrack" in session.cmd_output("lsmod"):
            err = "Fail to unload nf_conntrack module in guest."
            test.error(err)

    netperf_link = utils_misc.get_path(data_dir.get_deps_dir("netperf"),
                                       params["netperf_link"])
    md5sum = params.get("pkg_md5sum")
    win_netperf_link = params.get("win_netperf_link")
    if win_netperf_link:
        win_netperf_link = utils_misc.get_path(
            data_dir.get_deps_dir("netperf"), win_netperf_link)
    win_netperf_md5sum = params.get("win_netperf_md5sum")
    server_path = params.get("server_path", "/var/tmp/")
    client_path = params.get("client_path", "/var/tmp/")
    win_netperf_path = params.get("win_netperf_path", "c:\\")
    client_num = params.get("netperf_client_num", 520)
    netperf_timeout = int(params.get("netperf_timeout", 600))
    netperf_client_ip = vm.get_address()
    host_ip = utils_net.get_host_ip_address(params)
    netperf_server_ip = params.get("netperf_server_ip", host_ip)
    username = params.get("username", "root")
    password = params.get("password", "123456")
    passwd = params.get("hostpasswd", "123456")
    client = params.get("shell_client", "ssh")
    port = params.get("shell_port", "22")
    prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#")
    linesep = params.get("shell_linesep", "\n").encode().decode('unicode_escape')
    status_test_command = params.get("status_test_command", "echo $?")
    compile_option_client = params.get("compile_option_client", "")
    compile_option_server = params.get("compile_option_server", "")

    if int(params.get("queues", 1)) > 1 and params.get("os_type") == "linux":
        error_context.context("Enable multi queues support in guest.",
                              logging.info)
        guest_mac = vm.get_mac_address()
        ifname = utils_net.get_linux_ifname(session, guest_mac)
        cmd = "ethtool -L %s combined %s" % (ifname, params.get("queues"))
        status, out = session.cmd_status_output(cmd)
        msg = "Fail to enable multi queues support in guest. "
        msg += "Command %s failed, output: %s" % (cmd, out)
        if status != 0:
            test.error(msg)

    if params.get("os_type") == "linux":
        session.cmd("iptables -F", ignore_all_errors=True)
        g_client_link = netperf_link
        g_client_path = client_path
        g_md5sum = md5sum
    elif params.get("os_type") == "windows":
        g_client_link = win_netperf_link
        g_client_path = win_netperf_path
        g_md5sum = win_netperf_md5sum

    error_context.context("Setup netperf in guest and host", logging.info)
    netperf_client = utils_netperf.NetperfClient(
        netperf_client_ip, g_client_path, g_md5sum, g_client_link,
        username=username, password=password, prompt=prompt, linesep=linesep,
        status_test_command=status_test_command,
        compile_option=compile_option_client)
    netperf_server = utils_netperf.NetperfServer(
        netperf_server_ip, server_path, md5sum, netperf_link, client, port,
        password=passwd, prompt=prompt, linesep=linesep,
        status_test_command=status_test_command,
        compile_option=compile_option_server)
    try:
        error_context.base_context("Run netperf test between host and guest.")
        error_context.context("Start netserver in host.", logging.info)
        netperf_server.start()
        error_context.context(
            "Start Netperf in guest for %ss." % netperf_timeout, logging.info)
        test_option = "-t TCP_CRR -l %s -- -b 10 -D" % netperf_timeout
        netperf_client.bg_start(netperf_server_ip, test_option, client_num)
        utils_misc.wait_for(lambda: not netperf_client.is_netperf_running(),
                            timeout=netperf_timeout, first=590, step=2)
        utils_test.run_file_transfer(test, params, env)
    finally:
        netperf_server.stop()
        netperf_client.package.env_cleanup(True)
        if session:
            session.close()
def run(test, params, env): """ Test Step: 1. Boot up two virtual machine 2. Set openflow rules 3. Run ping test, nc(tcp, udp) test, check whether openflow rules take effect. Params: :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def run_tcpdump_bg(session, addresses, dump_protocol): """ Run tcpdump in background, tcpdump will exit once catch a packet match the rules. """ tcpdump_cmd = "killall -9 tcpdump; " tcpdump_cmd += "tcpdump -iany -n -v %s and 'src %s and dst %s' -c 1 &" session.cmd_output_safe(tcpdump_cmd % (dump_protocol, addresses[0], addresses[1])) if not utils_misc.wait_for(lambda: tcpdump_is_alive(session), 30, 0, 1, "Waiting tcpdump start..."): test.cancel("Error, can not run tcpdump") def dump_catch_data(session, dump_log, catch_reg): """ Search data from dump_log """ dump_info = session.cmd_output("cat %s" % dump_log) if re.findall(catch_reg, dump_info, re.I): return True return False def tcpdump_is_alive(session): """ Check whether tcpdump is alive """ if session.cmd_status("pidof tcpdump"): return False return True def tcpdump_catch_packet_test(session, drop_flow=False): """ Check whether tcpdump catch match rules packets, once catch a packet match rules tcpdump will exit. when drop_flow is 'True', tcpdump couldn't catch any packets. """ packet_receive = not tcpdump_is_alive(session) if packet_receive == drop_flow: err_msg = "Error, flow %s" % (drop_flow and "was" or "wasn't") err_msg += " dropped, tcpdump " err_msg += "%s " % (packet_receive and "can" or "can not") err_msg += "receive the packets" test.error(err_msg) logging.info("Correct, flow %s dropped, tcpdump %s receive the packet" % ((drop_flow and "was" or "was not"), (packet_receive and "can" or "can not"))) def arp_entry_clean(entry=None): """ Clean arp catch in guest """ if not entry: arp_clean_cmd = "arp -n | awk '/^[1-2]/{print \"arp -d \" $1}'|sh" else: arp_clean_cmd = "arp -d %s" % entry for session in sessions: session.cmd_output_safe(arp_clean_cmd) def check_arp_info(session, entry, vm, match_mac=None): arp_info = session.cmd_output("arp -n") arp_entries = [_ for _ in arp_info.splitlines() if re.match(entry, _)] match_string = match_mac or "incomplete" if not arp_entries: test.error("Can not find arp entry in %s: %s" % (vm.name, arp_info)) if not re.findall(match_string, arp_entries[0], re.I): test.fail("Can not find the mac address" " %s of %s in arp" " entry %s" % (mac, vm.name, arp_entries[0])) def ping_test(session, dst, drop_flow=False): """ Ping test, check icmp """ ping_status, ping_output = utils_test.ping(dest=dst, count=10, timeout=20, session=session) # when drop_flow is true, ping should failed(return not zero) # drop_flow is false, ping should success packets_lost = 100 if ping_status and not drop_flow: test.error("Ping should success when not drop_icmp") elif not ping_status: packets_lost = utils_test.get_loss_ratio(ping_output) if drop_flow and packets_lost != 100: test.error("When drop_icmp, ping shouldn't works") if not drop_flow and packets_lost == 100: test.error("When not drop_icmp, ping should works") info_msg = "Correct, icmp flow %s dropped, ping '%s', " info_msg += "packets lost rate is: '%s'" logging.info(info_msg % ((drop_flow and "was" or "was not"), (ping_status and "failed" or "success"), packets_lost)) def run_ping_bg(vm, dst): """ Run ping in background """ ping_cmd = "ping %s" % dst session = vm.wait_for_login() logging.info("Ping %s in background" % dst) session.sendline(ping_cmd) return 
session def check_bg_ping(session): ping_pattern = r"\d+ bytes from \d+.\d+.\d+.\d+:" ping_pattern += r" icmp_seq=\d+ ttl=\d+ time=.*? ms" ping_failed_pattern = r"From .*? icmp_seq=\d+ Destination" ping_failed_pattern += r" Host Unreachable" try: out = session.read_until_output_matches([ping_pattern, ping_failed_pattern]) if re.search(ping_failed_pattern, out[1]): return False, out[1] else: return True, out[1] except Exception as msg: return False, msg def file_transfer(sessions, addresses, timeout): prepare_cmd = "dd if=/dev/zero of=/tmp/copy_file count=1024 bs=1M" md5_cmd = "md5sum /tmp/copy_file" port = params.get("shell_port") prompt = params.get("shell_prompt") username = params.get("username") password = params.get("password") sessions[0].cmd(prepare_cmd, timeout=timeout) ori_md5 = sessions[0].cmd_output(md5_cmd) scp_cmd = (r"scp -v -o UserKnownHostsFile=/dev/null " r"-o StrictHostKeyChecking=no " r"-o PreferredAuthentications=password -r " r"-P %s /tmp/copy_file %s@\[%s\]:/tmp/copy_file" % (port, username, addresses[1])) sessions[0].sendline(scp_cmd) remote.handle_prompts(sessions[0], username, password, prompt, 600) new_md5 = sessions[1].cmd_output(md5_cmd) for session in sessions: session.cmd("rm -f /tmp/copy_file") if new_md5 != ori_md5: test.fail("Md5 value changed after file transfer, " "original is %s and the new file" " is: %s" % (ori_md5, new_md5)) def nc_connect_test(sessions, addresses, drop_flow=False, nc_port="8899", udp_model=False): """ Nc connect test, check tcp and udp """ nc_log = "/tmp/nc_log" server_cmd = "nc -l %s" client_cmd = "echo client | nc %s %s" if udp_model: server_cmd += " -u -w 3" client_cmd += " -u -w 3" server_cmd += " > %s &" client_cmd += " &" try: sessions[1].cmd_output_safe(server_cmd % (nc_port, nc_log)) sessions[0].cmd_output_safe(client_cmd % (addresses[1], nc_port)) nc_protocol = udp_model and "UDP" or "TCP" nc_connect = False if utils_misc.wait_for( lambda: dump_catch_data(sessions[1], nc_log, "client"), 10, 0, 2, text="Wait '%s' connect" % nc_protocol): nc_connect = True if nc_connect == drop_flow: err_msg = "Error, '%s' " % nc_protocol err_msg += "flow %s " % (drop_flow and "was" or "was not") err_msg += "dropped, nc connect should" err_msg += " '%s'" % (nc_connect and "failed" or "success") test.error(err_msg) logging.info("Correct, '%s' flow %s dropped, and nc connect %s" % (nc_protocol, (drop_flow and "was" or "was not"), (nc_connect and "success" or "failed"))) finally: for session in sessions: session.cmd_output_safe("killall nc || killall ncat") session.cmd("%s %s" % (clean_cmd, nc_log), ignore_all_errors=True) def acl_rules_check(acl_rules, flow_options): flow_options = re.sub("action=", "actions=", flow_options) if "arp" in flow_options: flow_options = re.sub("nw_src=", "arp_spa=", flow_options) flow_options = re.sub("nw_dst=", "arp_tpa=", flow_options) acl_options = re.split(",", flow_options) for line in acl_rules.splitlines(): rule = [_.lower() for _ in re.split("[ ,]", line) if _] item_in_rule = 0 for acl_item in acl_options: if acl_item.lower() in rule: item_in_rule += 1 if item_in_rule == len(acl_options): return True return False def remove_plus_items(open_flow_rules): plus_items = ["duration", "n_packets", "n_bytes", "idle_age", "hard_age"] for plus_item in plus_items: open_flow_rules = re.sub("%s=.*?," % plus_item, "", open_flow_rules) return open_flow_rules timeout = int(params.get("login_timeout", '360')) prepare_timeout = int(params.get("prepare_timeout", '360')) clean_cmd = params.get("clean_cmd", "rm -f") sessions = [] 
addresses = [] vms = [] bg_ping_session = None error_context.context("Init boot the vms") for vm_name in params.get("vms", "vm1 vm2").split(): vms.append(env.get_vm(vm_name)) for vm in vms: vm.verify_alive() sessions.append(vm.wait_for_login(timeout=timeout)) addresses.append(vm.get_address()) # set openflow rules: br_name = params.get("netdst", "ovs0") f_protocol = params.get("flow", "arp") f_base_options = "%s,nw_src=%s,nw_dst=%s" % (f_protocol, addresses[0], addresses[1]) for session in sessions: session.cmd("service iptables stop; iptables -F", ignore_all_errors=True) try: for drop_flow in [True, False]: if drop_flow: f_command = "add-flow" f_options = f_base_options + ",action=drop" drop_icmp = eval(params.get("drop_icmp", 'True')) drop_tcp = eval(params.get("drop_tcp", 'True')) drop_udp = eval(params.get("drop_udp", 'True')) else: f_command = "mod-flows" f_options = f_base_options + ",action=normal" drop_icmp = False drop_tcp = False drop_udp = False error_context.base_context("Test prepare") error_context.context("Do %s %s on %s" % (f_command, f_options, br_name)) utils_net.openflow_manager(br_name, f_command, f_options) acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout if not acl_rules_check(acl_rules, f_options): test.fail("Can not find the rules from" " ovs-ofctl: %s" % acl_rules) error_context.context("Run tcpdump in guest %s" % vms[1].name, logging.info) run_tcpdump_bg(sessions[1], addresses, f_protocol) if drop_flow or f_protocol is not "arp": error_context.context("Clean arp cache in both guest", logging.info) arp_entry_clean(addresses[1]) error_context.base_context( "Exec '%s' flow '%s' test" % (f_protocol, drop_flow and "drop" or "normal")) if drop_flow: error_context.context("Ping test form vm1 to vm2", logging.info) ping_test(sessions[0], addresses[1], drop_icmp) if params.get("run_file_transfer") == "yes": error_context.context("Transfer file form vm1 to vm2", logging.info) file_transfer(sessions, addresses, prepare_timeout) else: error_context.context("Ping test form vm1 to vm2 in " "background", logging.info) bg_ping_session = run_ping_bg(vms[0], addresses[1]) if f_protocol == 'arp' and drop_flow: error_context.context("Check arp inside %s" % vms[0].name, logging.info) check_arp_info(sessions[0], addresses[1], vms[0]) elif f_protocol == 'arp' or params.get("check_arp") == "yes": time.sleep(2) error_context.context("Check arp inside guests.", logging.info) for index, address in enumerate(addresses): sess_index = (index + 1) % 2 mac = vms[index].virtnet.get_mac_address(0) check_arp_info(sessions[sess_index], address, vms[index], mac) error_context.context("Run nc connect test via tcp", logging.info) nc_connect_test(sessions, addresses, drop_tcp) error_context.context("Run nc connect test via udp", logging.info) nc_connect_test(sessions, addresses, drop_udp, udp_model=True) error_context.context("Check tcpdump data catch", logging.info) tcpdump_catch_packet_test(sessions[1], drop_flow) finally: openflow_rules_ori = utils_net.openflow_manager(br_name, "dump-flows").stdout openflow_rules_ori = remove_plus_items(openflow_rules_ori) utils_net.openflow_manager(br_name, "del-flows", f_protocol) openflow_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout openflow_rules = remove_plus_items(openflow_rules) removed_rule = list(set(openflow_rules_ori.splitlines()) - set(openflow_rules.splitlines())) if f_protocol == "tcp": error_context.context("Run nc connect test via tcp", logging.info) nc_connect_test(sessions, addresses) elif f_protocol == "udp": 
error_context.context("Run nc connect test via udp", logging.info) nc_connect_test(sessions, addresses, udp_model=True) for session in sessions: session.close() failed_msg = [] if (not removed_rule or not acl_rules_check(removed_rule[0], f_options)): failed_msg.append("Failed to delete %s" % f_options) if bg_ping_session: bg_ping_ok = check_bg_ping(bg_ping_session) bg_ping_session.close() if not bg_ping_ok[0]: failed_msg.append("There is something wrong happen in " "background ping: %s" % bg_ping_ok[1]) if failed_msg: test.fail(failed_msg)
def run(test, params, env): """ Test hotplug vCPU device during boot stage. 1) Launch a guest without vCPU device. 2) Hotplug vCPU devices during boot stage and check. 3) Check if the number of CPUs changes after guest alive. 4) Reboot guest to hotunplug. (optional) 5) Hotunplug plugged vCPU devices during boot stage. (optional) 6) Recheck the number of CPUs after guest alive. (optional) :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ vcpu_devices = params.objects("vcpu_devices") unplug_during_boot = params.get_boolean("unplug_during_boot") boot_patterns = [r".*Linux version.*", r".*Kernel command line:.*"] reboot_patterns = [ r".*[Rr]ebooting.*", r".*[Rr]estarting system.*", r".*[Mm]achine restart.*" ] vm = env.get_vm(params["main_vm"]) vm.verify_alive() smp = vm.cpuinfo.smp maxcpus = vm.cpuinfo.maxcpus error_context.base_context("Hotplug vCPU devices during boot stage.", logging.info) error_context.context("Verify guest is in the boot stage.", logging.info) vm.serial_console.read_until_any_line_matches(boot_patterns) error_context.context("Hotplug vCPU devices, waiting for guest alive.", logging.info) for vcpu_device in vcpu_devices: vm.hotplug_vcpu_device(vcpu_device) vm.wait_for_login().close() error_context.context("Check number of CPU inside guest.", logging.info) current_guest_cpus = vm.get_cpu_count() if current_guest_cpus != maxcpus: test.fail("Actual number of guest CPUs(%s) is not equal to" " expected(%s) after hotplug." % (current_guest_cpus, maxcpus)) logging.info("CPU quantity(%s) in guest is correct.", current_guest_cpus) if unplug_during_boot: # 1) vm.reboot() will return a new session, which is not what we want. # 2) Send reboot command directly because it will close the ssh client # so we can not get the command status. error_context.base_context( "Reboot guest to boot stage, hotunplug the " "vCPU device.", logging.info) vm.wait_for_login().sendline(params["reboot_command"]) error_context.context("Verify guest is in boot stage after reboot.", logging.info) vm.serial_console.read_until_any_line_matches(reboot_patterns) vm.serial_console.read_until_any_line_matches(boot_patterns) error_context.context( "Hotunplug vCPU devices, waiting for guest " "alive.", logging.info) for vcpu_device in reversed(vcpu_devices): vm.hotunplug_vcpu_device(vcpu_device) vm.wait_for_login().close() error_context.context("Check number of CPU inside guest after unplug.", logging.info) current_guest_cpus = vm.get_cpu_count() if current_guest_cpus != smp: test.fail("Actual number of guest CPUs(%s) is not equal to " "expected(%s) after hotunplug." % (current_guest_cpus, smp)) logging.info("CPU quantity(%s) in guest is correct.", current_guest_cpus)
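# --- Illustrative sketch (not part of the test above) ---
# The hotplug-during-boot test above keys its timing off serial console
# output: it waits for an early boot message before plugging and for a
# reboot message before unplugging.  The snippet below applies the same kind
# of patterns to a few fabricated console lines, which is a cheap way to
# sanity-check the regular expressions outside a running guest.
import re

boot_patterns = [r".*Linux version.*", r".*Kernel command line:.*"]
reboot_patterns = [r".*[Rr]ebooting.*", r".*[Rr]estarting system.*",
                   r".*[Mm]achine restart.*"]

def first_match(lines, patterns):
    """Return the first line matching any pattern, or None."""
    for line in lines:
        if any(re.match(pattern, line) for pattern in patterns):
            return line
    return None

if __name__ == "__main__":
    fake_console = [
        "[    0.000000] Linux version 5.14.0 (mockbuild@host) ...",
        "[    0.000000] Kernel command line: root=/dev/vda1 console=ttyS0",
        "[  OK  ] Reached target Multi-User System.",
        "Rebooting.",
    ]
    print(first_match(fake_console, boot_patterns))
    print(first_match(fake_console, reboot_patterns))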
def run(test, params, env): """ Test hotplug vCPU device during boot stage. 1) Launch a guest without vCPU device. 2) Hotplug vCPU devices during boot stage and check. 3) Check if the number of CPUs changes after guest alive. 4) Reboot guest to hotunplug. (optional) 5) Hotunplug plugged vCPU devices during boot stage. (optional) 6) Recheck the number of CPUs after guest alive. (optional) :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ vcpu_devices = params.objects("vcpu_devices") unplug_during_boot = params.get_boolean("unplug_during_boot") boot_patterns = [ r".*Started udev Wait for Complete Device Initialization.*", r".*Finished .*Wait for udev To Complete Device Initialization.*" ] reboot_patterns = [ r".*[Rr]ebooting.*", r".*[Rr]estarting system.*", r".*[Mm]achine restart.*" ] vm = env.get_vm(params["main_vm"]) vm.verify_alive() error_context.base_context("Hotplug vCPU devices during boot stage.", logging.info) error_context.context("Verify guest is in the boot stage.", logging.info) vm.serial_console.read_until_any_line_matches(boot_patterns) error_context.context("Hotplug vCPU devices, waiting for guest alive.", logging.info) for vcpu_device in vcpu_devices: vm.hotplug_vcpu_device(vcpu_device) vm.wait_for_login().close() error_context.context("Check number of CPU inside guest.", logging.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): test.fail("Actual number of guest CPUs is not equal to expected") if unplug_during_boot: # 1) vm.reboot() will return a new session, which is not what we want. # 2) Send reboot command directly because it will close the ssh client # so we can not get the command status. error_context.base_context( "Reboot guest to boot stage, hotunplug the " "vCPU device.", logging.info) vm.wait_for_login().sendline(params["reboot_command"]) error_context.context("Verify guest is in boot stage after reboot.", logging.info) vm.serial_console.read_until_any_line_matches(reboot_patterns) vm.serial_console.read_until_any_line_matches(boot_patterns) error_context.context( "Hotunplug vCPU devices, waiting for guest " "alive.", logging.info) for vcpu_device in reversed(vcpu_devices): vm.hotunplug_vcpu_device(vcpu_device) vm.wait_for_login().close() error_context.context("Check number of CPU inside guest after unplug.", logging.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): test.fail("Actual number of guest CPUs is not equal to expected " "after hotunplug.")
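# --- Illustrative sketch (not part of the test above) ---
# cpu_utils.check_if_vm_vcpus_match_qemu() is used above as a black box; in
# essence the check compares the vCPU count QEMU was configured with against
# what the guest OS reports.  The helper below is a simplified, hypothetical
# version of that idea (the real utility may inspect more state, such as
# enabled versus merely hotpluggable vCPUs).
def vcpus_match(expected_vcpus, guest_nproc_output):
    """Compare the expected vCPU count with the guest's `nproc` output."""
    try:
        reported = int(guest_nproc_output.strip())
    except ValueError:
        return False
    return reported == expected_vcpus

if __name__ == "__main__":
    print(vcpus_match(4, "4\n"))    # expected: True
    print(vcpus_match(4, "2\n"))    # expected: False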