def raw_ping(command, timeout, session, output_func):
    """
    Low-level ping command execution.

    @param command: Ping command.
    @param timeout: Timeout of the ping command.
    @param session: Local execution hint or session to execute the ping
            command.
    @param output_func: Function to feed the command's output to
            (e.g. logging.debug).
    @return: A tuple (status, output) with the exit status and output of the
            ping command.
    """
    if session is None:
        process = kvm_subprocess.run_bg(command, output_func=output_func,
                                        timeout=timeout)

        # On timeout, send SIGINT to the ping process; ping catches SIGINT and
        # prints its statistics, so the packet loss ratio can still be parsed
        # from the output.
        if process.is_alive():
            kvm_utils.kill_process_tree(process.get_pid(), signal.SIGINT)

        status = process.get_status()
        output = process.get_output()

        process.close()
        return status, output
    else:
        output = ""
        try:
            output = session.cmd_output(command, timeout=timeout,
                                        print_func=output_func)
        except kvm_subprocess.ShellTimeoutError:
            # Send ctrl+c (SIGINT) through ssh session
            session.send("\003")
            try:
                output2 = session.read_up_to_prompt(print_func=output_func)
                output += output2
            except kvm_subprocess.ExpectTimeoutError, e:
                output += e.output
                # We also need to use this session to query the return value
                session.send("\003")

        session.sendline(session.status_test_command)
        try:
            o2 = session.read_up_to_prompt()
        except kvm_subprocess.ExpectError:
            status = -1
        else:
            try:
                status = int(re.findall(r"\d+", o2)[0])
            except (IndexError, ValueError):
                status = -1

        return status, output
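# Hypothetical usage sketch (not part of the original module): how raw_ping()
# might be driven through an existing guest shell session.  The address,
# packet count and helper name are illustrative only.
def example_raw_ping_usage(session):
    # Ping 10 times and allow 5 extra seconds for the command to finish.
    status, output = raw_ping("ping -c 10 192.168.122.100", 15, session,
                              logging.debug)
    if status != 0:
        logging.error("Ping exited with non-zero status %s", status)
    # The packet loss ratio can be extracted from ping's summary line.
    loss = re.findall(r"(\d+)% packet loss", output)
    loss_ratio = int(loss[0]) if loss else None
    return status, loss_ratio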
def run_timedrift(test, params, env):
    """
    Time drift test (mainly for Windows guests):

    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Run load on the guest and host.
    4) Take a second time reading.
    5) Stop the load and rest for a while.
    6) Take a third time reading.
    7) If the drift immediately after load is higher than a user-specified
       value (in %), fail.
       If the drift after the rest period is higher than a user-specified
       value, fail.

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = kvm_utils.env_get_vm(env, params.get("main_vm"))
    if not vm:
        raise error.TestError("VM object not found in environment")
    if not vm.is_alive():
        raise error.TestError("VM seems to be dead; Test requires a living VM")

    logging.info("Waiting for guest to be up...")

    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
    if not session:
        raise error.TestFail("Could not log into guest")

    logging.info("Logged in")

    # Collect test parameters:
    # Command to run to get the current time
    time_command = params.get("time_command")
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params.get("time_filter_re")
    # Time format for time.strptime()
    time_format = params.get("time_format")
    guest_load_command = params.get("guest_load_command")
    guest_load_stop_command = params.get("guest_load_stop_command")
    host_load_command = params.get("host_load_command")
    guest_load_instances = int(params.get("guest_load_instances", "1"))
    host_load_instances = int(params.get("host_load_instances", "0"))
    # CPU affinity mask for taskset
    cpu_mask = params.get("cpu_mask", "0xFF")
    load_duration = float(params.get("load_duration", "30"))
    rest_duration = float(params.get("rest_duration", "10"))
    drift_threshold = float(params.get("drift_threshold", "200"))
    drift_threshold_after_rest = float(params.get("drift_threshold_after_rest",
                                                  "200"))

    guest_load_sessions = []
    host_load_sessions = []

    # Remember the VM's previous CPU affinity
    prev_cpu_mask = commands.getoutput("taskset -p %s" % vm.get_pid())
    prev_cpu_mask = prev_cpu_mask.split()[-1]
    # Set the VM's CPU affinity
    commands.getoutput("taskset -p %s %s" % (cpu_mask, vm.get_pid()))

    try:
        # Get time before load
        host_time_0 = time.time()
        session.sendline(time_command)
        (match, s) = session.read_up_to_prompt()
        s = re.findall(time_filter_re, s)[0]
        guest_time_0 = time.mktime(time.strptime(s, time_format))

        # Run some load on the guest
        logging.info("Starting load on guest...")
        for i in range(guest_load_instances):
            load_session = vm.ssh_login()
            if not load_session:
                raise error.TestFail("Could not log into guest")
            load_session.set_output_prefix("(guest load %d) " % i)
            load_session.set_output_func(logging.debug)
            load_session.sendline(guest_load_command)
            guest_load_sessions.append(load_session)

        # Run some load on the host
        logging.info("Starting load on host...")
        for i in range(host_load_instances):
            host_load_sessions.append(
                kvm_subprocess.run_bg(host_load_command,
                                      output_func=logging.debug,
                                      output_prefix="(host load %d) " % i,
                                      timeout=0.5))
            # Set the CPU affinity of the shell running the load process
            pid = host_load_sessions[-1].get_shell_pid()
            commands.getoutput("taskset -p %s %s" % (cpu_mask, pid))
            # Try setting the CPU affinity of the load process itself
            pid = host_load_sessions[-1].get_pid()
            if pid:
                commands.getoutput("taskset -p %s %s" % (cpu_mask, pid))

        # Sleep for a while (during load)
        logging.info("Sleeping for %s seconds..." % load_duration)
        time.sleep(load_duration)

        # Get time delta after load
        host_time_1 = time.time()
        session.sendline(time_command)
        (match, s) = session.read_up_to_prompt()
        s = re.findall(time_filter_re, s)[0]
        guest_time_1 = time.mktime(time.strptime(s, time_format))

        # Report results
        host_delta = host_time_1 - host_time_0
        guest_delta = guest_time_1 - guest_time_0
        drift = 100.0 * (host_delta - guest_delta) / host_delta
        logging.info("Host duration: %.2f" % host_delta)
        logging.info("Guest duration: %.2f" % guest_delta)
        logging.info("Drift: %.2f%%" % drift)

    finally:
        logging.info("Cleaning up...")
        # Restore the VM's CPU affinity
        commands.getoutput("taskset -p %s %s" % (prev_cpu_mask, vm.get_pid()))
        # Stop the guest load
        if guest_load_stop_command:
            session.get_command_output(guest_load_stop_command)
        # Close all load shell sessions
        for load_session in guest_load_sessions:
            load_session.close()
        for load_session in host_load_sessions:
            load_session.close()

    # Sleep again (rest)
    logging.info("Sleeping for %s seconds..." % rest_duration)
    time.sleep(rest_duration)

    # Get time after rest
    host_time_2 = time.time()
    session.sendline(time_command)
    (match, s) = session.read_up_to_prompt()
    s = re.findall(time_filter_re, s)[0]
    guest_time_2 = time.mktime(time.strptime(s, time_format))

    # Report results
    host_delta_total = host_time_2 - host_time_0
    guest_delta_total = guest_time_2 - guest_time_0
    drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta
    logging.info("Total host duration including rest: %.2f" % host_delta_total)
    logging.info("Total guest duration including rest: %.2f" %
                 guest_delta_total)
    logging.info("Total drift after rest: %.2f%%" % drift_total)

    session.close()

    # Fail the test if necessary
    if drift > drift_threshold:
        raise error.TestFail("Time drift too large: %.2f%%" % drift)
    if drift_total > drift_threshold_after_rest:
        raise error.TestFail("Time drift too large after rest period: %.2f%%" %
                             drift_total)
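# Worked example of the drift formula used above (numbers are illustrative
# only): if the host measures host_delta = 30.00 s between the two readings
# while the guest clock only advances guest_delta = 29.40 s, then
#     drift = 100.0 * (30.00 - 29.40) / 30.00 = 2.0%
# and the test fails only if this value exceeds drift_threshold (given in %).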
def create(self, name=None, params=None, root_dir=None, for_migration=False,
           timeout=5.0):
    """
    Start the VM by running a qemu command.
    All parameters are optional. The following applies to all parameters
    but for_migration: If a parameter is not supplied, the corresponding
    value stored in the class attributes is used, and if it is supplied,
    it is stored for later use.

    @param name: The name of the object
    @param params: A dict containing VM params
    @param root_dir: Base directory for relative filenames
    @param for_migration: If True, start the VM with the -incoming option
    @param timeout: Time to wait for the VM to become alive
    """
    self.destroy()

    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO image
    iso = params.get("cdrom")
    if iso:
        iso = kvm_utils.get_path(root_dir, iso)
        if not os.path.exists(iso):
            logging.error("ISO file not found: %s" % iso)
            return False
        compare = False
        if params.get("md5sum_1m"):
            logging.debug("Comparing expected MD5 sum with MD5 sum of "
                          "first MB of ISO file...")
            actual_md5sum = kvm_utils.md5sum_file(iso, 1048576)
            expected_md5sum = params.get("md5sum_1m")
            compare = True
        elif params.get("md5sum"):
            logging.debug("Comparing expected MD5 sum with MD5 sum of ISO "
                          "file...")
            actual_md5sum = kvm_utils.md5sum_file(iso)
            expected_md5sum = params.get("md5sum")
            compare = True
        if compare:
            if actual_md5sum == expected_md5sum:
                logging.debug("MD5 sums match")
            else:
                logging.error("Actual MD5 sum differs from expected one")
                return False

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections
        redir_names = kvm_utils.get_sub_dict_names(params, "redirs")
        host_ports = kvm_utils.find_free_ports(5000, 6000, len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = kvm_utils.get_sub_dict(params, redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            self.vnc_port = kvm_utils.find_free_port(5900, 6000)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Make qemu command
        qemu_command = self.make_qemu_command()

        # Is this VM supposed to accept incoming migrations?
        if for_migration:
            # Find available migration port
            self.migration_port = kvm_utils.find_free_port(5200, 6000)
            # Add -incoming option to the qemu command
            qemu_command += " -incoming tcp:0:%d" % self.migration_port

        logging.debug("Running qemu command:\n%s", qemu_command)
        self.process = kvm_subprocess.run_bg(qemu_command, None,
                                             logging.debug, "(qemu) ")

        if not self.process.is_alive():
            logging.error("VM could not be created; "
                          "qemu command failed:\n%s" % qemu_command)
            logging.error("Status: %s" % self.process.get_status())
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            self.destroy()
            return False

        if not kvm_utils.wait_for(self.is_alive, timeout, 0, 1):
            logging.error("VM is not alive for some reason; "
                          "qemu command:\n%s" % qemu_command)
            self.destroy()
            return False

        # Get the output so far, to see if we have any problems with
        # hugepage setup.
        output = self.process.get_output()

        if "alloc_mem_area" in output:
            logging.error("Could not allocate hugepage memory; "
                          "qemu command:\n%s" % qemu_command)
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            return False

        logging.debug("VM appears to be alive with PID %d",
                      self.process.get_pid())
        return True

    finally:
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
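# Hypothetical usage sketch (not part of the original class): re-creating an
# existing VM object so that it waits for an incoming migration.  The helper
# name is illustrative only.
def example_create_for_migration(vm):
    # Reuse the name/params/root_dir already stored on the VM object and add
    # the -incoming option by passing for_migration=True.
    if not vm.create(for_migration=True):
        raise error.TestError("Failed to create VM for incoming migration")
    logging.info("VM waiting for migration on port %s", vm.migration_port)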
def run_timedrift(test, params, env):
    """
    Time drift test (mainly for Windows guests):

    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Run load on the guest and host.
    4) Take a second time reading.
    5) Stop the load and rest for a while.
    6) Take a third time reading.
    7) If the drift immediately after load is higher than a user-specified
       value (in %), fail.
       If the drift after the rest period is higher than a user-specified
       value, fail.

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    # Helper functions
    def set_cpu_affinity(pid, mask):
        """
        Set the CPU affinity of all threads of the process with PID pid.
        Do this recursively for all child processes as well.

        @param pid: The process ID.
        @param mask: The CPU affinity mask.
        @return: A dict containing the previous mask for each thread.
        """
        tids = commands.getoutput("ps -L --pid=%s -o lwp=" % pid).split()
        prev_masks = {}
        for tid in tids:
            prev_mask = commands.getoutput("taskset -p %s" % tid).split()[-1]
            prev_masks[tid] = prev_mask
            commands.getoutput("taskset -p %s %s" % (mask, tid))
        children = commands.getoutput("ps --ppid=%s -o pid=" % pid).split()
        for child in children:
            prev_masks.update(set_cpu_affinity(child, mask))
        return prev_masks

    def restore_cpu_affinity(prev_masks):
        """
        Restore the CPU affinity of several threads.

        @param prev_masks: A dict containing TIDs as keys and masks as values.
        """
        for tid, mask in prev_masks.items():
            commands.getoutput("taskset -p %s %s" % (mask, tid))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    # Collect test parameters:
    # Command to run to get the current time
    time_command = params.get("time_command")
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params.get("time_filter_re")
    # Time format for time.strptime()
    time_format = params.get("time_format")
    guest_load_command = params.get("guest_load_command")
    guest_load_stop_command = params.get("guest_load_stop_command")
    host_load_command = params.get("host_load_command")
    guest_load_instances = int(params.get("guest_load_instances", "1"))
    host_load_instances = int(params.get("host_load_instances", "0"))
    # CPU affinity mask for taskset
    cpu_mask = params.get("cpu_mask", "0xFF")
    load_duration = float(params.get("load_duration", "30"))
    rest_duration = float(params.get("rest_duration", "10"))
    drift_threshold = float(params.get("drift_threshold", "200"))
    drift_threshold_after_rest = float(
        params.get("drift_threshold_after_rest", "200"))

    guest_load_sessions = []
    host_load_sessions = []

    try:
        # Set the VM's CPU affinity
        prev_affinity = set_cpu_affinity(vm.get_shell_pid(), cpu_mask)

        try:
            # Open shell sessions with the guest
            logging.info("Starting load on guest...")
            for i in range(guest_load_instances):
                load_session = vm.login()
                # Temporarily set the output func to None so it is not called
                # while we change the callback function and its parameters
                load_session.set_output_func(None)
                load_session.set_output_params(())
                load_session.set_output_prefix("(guest load %d) " % i)
                load_session.set_output_func(logging.debug)
                guest_load_sessions.append(load_session)

            # Get time before load
            # (ht stands for host time, gt stands for guest time)
            (ht0, gt0) = kvm_test_utils.get_time(session, time_command,
                                                 time_filter_re, time_format)

            # Run some load on the guest
            for load_session in guest_load_sessions:
                load_session.sendline(guest_load_command)

            # Run some load on the host
            logging.info("Starting load on host...")
            for i in range(host_load_instances):
                host_load_sessions.append(
                    kvm_subprocess.run_bg(host_load_command,
                                          output_func=logging.debug,
                                          output_prefix="(host load %d) " % i,
                                          timeout=0.5))
                # Set the CPU affinity of the load process
                pid = host_load_sessions[-1].get_pid()
                set_cpu_affinity(pid, cpu_mask)

            # Sleep for a while (during load)
            logging.info("Sleeping for %s seconds...", load_duration)
            time.sleep(load_duration)

            # Get time delta after load
            (ht1, gt1) = kvm_test_utils.get_time(session, time_command,
                                                 time_filter_re, time_format)

            # Report results
            host_delta = ht1 - ht0
            guest_delta = gt1 - gt0
            drift = 100.0 * (host_delta - guest_delta) / host_delta
            logging.info("Host duration: %.2f", host_delta)
            logging.info("Guest duration: %.2f", guest_delta)
            logging.info("Drift: %.2f%%", drift)

        finally:
            logging.info("Cleaning up...")
            # Restore the VM's CPU affinity
            restore_cpu_affinity(prev_affinity)
            # Stop the guest load
            if guest_load_stop_command:
                session.cmd_output(guest_load_stop_command)
            # Close all load shell sessions
            for load_session in guest_load_sessions:
                load_session.close()
            for load_session in host_load_sessions:
                load_session.close()

        # Sleep again (rest)
        logging.info("Sleeping for %s seconds...", rest_duration)
        time.sleep(rest_duration)

        # Get time after rest
        (ht2, gt2) = kvm_test_utils.get_time(session, time_command,
                                             time_filter_re, time_format)

    finally:
        session.close()

    # Report results
    host_delta_total = ht2 - ht0
    guest_delta_total = gt2 - gt0
    drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta
    logging.info("Total host duration including rest: %.2f", host_delta_total)
    logging.info("Total guest duration including rest: %.2f",
                 guest_delta_total)
    logging.info("Total drift after rest: %.2f%%", drift_total)

    # Fail the test if necessary
    if abs(drift) > drift_threshold:
        raise error.TestFail("Time drift too large: %.2f%%" % drift)
    if abs(drift_total) > drift_threshold_after_rest:
        raise error.TestFail("Time drift too large after rest period: %.2f%%" %
                             drift_total)
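# Illustrative sketch of what set_cpu_affinity()/restore_cpu_affinity() above
# reduce to for a single thread (TID and mask values are assumptions):
#     taskset -p 1234        -> "pid 1234's current affinity mask: f" (saved)
#     taskset -p 0xFF 1234   -> pin thread 1234 to CPUs 0-7
# restore_cpu_affinity() later runs "taskset -p f 1234" to reinstate the saved
# mask, and the same steps are applied recursively to every child process.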
def run_timedrift(test, params, env):
    """
    Time drift test (mainly for Windows guests):

    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Run load on the guest and host.
    4) Take a second time reading.
    5) Stop the load and rest for a while.
    6) Take a third time reading.
    7) If the drift immediately after load is higher than a user-specified
       value (in %), fail.
       If the drift after the rest period is higher than a user-specified
       value, fail.

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    # Helper functions
    def set_cpu_affinity(pid, mask):
        """
        Set the CPU affinity of all threads of the process with PID pid.
        Do this recursively for all child processes as well.

        @param pid: The process ID.
        @param mask: The CPU affinity mask.
        @return: A dict containing the previous mask for each thread.
        """
        tids = commands.getoutput("ps -L --pid=%s -o lwp=" % pid).split()
        prev_masks = {}
        for tid in tids:
            prev_mask = commands.getoutput("taskset -p %s" % tid).split()[-1]
            prev_masks[tid] = prev_mask
            commands.getoutput("taskset -p %s %s" % (mask, tid))
        children = commands.getoutput("ps --ppid=%s -o pid=" % pid).split()
        for child in children:
            prev_masks.update(set_cpu_affinity(child, mask))
        return prev_masks

    def restore_cpu_affinity(prev_masks):
        """
        Restore the CPU affinity of several threads.

        @param prev_masks: A dict containing TIDs as keys and masks as values.
        """
        for tid, mask in prev_masks.items():
            commands.getoutput("taskset -p %s %s" % (mask, tid))

    def get_time(session, time_command, time_filter_re, time_format):
        """
        Returns the host time and guest time.

        @param session: A shell session.
        @param time_command: Command to issue to get the current guest time.
        @param time_filter_re: Regex filter to apply on the output of
                time_command in order to get the current time.
        @param time_format: Format string to pass to time.strptime() with the
                result of the regex filter.
        @return: A tuple containing the host time and guest time.
        """
        host_time = time.time()
        session.sendline(time_command)
        (match, s) = session.read_up_to_prompt()
        s = re.findall(time_filter_re, s)[0]
        guest_time = time.mktime(time.strptime(s, time_format))
        return (host_time, guest_time)

    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))
    session = kvm_test_utils.wait_for_login(vm)

    # Collect test parameters:
    # Command to run to get the current time
    time_command = params.get("time_command")
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params.get("time_filter_re")
    # Time format for time.strptime()
    time_format = params.get("time_format")
    guest_load_command = params.get("guest_load_command")
    guest_load_stop_command = params.get("guest_load_stop_command")
    host_load_command = params.get("host_load_command")
    guest_load_instances = int(params.get("guest_load_instances", "1"))
    host_load_instances = int(params.get("host_load_instances", "0"))
    # CPU affinity mask for taskset
    cpu_mask = params.get("cpu_mask", "0xFF")
    load_duration = float(params.get("load_duration", "30"))
    rest_duration = float(params.get("rest_duration", "10"))
    drift_threshold = float(params.get("drift_threshold", "200"))
    drift_threshold_after_rest = float(params.get("drift_threshold_after_rest",
                                                  "200"))

    guest_load_sessions = []
    host_load_sessions = []

    # Set the VM's CPU affinity
    prev_affinity = set_cpu_affinity(vm.get_pid(), cpu_mask)

    try:
        # Get time before load
        (host_time_0, guest_time_0) = get_time(session, time_command,
                                               time_filter_re, time_format)

        # Run some load on the guest
        logging.info("Starting load on guest...")
        for i in range(guest_load_instances):
            load_session = vm.remote_login()
            if not load_session:
                raise error.TestFail("Could not log into guest")
            load_session.set_output_prefix("(guest load %d) " % i)
            load_session.set_output_func(logging.debug)
            load_session.sendline(guest_load_command)
            guest_load_sessions.append(load_session)

        # Run some load on the host
        logging.info("Starting load on host...")
        for i in range(host_load_instances):
            host_load_sessions.append(
                kvm_subprocess.run_bg(host_load_command,
                                      output_func=logging.debug,
                                      output_prefix="(host load %d) " % i,
                                      timeout=0.5))
            # Set the CPU affinity of the load process
            pid = host_load_sessions[-1].get_pid()
            set_cpu_affinity(pid, cpu_mask)

        # Sleep for a while (during load)
        logging.info("Sleeping for %s seconds..." % load_duration)
        time.sleep(load_duration)

        # Get time delta after load
        (host_time_1, guest_time_1) = get_time(session, time_command,
                                               time_filter_re, time_format)

        # Report results
        host_delta = host_time_1 - host_time_0
        guest_delta = guest_time_1 - guest_time_0
        drift = 100.0 * (host_delta - guest_delta) / host_delta
        logging.info("Host duration: %.2f" % host_delta)
        logging.info("Guest duration: %.2f" % guest_delta)
        logging.info("Drift: %.2f%%" % drift)

    finally:
        logging.info("Cleaning up...")
        # Restore the VM's CPU affinity
        restore_cpu_affinity(prev_affinity)
        # Stop the guest load
        if guest_load_stop_command:
            session.get_command_output(guest_load_stop_command)
        # Close all load shell sessions
        for load_session in guest_load_sessions:
            load_session.close()
        for load_session in host_load_sessions:
            load_session.close()

    # Sleep again (rest)
    logging.info("Sleeping for %s seconds..." % rest_duration)
    time.sleep(rest_duration)

    # Get time after rest
    (host_time_2, guest_time_2) = get_time(session, time_command,
                                           time_filter_re, time_format)

    # Report results
    host_delta_total = host_time_2 - host_time_0
    guest_delta_total = guest_time_2 - guest_time_0
    drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta
    logging.info("Total host duration including rest: %.2f" % host_delta_total)
    logging.info("Total guest duration including rest: %.2f" %
                 guest_delta_total)
    logging.info("Total drift after rest: %.2f%%" % drift_total)

    session.close()

    # Fail the test if necessary
    if drift > drift_threshold:
        raise error.TestFail("Time drift too large: %.2f%%" % drift)
    if drift_total > drift_threshold_after_rest:
        raise error.TestFail("Time drift too large after rest period: %.2f%%" %
                             drift_total)
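# Illustrative example of the parameters consumed by get_time() above for a
# Windows guest (the exact values are assumptions taken from a typical test
# configuration, not part of this file):
#     time_command   = "echo TIME: %date% %time%"
#     time_filter_re = r"(?<=TIME: \w\w\w ).{19}(?=\.\d\d)"
#     time_format    = "%m/%d/%Y %H:%M:%S"
# get_time() sends time_command to the guest, extracts the substring matched
# by time_filter_re and converts it with time.strptime()/time.mktime() into
# seconds since the epoch, alongside the host's time.time() reading.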
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
           migration_mode=None, mac_source=None):
    """
    Start the VM by running a qemu command.
    All parameters are optional. If name, params or root_dir are not
    supplied, the respective values stored as class attributes are used.

    @param name: The name of the object
    @param params: A dict containing VM params
    @param root_dir: Base directory for relative filenames
    @param timeout: Time to wait for the monitor connections to succeed
    @param migration_mode: If supplied, start VM for incoming migration using
            this protocol (either 'tcp', 'unix' or 'exec')
    @param mac_source: A VM object from which to copy MAC addresses. If not
            specified, new addresses will be generated.
    """
    self.destroy()

    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO image
    iso = params.get("cdrom")
    if iso:
        iso = kvm_utils.get_path(root_dir, iso)
        if not os.path.exists(iso):
            logging.error("ISO file not found: %s" % iso)
            return False
        compare = False
        if params.get("md5sum_1m"):
            logging.debug("Comparing expected MD5 sum with MD5 sum of "
                          "first MB of ISO file...")
            actual_hash = utils.hash_file(iso, 1048576, method="md5")
            expected_hash = params.get("md5sum_1m")
            compare = True
        elif params.get("md5sum"):
            logging.debug("Comparing expected MD5 sum with MD5 sum of ISO "
                          "file...")
            actual_hash = utils.hash_file(iso, method="md5")
            expected_hash = params.get("md5sum")
            compare = True
        elif params.get("sha1sum"):
            logging.debug("Comparing expected SHA1 sum with SHA1 sum of "
                          "ISO file...")
            actual_hash = utils.hash_file(iso, method="sha1")
            expected_hash = params.get("sha1sum")
            compare = True
        if compare:
            if actual_hash == expected_hash:
                logging.debug("Hashes match")
            else:
                logging.error("Actual hash differs from expected one")
                return False

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections
        redir_names = kvm_utils.get_sub_dict_names(params, "redirs")
        host_ports = kvm_utils.find_free_ports(5000, 6000, len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = kvm_utils.get_sub_dict(params, redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Generate netdev IDs for all NICs
        self.netdev_id = []
        for nic in kvm_utils.get_sub_dict_names(params, "nics"):
            self.netdev_id.append(kvm_utils.generate_random_id())

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            self.vnc_port = kvm_utils.find_free_port(5900, 6100)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Generate or copy MAC addresses for all NICs
        num_nics = len(kvm_utils.get_sub_dict_names(params, "nics"))
        for vlan in range(num_nics):
            nic_name = kvm_utils.get_sub_dict_names(params, "nics")[vlan]
            nic_params = kvm_utils.get_sub_dict(params, nic_name)
            if nic_params.get("nic_mac", None):
                mac = nic_params.get("nic_mac")
                kvm_utils.set_mac_address(self.instance, vlan, mac)
            else:
                mac = mac_source and mac_source.get_mac_address(vlan)
                if mac:
                    kvm_utils.set_mac_address(self.instance, vlan, mac)
                else:
                    kvm_utils.generate_mac_address(self.instance, vlan)

        # Assign a PCI assignable device
        self.pci_assignable = None
        pa_type = params.get("pci_assignable")
        if pa_type in ["vf", "pf", "mixed"]:
            pa_devices_requested = params.get("devices_requested")

            # Virtual Functions (VF) assignable devices
            if pa_type == "vf":
                self.pci_assignable = kvm_utils.PciAssignable(
                    type=pa_type,
                    driver=params.get("driver"),
                    driver_option=params.get("driver_option"),
                    devices_requested=pa_devices_requested)
            # Physical NIC (PF) assignable devices
            elif pa_type == "pf":
                self.pci_assignable = kvm_utils.PciAssignable(
                    type=pa_type,
                    names=params.get("device_names"),
                    devices_requested=pa_devices_requested)
            # Working with both VF and PF
            elif pa_type == "mixed":
                self.pci_assignable = kvm_utils.PciAssignable(
                    type=pa_type,
                    driver=params.get("driver"),
                    driver_option=params.get("driver_option"),
                    names=params.get("device_names"),
                    devices_requested=pa_devices_requested)

            self.pa_pci_ids = self.pci_assignable.request_devs()

            if self.pa_pci_ids:
                logging.debug("Successfully assigned devices: %s",
                              self.pa_pci_ids)
            else:
                logging.error("No PCI assignable devices were assigned "
                              "and 'pci_assignable' is defined to %s "
                              "on your config file. Aborting VM creation.",
                              pa_type)
                return False

        elif pa_type and pa_type != "no":
            logging.warn("Unsupported pci_assignable type: %s", pa_type)

        # Make qemu command
        qemu_command = self.make_qemu_command()

        # Add migration parameters if required
        if migration_mode == "tcp":
            self.migration_port = kvm_utils.find_free_port(5200, 6000)
            qemu_command += " -incoming tcp:0:%d" % self.migration_port
        elif migration_mode == "unix":
            self.migration_file = "/tmp/migration-unix-%s" % self.instance
            qemu_command += " -incoming unix:%s" % self.migration_file
        elif migration_mode == "exec":
            self.migration_port = kvm_utils.find_free_port(5200, 6000)
            qemu_command += (' -incoming "exec:nc -l %s"' %
                             self.migration_port)

        logging.debug("Running qemu command:\n%s", qemu_command)
        self.process = kvm_subprocess.run_bg(qemu_command, None,
                                             logging.debug, "(qemu) ")

        # Make sure the process was started successfully
        if not self.process.is_alive():
            logging.error("VM could not be created; "
                          "qemu command failed:\n%s" % qemu_command)
            logging.error("Status: %s" % self.process.get_status())
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            self.destroy()
            return False

        # Establish monitor connections
        self.monitors = []
        for monitor_name in kvm_utils.get_sub_dict_names(params, "monitors"):
            monitor_params = kvm_utils.get_sub_dict(params, monitor_name)
            # Wait for monitor connection to succeed
            end_time = time.time() + timeout
            while time.time() < end_time:
                try:
                    if monitor_params.get("monitor_type") == "qmp":
                        # Add a QMP monitor
                        monitor = kvm_monitor.QMPMonitor(
                            monitor_name,
                            self.get_monitor_filename(monitor_name))
                    else:
                        # Add a "human" monitor
                        monitor = kvm_monitor.HumanMonitor(
                            monitor_name,
                            self.get_monitor_filename(monitor_name))
                except kvm_monitor.MonitorError, e:
                    logging.warn(e)
                else:
                    if monitor.is_responsive():
                        break
                time.sleep(1)
            else:
                logging.error("Could not connect to monitor '%s'" %
                              monitor_name)
                self.destroy()
                return False
            # Add this monitor to the list
            self.monitors += [monitor]

        # Get the output so far, to see if we have any problems with
        # KVM modules or with hugepage setup.
        output = self.process.get_output()

        if re.search("Could not initialize KVM", output, re.IGNORECASE):
            logging.error("Could not initialize KVM; "
                          "qemu command:\n%s" % qemu_command)
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            self.destroy()
            return False

        if "alloc_mem_area" in output:
            logging.error("Could not allocate hugepage memory; "
                          "qemu command:\n%s" % qemu_command)
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            self.destroy()
            return False

        logging.debug("VM appears to be alive with PID %s", self.get_pid())

        # Establish a session with the serial console -- requires a version
        # of netcat that supports -U
        self.serial_console = kvm_subprocess.ShellSession(
            "nc -U %s" % self.get_serial_console_filename(),
            auto_close=False,
            output_func=kvm_utils.log_line,
            output_params=("serial-%s.log" % name,))

        return True

    finally:
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
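# Hypothetical usage sketch (not part of the original class): creating a VM
# that listens for an incoming TCP migration while cloning MAC addresses from
# a source VM.  'src_vm', 'dst_vm' and the helper name are illustrative only.
def example_create_migration_target(src_vm, dst_vm):
    if not dst_vm.create(migration_mode="tcp", mac_source=src_vm):
        raise error.TestError("Could not create migration target VM")
    logging.info("Target VM listening on tcp:0:%d", dst_vm.migration_port)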