def read_until_output_matches(self, patterns, filter=lambda x: x, timeout=30.0,
                              internal_timeout=None, print_func=None):
    """
    Read using read_nonblocking until a match is found using match_patterns,
    or until timeout expires.

    Before attempting to search for a match, the data is filtered using the
    filter function provided.

    @brief: Read from child using read_nonblocking until a pattern matches.
    @param patterns: List of strings (regular expression patterns)
    @param filter: Function to apply to the data read from the child before
            attempting to match it against the patterns (should take and
            return a string)
    @param timeout: The duration (in seconds) to wait until a match is found
    @param internal_timeout: The timeout to pass to read_nonblocking
    @param print_func: A function to be used to print the data being read
            (should take a string parameter)
    @return: Tuple containing the match index (or None if no match was
            found) and the data read so far.
    """
    match = None
    data = ""

    end_time = time.time() + timeout
    while time.time() < end_time:
        # Read data from child
        newdata = self.read_nonblocking(internal_timeout)
        # Print it if necessary.
        # (Renamed local from 'str' to 'text' -- the original shadowed the
        # builtin.  Strip a single trailing newline so we don't emit a
        # spurious empty line.)
        if print_func and newdata:
            text = newdata
            if text.endswith("\n"):
                text = text[:-1]
            for line in text.split("\n"):
                print_func(line.decode("utf-8", "replace"))
        data += newdata

        # Look for patterns
        match = self.match_patterns(filter(data), patterns)
        if match is not None:
            break

        # Check if child has died; if so there is nothing more to read
        if not self.is_alive():
            logging.debug("Process terminated with status %s"
                          % self.get_status())
            break

    # Print some debugging info if we timed out or the process died
    # with a nonzero status
    if match is None and (self.is_alive() or self.get_status() != 0):
        logging.debug("Timeout elapsed or process terminated. Output:" +
                      kvm_utils.format_str_for_message(data.strip()))

    return (match, data)
def create_image(params, qemu_img_path, image_dir):
    """
    Create an image using qemu_img.

    @param params: Dictionary containing the test parameters.
    @param qemu_img_path: The path of the qemu-img binary
    @param image_dir: The directory where the image is to be located
    @return: The path of the newly created image on success, None on
            failure (timeout, nonzero qemu-img exit status, or missing
            output file).

    @note: params should contain:
           image_name -- the name of the image file, without extension
           image_format -- the format of the image (qcow2, raw etc)
           image_size -- the requested size of the image (a string
           qemu-img can understand, such as '10G')
    """
    # Gather the pieces of the qemu-img invocation up front, then build
    # the command in a single formatting expression.
    format = params.get("image_format", "qcow2")
    image_filename = get_image_filename(params, image_dir)
    size = params.get("image_size", "10G")
    qemu_img_cmd = "%s create -f %s %s %s" % (qemu_img_path, format,
                                              image_filename, size)

    logging.debug("Running qemu-img command:\n%s" % qemu_img_cmd)
    (status, output) = kvm_subprocess.run_fg(qemu_img_cmd, logging.debug,
                                             "(qemu-img) ", timeout=30)

    # status is None on timeout, nonzero on command failure
    if status is None:
        logging.error("Timeout elapsed while waiting for qemu-img command "
                      "to complete:\n%s" % qemu_img_cmd)
        return None
    elif status != 0:
        logging.error("Could not create image; "
                      "qemu-img command failed:\n%s" % qemu_img_cmd)
        logging.error("Status: %s" % status)
        logging.error("Output:" +
                      kvm_utils.format_str_for_message(output))
        return None

    # Even with a zero exit status, verify the file actually appeared
    if not os.path.exists(image_filename):
        logging.error("Image could not be created for some reason; "
                      "qemu-img command:\n%s" % qemu_img_cmd)
        return None

    logging.info("Image created in %s" % image_filename)
    return image_filename
def run_migration(test, params, env):
    """
    KVM migration test:
    1) Get two live VMs. One will be the 'source', the other will be the
       'destination'.
    2) Verify if the source VM supports migration. If it does, proceed with
       the test
    3) Send a migration command to the source vm and wait until it's
       finished.
    4) Kill off the source vm
    3) Log into the destination vm after the migration is finished.
    4) Compare the output of a reference command executed on the source with
       the output of the same command on the destination machine

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    # Fetch source and destination VMs from the environment; both must
    # already be running.
    src_vm_name = params.get("migration_src")
    vm = kvm_utils.env_get_vm(env, src_vm_name)
    if not vm:
        raise error.TestError("VM '%s' not found in environment" %
                              src_vm_name)
    if not vm.is_alive():
        raise error.TestError("VM '%s' seems to be dead; Test requires a"
                              " living VM" % src_vm_name)

    dest_vm_name = params.get("migration_dst")
    dest_vm = kvm_utils.env_get_vm(env, dest_vm_name)
    if not dest_vm:
        raise error.TestError("VM '%s' not found in environment" %
                              dest_vm_name)
    if not dest_vm.is_alive():
        raise error.TestError("VM '%s' seems to be dead; Test requires a"
                              " living VM" % dest_vm_name)

    pre_scrdump_filename = os.path.join(test.debugdir, "migration_pre.ppm")
    post_scrdump_filename = os.path.join(test.debugdir, "migration_post.ppm")

    # See if migration is supported (probe the monitor's help text)
    s, o = vm.send_monitor_cmd("help info")
    if not "info migrate" in o:
        raise error.TestError("Migration is not supported")

    # Log into guest and get the output of migration_test_command
    logging.info("Waiting for guest to be up...")

    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
    if not session:
        raise error.TestFail("Could not log into guest")

    logging.info("Logged in")

    reference_output = session.get_command_output(params.get("migration_test_"
                                                             "command"))
    session.close()

    # Define the migration command ('-d' detaches so the monitor stays
    # responsive while migration runs)
    cmd = "migrate -d tcp:localhost:%d" % dest_vm.migration_port
    logging.debug("Migration command: %s" % cmd)

    # Migrate
    s, o = vm.send_monitor_cmd(cmd)
    if s:
        logging.error("Migration command failed (command: %r, output: %r)"
                      % (cmd, o))
        raise error.TestFail("Migration command failed")

    # Define some helper functions that poll 'info migrate' on the source
    def mig_finished():
        # Finished means: monitor responded and status is no longer 'active'
        s, o = vm.send_monitor_cmd("info migrate")
        if s:
            return False
        if "Migration status: active" in o:
            return False
        return True

    def mig_succeeded():
        s, o = vm.send_monitor_cmd("info migrate")
        if s == 0 and "Migration status: completed" in o:
            return True
        return False

    def mig_failed():
        s, o = vm.send_monitor_cmd("info migrate")
        if s == 0 and "Migration status: failed" in o:
            return True
        return False

    # Wait for migration to finish
    if not kvm_utils.wait_for(mig_finished, 90, 2, 2,
                              "Waiting for migration to finish..."):
        # NOTE(review): the two string literals below concatenate without a
        # space ("...migration tofinish"); fixing it would change a runtime
        # string, so it is only flagged here.
        raise error.TestFail("Timeout elapsed while waiting for migration to"
                             "finish")

    # Report migration status
    if mig_succeeded():
        logging.info("Migration finished successfully")
    else:
        if mig_failed():
            message = "Migration failed"
        else:
            message = "Migration ended with unknown status"
        raise error.TestFail(message)

    # Get 'post' screendump
    dest_vm.send_monitor_cmd("screendump %s" % post_scrdump_filename)

    # Get 'pre' screendump
    vm.send_monitor_cmd("screendump %s" % pre_scrdump_filename)

    # Kill the source VM (non-blocking: don't wait for the monitor reply)
    vm.send_monitor_cmd("quit", block=False)

    # Hack: it seems that the first attempt to communicate with the SSH port
    # following migration always fails (or succeeds after a very long time).
    # So just connect to the port once so the following call to ssh_login
    # succeeds.
    dest_vm.is_sshd_running(timeout=0.0)

    # Log into guest and get the output of migration_test_command
    logging.info("Logging into guest after migration...")

    session = dest_vm.ssh_login()
    if not session:
        raise error.TestFail("Could not log into guest after migration")

    logging.info("Logged in after migration")

    output = session.get_command_output(params.get("migration_test_command"))
    session.close()

    # Compare output to reference output; a mismatch means guest state was
    # not preserved across the migration
    if output != reference_output:
        logging.info("Command output before migration differs from command"
                     " output after migration")
        logging.info("Command: %s" % params.get("migration_test_command"))
        logging.info("Output before:" +
                     kvm_utils.format_str_for_message(reference_output))
        logging.info("Output after:" +
                     kvm_utils.format_str_for_message(output))
        raise error.TestFail("Command produced different output before and"
                             " after migration")
def send_monitor_cmd(self, command, block=True, timeout=20.0):
    """
    Send command to the QEMU monitor.

    Connect to the VM's monitor socket and wait for the (qemu) prompt.
    If block is True, read output from the socket until the (qemu) prompt
    is found again, or until timeout expires.

    Return a tuple containing an integer indicating success or failure,
    and the data read so far. The integer is 0 on success and 1 on
    failure. A failure is any of the following cases: connection to the
    socket failed, or the first (qemu) prompt could not be found, or
    block is True and the second prompt could not be found.

    @param command: Command that will be sent to the monitor
    @param block: Whether the output from the socket will be read until
            the timeout expires
    @param timeout: Timeout (seconds) before giving up on reading from
            socket
    """
    def read_up_to_qemu_prompt(s, timeout):
        """
        Read data from socket s until the (qemu) prompt is found.

        If the prompt is found before timeout expires, return a tuple
        containing True and the data read. Otherwise return a tuple
        containing False and the data read so far.

        @param s: Socket object
        @param timeout: Time (seconds) before giving up trying to get the
                qemu prompt.
        """
        o = ""
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                o += s.recv(16384)
                # The prompt is the last whitespace-separated token on the
                # last line received so far
                if o.splitlines()[-1].split()[-1] == "(qemu)":
                    return (True, o)
            except:
                # NOTE(review): this bare except is load-bearing -- it
                # swallows both the socket error raised by a non-blocking
                # recv with no data available and the IndexError raised by
                # splitlines()/split() on an empty buffer; sleep briefly
                # and poll again.
                time.sleep(0.01)
        return (False, o)

    # Connect to monitor
    logging.debug("Sending monitor command: %s" % command)
    try:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Non-blocking so read_up_to_qemu_prompt can poll with a timeout
        s.setblocking(False)
        s.connect(self.monitor_file_name)
    except:
        logging.debug("Could not connect to monitor socket")
        return (1, "")

    # Wait for the initial (qemu) prompt before sending anything
    status, data = read_up_to_qemu_prompt(s, timeout)
    if not status:
        s.close()
        logging.debug("Could not find (qemu) prompt; output so far:" +
                      kvm_utils.format_str_for_message(data))
        return (1, "")

    # Send command
    s.sendall(command + "\n")

    # Receive command output
    data = ""
    if block:
        status, data = read_up_to_qemu_prompt(s, timeout)
        # Drop the first line, which is the monitor echoing the command back
        data = "\n".join(data.splitlines()[1:])
        if not status:
            s.close()
            logging.debug(
                "Could not find (qemu) prompt after command;"
                " output so far: %s",
                kvm_utils.format_str_for_message(data),
            )
            return (1, data)
    s.close()
    return (0, data)
def create(self, name=None, params=None, root_dir=None,
           for_migration=False, timeout=5.0):
    """
    Start the VM by running a qemu command.
    All parameters are optional. The following applies to all parameters
    but for_migration: If a parameter is not supplied, the corresponding
    value stored in the class attributes is used, and if it is supplied,
    it is stored for later use.

    @param name: The name of the object
    @param params: A dict containing VM params
    @param root_dir: Base directory for relative filenames
    @param for_migration: If True, start the VM with the -incoming option
    @param timeout: Time (seconds) to wait for the VM to become alive
            after the qemu process is started
    @return: True if the VM was started successfully, False otherwise.
    """
    # Tear down any previous instance before starting a new one
    self.destroy()

    if name != None:
        self.name = name
    if params != None:
        self.params = params
    if root_dir != None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO image
    iso = params.get("cdrom")
    if iso:
        iso = kvm_utils.get_path(root_dir, iso)
        if not os.path.exists(iso):
            logging.error("ISO file not found: %s" % iso)
            return False
        compare = False
        if params.get("md5sum_1m"):
            # Fast path: only hash the first megabyte of the ISO
            logging.debug("Comparing expected MD5 sum with MD5 sum of "
                          "first MB of ISO file...")
            actual_md5sum = kvm_utils.md5sum_file(iso, 1048576)
            expected_md5sum = params.get("md5sum_1m")
            compare = True
        elif params.get("md5sum"):
            logging.debug("Comparing expected MD5 sum with MD5 sum of ISO "
                          "file...")
            actual_md5sum = kvm_utils.md5sum_file(iso)
            expected_md5sum = params.get("md5sum")
            compare = True
        if compare:
            if actual_md5sum == expected_md5sum:
                logging.debug("MD5 sums match")
            else:
                logging.error("Actual MD5 sum differs from expected one")
                return False

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections: map each requested guest port to a
        # free host port
        redir_names = kvm_utils.get_sub_dict_names(params, "redirs")
        host_ports = kvm_utils.find_free_ports(5000, 6000, len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = kvm_utils.get_sub_dict(params, redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            self.vnc_port = kvm_utils.find_free_port(5900, 6000)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Make qemu command
        qemu_command = self.make_qemu_command()

        # Is this VM supposed to accept incoming migrations?
        if for_migration:
            # Find available migration port
            self.migration_port = kvm_utils.find_free_port(5200, 6000)
            # Add -incoming option to the qemu command
            qemu_command += " -incoming tcp:0:%d" % self.migration_port

        logging.debug("Running qemu command:\n%s", qemu_command)
        self.process = kvm_subprocess.run_bg(qemu_command, None,
                                             logging.debug, "(qemu) ")

        if not self.process.is_alive():
            logging.error("VM could not be created; "
                          "qemu command failed:\n%s" % qemu_command)
            logging.error("Status: %s" % self.process.get_status())
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            self.destroy()
            return False

        # Wait up to 'timeout' seconds for the VM to respond
        if not kvm_utils.wait_for(self.is_alive, timeout, 0, 1):
            logging.error("VM is not alive for some reason; "
                          "qemu command:\n%s" % qemu_command)
            self.destroy()
            return False

        # Get the output so far, to see if we have any problems with
        # hugepage setup.
        output = self.process.get_output()

        if "alloc_mem_area" in output:
            logging.error("Could not allocate hugepage memory; "
                          "qemu command:\n%s" % qemu_command)
            logging.error("Output:" + kvm_utils.format_str_for_message(
                self.process.get_output()))
            return False

        logging.debug("VM appears to be alive with PID %d",
                      self.process.get_pid())
        return True

    finally:
        # Always release the creation lock, even on early returns above
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    # Start tcpdump if it isn't already running
    if not env.has_key("address_cache"):
        env["address_cache"] = {}
    # Discard a dead tcpdump process so a fresh one is started below
    if env.has_key("tcpdump") and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if not env.has_key("tcpdump"):
        command = "/usr/sbin/tcpdump -npvi any 'dst port 68'"
        logging.debug("Starting tcpdump (%s)...", command)
        env["tcpdump"] = kvm_subprocess.kvm_tail(
            command=command,
            output_func=_update_address_cache,
            output_params=(env["address_cache"],))
        # If tcpdump dies within 1 second, it failed to start; warn but
        # do not abort preprocessing
        if kvm_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                              0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + kvm_utils.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    # (env.keys() returns a list in Python 2, so deleting while iterating
    # over it is safe here)
    requested_vms = kvm_utils.get_sub_dict_names(params, "vms")
    for key in env.keys():
        vm = env[key]
        if not kvm_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for"
                          " test; removing it..." % vm.name)
            vm.destroy()
            del env[key]

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Preprocess all VMs and images
    process(test, params, env, preprocess_image, preprocess_vm)

    # Get the KVM kernel module version and write it as a keyval
    logging.debug("Fetching KVM module version...")
    if os.path.exists("/dev/kvm"):
        # Fall back to the running kernel release if the sysfs version
        # file is unreadable
        kvm_version = os.uname()[2]
        try:
            # NOTE(review): 'file' shadows the builtin; kept byte-identical
            # here, flagged for a future cleanup
            file = open("/sys/module/kvm/version", "r")
            kvm_version = file.read().strip()
            file.close()
        except:
            # Best-effort: keep the kernel-release fallback on any error
            pass
    else:
        kvm_version = "Unknown"
        logging.debug("KVM module not loaded")
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    logging.debug("Fetching KVM userspace version...")
    qemu_path = kvm_utils.get_path(test.bindir,
                                   params.get("qemu_binary", "qemu"))
    version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
    exp = re.compile("[Vv]ersion .*?,")
    match = exp.search(version_line)
    if match:
        # Drop the leading 'version' word and the trailing comma
        kvm_userspace_version = " ".join(match.group().split()[1:]).strip(",")
    else:
        kvm_userspace_version = "Unknown"
        logging.debug("Could not fetch KVM userspace version")
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # Start tcpdump if it isn't already running
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Discard a dead tcpdump process so a fresh one is started below
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'dst port 68'" % kvm_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump (%s)...", cmd)
        env["tcpdump"] = kvm_subprocess.Tail(
            command=cmd,
            output_func=_update_address_cache,
            output_params=(env["address_cache"],))
        # If tcpdump dies within 1 second, it failed to start; warn but
        # do not abort preprocessing
        if kvm_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                              0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + kvm_utils.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    # (env.keys() returns a list in Python 2, so deleting while iterating
    # over it is safe here)
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not kvm_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test; removing it..." % vm.name)
            vm.destroy()
            del env[key]

    # Get the KVM kernel module version and write it as a keyval
    logging.debug("Fetching KVM module version...")
    if os.path.exists("/dev/kvm"):
        try:
            kvm_version = open("/sys/module/kvm/version").read().strip()
        except:
            # Best-effort: fall back to the running kernel release if the
            # sysfs version file is unreadable
            kvm_version = os.uname()[2]
    else:
        kvm_version = "Unknown"
        logging.debug("KVM module not loaded")
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    logging.debug("Fetching KVM userspace version...")
    qemu_path = kvm_utils.get_path(test.bindir,
                                   params.get("qemu_binary", "qemu"))
    version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
    matches = re.findall("[Vv]ersion .*?,", version_line)
    if matches:
        # Drop the leading 'version' word and the trailing comma
        kvm_userspace_version = " ".join(matches[0].split()[1:]).strip(",")
    else:
        kvm_userspace_version = "Unknown"
        logging.debug("Could not fetch KVM userspace version")
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    # Optional host-side setup steps driven by params
    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        h.setup()

    if params.get("type") == "unattended_install":
        u = test_setup.UnattendedInstallConfig(test, params)
        u.setup()

    if params.get("type") == "enospc":
        e = test_setup.EnospcConfig(test, params)
        e.setup()

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Preprocess all VMs and images
    process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        logging.debug("Starting screendump thread")
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              args=(test, params, env))
        _screendump_thread.start()
def run_migration(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration. If it does, proceed
       with the test.
    3) Send a migration command to the source VM and wait until it's
       finished.
    4) Kill off the source VM.
    3) Log into the destination VM after the migration is finished.
    4) Compare the output of a reference command executed on the source
       with the output of the same command on the destination machine.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = kvm_test_utils.get_living_vm(env, params.get("main_vm"))

    # See if migration is supported (probe the monitor's help text)
    s, o = vm.send_monitor_cmd("help info")
    if not "info migrate" in o:
        raise error.TestError("Migration is not supported")

    # Log into guest and get the output of migration_test_command
    session = kvm_test_utils.wait_for_login(vm)
    migration_test_command = params.get("migration_test_command")
    reference_output = session.get_command_output(migration_test_command)
    session.close()

    # Clone the main VM and ask it to wait for incoming migration
    dest_vm = vm.clone()
    dest_vm.create(for_migration=True)

    try:
        # Define the migration command ('-d' detaches so the monitor stays
        # responsive while migration runs)
        cmd = "migrate -d tcp:localhost:%d" % dest_vm.migration_port
        logging.debug("Migration command: %s" % cmd)

        # Migrate
        s, o = vm.send_monitor_cmd(cmd)
        if s:
            logging.error("Migration command failed (command: %r, "
                          "output: %r)" % (cmd, o))
            raise error.TestFail("Migration command failed")

        # Define some helper functions that poll 'info migrate' on the
        # source VM's monitor
        def mig_finished():
            s, o = vm.send_monitor_cmd("info migrate")
            return s == 0 and not "Migration status: active" in o

        def mig_succeeded():
            s, o = vm.send_monitor_cmd("info migrate")
            return s == 0 and "Migration status: completed" in o

        def mig_failed():
            s, o = vm.send_monitor_cmd("info migrate")
            return s == 0 and "Migration status: failed" in o

        # Wait for migration to finish
        if not kvm_utils.wait_for(mig_finished, 90, 2, 2,
                                  "Waiting for migration to finish..."):
            raise error.TestFail("Timeout elapsed while waiting for "
                                 "migration to finish")

        # Report migration status
        if mig_succeeded():
            logging.info("Migration finished successfully")
        elif mig_failed():
            raise error.TestFail("Migration failed")
        else:
            raise error.TestFail("Migration ended with unknown status")

        # Kill the source VM
        vm.destroy(gracefully=False)

        # Replace the source VM with the new cloned VM
        kvm_utils.env_register_vm(env, params.get("main_vm"), dest_vm)

    except:
        # On any failure, make sure the cloned destination VM does not
        # leak, then re-raise the original exception
        dest_vm.destroy(gracefully=False)
        raise

    # Log into guest and get the output of migration_test_command
    logging.info("Logging into guest after migration...")
    session = dest_vm.remote_login()
    if not session:
        raise error.TestFail("Could not log into guest after migration")
    logging.info("Logged in after migration")
    output = session.get_command_output(migration_test_command)
    session.close()

    # Compare output to reference output; a mismatch means guest state was
    # not preserved across the migration
    if output != reference_output:
        logging.info("Command output before migration differs from command "
                     "output after migration")
        logging.info("Command: %s" % params.get("migration_test_command"))
        logging.info("Output before:" +
                     kvm_utils.format_str_for_message(reference_output))
        logging.info("Output after:" +
                     kvm_utils.format_str_for_message(output))
        raise error.TestFail("Command produced different output before and "
                             "after migration")
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # Start tcpdump if it isn't already running
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Discard a dead tcpdump process so a fresh one is started below
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'dst port 68'" % kvm_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump (%s)...", cmd)
        env["tcpdump"] = kvm_subprocess.Tail(
            command=cmd,
            output_func=_update_address_cache,
            output_params=(env["address_cache"], ))
        # If tcpdump dies within 1 second, it failed to start; warn but
        # do not abort preprocessing
        if kvm_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                              0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn(
                "Output:" +
                kvm_utils.format_str_for_message(env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    # (env.keys() returns a list in Python 2, so deleting while iterating
    # over it is safe here)
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not kvm_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test; removing it..." % vm.name)
            vm.destroy()
            del env[key]

    # Get the KVM kernel module version and write it as a keyval
    logging.debug("Fetching KVM module version...")
    if os.path.exists("/dev/kvm"):
        try:
            kvm_version = open("/sys/module/kvm/version").read().strip()
        except:
            # Best-effort: fall back to the running kernel release if the
            # sysfs version file is unreadable
            kvm_version = os.uname()[2]
    else:
        kvm_version = "Unknown"
        logging.debug("KVM module not loaded")
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    logging.debug("Fetching KVM userspace version...")
    qemu_path = kvm_utils.get_path(test.bindir,
                                   params.get("qemu_binary", "qemu"))
    version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
    matches = re.findall("[Vv]ersion .*?,", version_line)
    if matches:
        # Drop the leading 'version' word and the trailing comma
        kvm_userspace_version = " ".join(matches[0].split()[1:]).strip(",")
    else:
        kvm_userspace_version = "Unknown"
        logging.debug("Could not fetch KVM userspace version")
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    # Optional host-side setup steps driven by params
    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        h.setup()

    if params.get("type") == "unattended_install":
        u = test_setup.UnattendedInstallConfig(test, params)
        u.setup()

    if params.get("type") == "enospc":
        e = test_setup.EnospcConfig(test, params)
        e.setup()

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Preprocess all VMs and images
    process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        logging.debug("Starting screendump thread")
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              args=(test, params, env))
        _screendump_thread.start()
def create(self, name=None, params=None, root_dir=None, timeout=5.0, migration_mode=None, mac_source=None): """ Start the VM by running a qemu command. All parameters are optional. If name, params or root_dir are not supplied, the respective values stored as class attributes are used. @param name: The name of the object @param params: A dict containing VM params @param root_dir: Base directory for relative filenames @param migration_mode: If supplied, start VM for incoming migration using this protocol (either 'tcp', 'unix' or 'exec') @param migration_exec_cmd: Command to embed in '-incoming "exec: ..."' (e.g. 'gzip -c -d filename') if migration_mode is 'exec' @param mac_source: A VM object from which to copy MAC addresses. If not specified, new addresses will be generated. """ self.destroy() if name is not None: self.name = name if params is not None: self.params = params if root_dir is not None: self.root_dir = root_dir name = self.name params = self.params root_dir = self.root_dir # Verify the md5sum of the ISO image iso = params.get("cdrom") if iso: iso = kvm_utils.get_path(root_dir, iso) if not os.path.exists(iso): logging.error("ISO file not found: %s" % iso) return False compare = False if params.get("md5sum_1m"): logging.debug("Comparing expected MD5 sum with MD5 sum of " "first MB of ISO file...") actual_hash = utils.hash_file(iso, 1048576, method="md5") expected_hash = params.get("md5sum_1m") compare = True elif params.get("md5sum"): logging.debug("Comparing expected MD5 sum with MD5 sum of ISO " "file...") actual_hash = utils.hash_file(iso, method="md5") expected_hash = params.get("md5sum") compare = True elif params.get("sha1sum"): logging.debug("Comparing expected SHA1 sum with SHA1 sum of " "ISO file...") actual_hash = utils.hash_file(iso, method="sha1") expected_hash = params.get("sha1sum") compare = True if compare: if actual_hash == expected_hash: logging.debug("Hashes match") else: logging.error("Actual hash differs from expected one") return 
False # Make sure the following code is not executed by more than one thread # at the same time lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+") fcntl.lockf(lockfile, fcntl.LOCK_EX) try: # Handle port redirections redir_names = kvm_utils.get_sub_dict_names(params, "redirs") host_ports = kvm_utils.find_free_ports(5000, 6000, len(redir_names)) self.redirs = {} for i in range(len(redir_names)): redir_params = kvm_utils.get_sub_dict(params, redir_names[i]) guest_port = int(redir_params.get("guest_port")) self.redirs[guest_port] = host_ports[i] # Generate netdev IDs for all NICs self.netdev_id = [] for nic in kvm_utils.get_sub_dict_names(params, "nics"): self.netdev_id.append(kvm_utils.generate_random_id()) # Find available VNC port, if needed if params.get("display") == "vnc": self.vnc_port = kvm_utils.find_free_port(5900, 6100) # Find random UUID if specified 'uuid = random' in config file if params.get("uuid") == "random": f = open("/proc/sys/kernel/random/uuid") self.uuid = f.read().strip() f.close() # Generate or copy MAC addresses for all NICs num_nics = len(kvm_utils.get_sub_dict_names(params, "nics")) for vlan in range(num_nics): nic_name = kvm_utils.get_sub_dict_names(params, "nics")[vlan] nic_params = kvm_utils.get_sub_dict(params, nic_name) if nic_params.get("nic_mac", None): mac = nic_params.get("nic_mac") kvm_utils.set_mac_address(self.instance, vlan, mac) else: mac = mac_source and mac_source.get_mac_address(vlan) if mac: kvm_utils.set_mac_address(self.instance, vlan, mac) else: kvm_utils.generate_mac_address(self.instance, vlan) # Assign a PCI assignable device self.pci_assignable = None pa_type = params.get("pci_assignable") if pa_type in ["vf", "pf", "mixed"]: pa_devices_requested = params.get("devices_requested") # Virtual Functions (VF) assignable devices if pa_type == "vf": self.pci_assignable = kvm_utils.PciAssignable( type=pa_type, driver=params.get("driver"), driver_option=params.get("driver_option"), 
devices_requested=pa_devices_requested) # Physical NIC (PF) assignable devices elif pa_type == "pf": self.pci_assignable = kvm_utils.PciAssignable( type=pa_type, names=params.get("device_names"), devices_requested=pa_devices_requested) # Working with both VF and PF elif pa_type == "mixed": self.pci_assignable = kvm_utils.PciAssignable( type=pa_type, driver=params.get("driver"), driver_option=params.get("driver_option"), names=params.get("device_names"), devices_requested=pa_devices_requested) self.pa_pci_ids = self.pci_assignable.request_devs() if self.pa_pci_ids: logging.debug("Successfuly assigned devices: %s", self.pa_pci_ids) else: logging.error("No PCI assignable devices were assigned " "and 'pci_assignable' is defined to %s " "on your config file. Aborting VM creation.", pa_type) return False elif pa_type and pa_type != "no": logging.warn("Unsupported pci_assignable type: %s", pa_type) # Make qemu command qemu_command = self.make_qemu_command() # Add migration parameters if required if migration_mode == "tcp": self.migration_port = kvm_utils.find_free_port(5200, 6000) qemu_command += " -incoming tcp:0:%d" % self.migration_port elif migration_mode == "unix": self.migration_file = "/tmp/migration-unix-%s" % self.instance qemu_command += " -incoming unix:%s" % self.migration_file elif migration_mode == "exec": self.migration_port = kvm_utils.find_free_port(5200, 6000) qemu_command += (' -incoming "exec:nc -l %s"' % self.migration_port) logging.debug("Running qemu command:\n%s", qemu_command) self.process = kvm_subprocess.run_bg(qemu_command, None, logging.debug, "(qemu) ") # Make sure the process was started successfully if not self.process.is_alive(): logging.error("VM could not be created; " "qemu command failed:\n%s" % qemu_command) logging.error("Status: %s" % self.process.get_status()) logging.error("Output:" + kvm_utils.format_str_for_message( self.process.get_output())) self.destroy() return False # Establish monitor connections self.monitors = [] for 
monitor_name in kvm_utils.get_sub_dict_names(params, "monitors"): monitor_params = kvm_utils.get_sub_dict(params, monitor_name) # Wait for monitor connection to succeed end_time = time.time() + timeout while time.time() < end_time: try: if monitor_params.get("monitor_type") == "qmp": # Add a QMP monitor monitor = kvm_monitor.QMPMonitor( monitor_name, self.get_monitor_filename(monitor_name)) else: # Add a "human" monitor monitor = kvm_monitor.HumanMonitor( monitor_name, self.get_monitor_filename(monitor_name)) except kvm_monitor.MonitorError, e: logging.warn(e) else: if monitor.is_responsive(): break time.sleep(1) else: logging.error("Could not connect to monitor '%s'" % monitor_name) self.destroy() return False # Add this monitor to the list self.monitors += [monitor] # Get the output so far, to see if we have any problems with # KVM modules or with hugepage setup. output = self.process.get_output() if re.search("Could not initialize KVM", output, re.IGNORECASE): logging.error("Could not initialize KVM; " "qemu command:\n%s" % qemu_command) logging.error("Output:" + kvm_utils.format_str_for_message( self.process.get_output())) self.destroy() return False if "alloc_mem_area" in output: logging.error("Could not allocate hugepage memory; " "qemu command:\n%s" % qemu_command) logging.error("Output:" + kvm_utils.format_str_for_message( self.process.get_output())) self.destroy() return False logging.debug("VM appears to be alive with PID %s", self.get_pid()) # Establish a session with the serial console -- requires a version # of netcat that supports -U self.serial_console = kvm_subprocess.ShellSession( "nc -U %s" % self.get_serial_console_filename(), auto_close=False, output_func=kvm_utils.log_line, output_params=("serial-%s.log" % name,)) return True
def run_migration(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed with
            the test.
    3) Start a background process in the guest and verify it is running.
    4) Send a migration command to the source VM and wait until it's finished.
    5) Log into the destination VM after the migration is finished.
    6) Compare the output of a reference command executed on the source with
            the output of the same command on the destination machine.

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    # Collect migration parameters
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    # 2 second delay before cancelling, but only when mig_cancel is enabled
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    offline = params.get("offline", "no") == "yes"
    check = params.get("vmstate_check", "no") == "yes"

    # Record the reference output of migration_test_command before migrating
    test_command = params.get("migration_test_command")
    reference_output = session.cmd_output(test_command)

    # Kick off a background process in the guest; the session stays open so
    # the process keeps running
    background_command = params.get("migration_bg_command", "")
    session.sendline(background_command)
    time.sleep(5)

    # Open a second session to verify the background process is alive
    bg_session = vm.wait_for_login(timeout=login_timeout)
    try:
        check_command = params.get("migration_bg_check_command", "")
        bg_session.cmd(check_command, timeout=30)
        bg_session.close()

        # Perform the migration itself
        vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline,
                   check)

        # Reconnect to the (now migrated) guest
        logging.info("Logging into guest after migration...")
        bg_session = vm.wait_for_login(timeout=30)
        logging.info("Logged in after migration")

        # The background process must have survived the migration
        bg_session.cmd(check_command, timeout=30)

        # Re-run the reference command and compare with the pre-migration
        # output
        migrated_output = bg_session.cmd_output(test_command)
        if migrated_output != reference_output:
            logging.info("Command output before migration differs from "
                         "command output after migration")
            logging.info("Command: %s", test_command)
            logging.info("Output before:%s",
                         kvm_utils.format_str_for_message(reference_output))
            logging.info("Output after:%s",
                         kvm_utils.format_str_for_message(migrated_output))
            raise error.TestFail("Command '%s' produced different output "
                                 "before and after migration" % test_command)
    finally:
        # Kill the background process and clean up the sessions
        if bg_session and bg_session.is_alive():
            bg_session.cmd_output(params.get("migration_bg_kill_command", ""))
            bg_session.close()
        session.close()
def get_command_status_output(self, command, timeout=30.0,
                              internal_timeout=None, print_func=None):
    """
    Send a command and return its exit status and output.

    @param command: Command to send (must not contain newline characters)
    @param timeout: The duration (in seconds) to wait until a match is
            found
    @param internal_timeout: The timeout to pass to read_nonblocking
    @param print_func: A function to be used to print the data being read
            (should take a string parameter)

    @return: A tuple (status, output) where status is the exit status or
            None if no exit status is available (e.g. timeout elapsed), and
            output is the output of command.
    """
    # (Helpers use 'data', not 'str', to avoid shadowing the builtin.)
    def remove_command_echo(data, cmd):
        # Drop the first line when it is just the command echoed back
        if data and data.splitlines()[0] == cmd:
            data = "".join(data.splitlines(True)[1:])
        return data

    def remove_last_nonempty_line(data):
        # Strip the trailing shell prompt (the last non-empty line)
        return "".join(data.rstrip().splitlines(True)[:-1])

    # Print some debugging info
    logging.debug("Sending command: %s" % command)

    # Flush everything that's waiting to be read so stale output does not
    # pollute this command's result
    self.read_nonblocking(0.1)

    # Send the command and get its output
    self.sendline(command)
    (match, output) = self.read_up_to_prompt(timeout, internal_timeout,
                                             print_func)
    # Remove the echoed command from the output
    output = remove_command_echo(output, command)
    # If the prompt was not found, return the output so far
    if not match:
        return (None, output)
    # Remove the final shell prompt from the output
    output = remove_last_nonempty_line(output)

    # Send the 'echo ...' command to get the last exit status
    self.sendline(self.status_test_command)
    (match, status) = self.read_up_to_prompt(10.0, internal_timeout)
    if not match:
        return (None, output)
    status = remove_command_echo(status, self.status_test_command)
    status = remove_last_nonempty_line(status)
    # Get the first line consisting of digits only
    digit_lines = [line for line in status.splitlines()
                   if line.strip().isdigit()]
    if not digit_lines:
        return (None, output)
    status = int(digit_lines[0].strip())

    # Print some debugging info
    if status != 0:
        logging.debug("Command failed; status: %d, output:%s", status,
                      kvm_utils.format_str_for_message(output.strip()))

    return (status, output)