def reboot(self, session=None, method="shell", nic_index=0, timeout=240):
    """
    Reboot the VM and wait for it to come back up by trying to log in until
    timeout expires.

    @param session: A shell session object or None.
    @param method: Reboot method.  Can be "shell" (send a shell reboot
            command).
    @param nic_index: Index of NIC to access in the VM, when logging in
            after rebooting.
    @param timeout: Time to wait for login to succeed (after rebooting).
    @return: A new shell session object.
    @raise virt_vm.VMRebootError: If an unknown reboot method is given or
            the guest never stops responding.
    """
    error.base_context("rebooting '%s'" % self.name, logging.info)
    error.context("before reboot")
    # Reuse the caller's session when provided, otherwise log in now.
    shell_session = session or self.login()
    error.context()

    # Only the shell-based reboot method is supported by this class.
    if method != "shell":
        raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
    shell_session.sendline(self.params.get("reboot_command"))

    error.context("waiting for guest to go down", logging.info)
    went_down = virt_utils.wait_for(
        lambda: not shell_session.is_responsive(timeout=30), 120, 0, 1)
    if not went_down:
        raise virt_vm.VMRebootError("Guest refuses to go down")
    shell_session.close()

    error.context("logging in after reboot", logging.info)
    return self.wait_for_login(nic_index, timeout=timeout)
def reboot(self, session=None, method="shell", nic_index=0, timeout=240):
    """
    Reboot the VM and wait for it to come back up by trying to log in
    until timeout expires.

    @param session: A shell session object or None.
    @param method: Reboot method.  Can be "shell" (send a shell reboot
            command).
    @param nic_index: Index of NIC to access in the VM, when logging in
            after rebooting.
    @param timeout: Time to wait for login to succeed (after rebooting).
    @return: A new shell session object.
    @raise virt_vm.VMRebootError: If the reboot method is unknown or the
            guest never stops responding.
    """
    error.base_context("rebooting '%s'" % self.name, logging.info)
    error.context("before reboot")
    # Reuse the caller's session if given; otherwise open a fresh login.
    session = session or self.login()
    error.context()
    if method == "shell":
        session.sendline(self.params.get("reboot_command"))
    else:
        raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
    error.context("waiting for guest to go down", logging.info)
    # Poll for up to 120 s for the session to stop responding, which
    # indicates the guest has actually started rebooting.
    if not virt_utils.wait_for(
            lambda: not session.is_responsive(timeout=30), 120, 0, 1):
        raise virt_vm.VMRebootError("Guest refuses to go down")
    session.close()
    error.context("logging in after reboot", logging.info)
    # A successful login implies the guest finished rebooting.
    return self.wait_for_login(nic_index, timeout=timeout)
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")
    # Start tcpdump if it isn't already running
    # The address cache maps MAC addresses to IPs learned from DHCP traffic.
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Discard a dead tcpdump process so it can be restarted below.
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'dst port 68'" % virt_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump '%s'", cmd)
        env["tcpdump"] = aexpect.Tail(
            command=cmd,
            output_func=_update_address_cache,
            output_params=(env["address_cache"],))
        # If tcpdump dies almost immediately it most likely failed to start.
        if virt_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + virt_utils.format_str_for_message(
                env["tcpdump"].get_output()))
    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not virt_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test, destroying it" % vm.name)
            vm.destroy()
            del env[key]
    # Get Host cpu type
    if params.get("auto_cpu_model") == "yes":
        if not env.get("cpu_model"):
            env["cpu_model"] = virt_utils.get_cpu_model()
        params["cpu_model"] = env.get("cpu_model")
    kvm_ver_cmd = params.get("kvm_ver_cmd", "")
    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError, e:
            # Best effort only -- the test can proceed without the version.
            kvm_version = "Unknown"
def reboot(vm, session, method="shell", sleep_before_reset=10, nic_index=0,
           timeout=240):
    """
    Reboot the VM and wait for it to come back up by trying to log in until
    timeout expires.

    @param vm: VM object.
    @param session: A shell session object.
    @param method: Reboot method.  Can be "shell" (send a shell reboot
            command) or "system_reset" (send a system_reset monitor command).
    @param sleep_before_reset: Seconds to sleep before sending system_reset
            (only used by the "system_reset" method).
    @param nic_index: Index of NIC to access in the VM, when logging in after
            rebooting.
    @param timeout: Time to wait before giving up (after rebooting).
    @return: A new shell session object.
    @raise error.TestError: If an unknown reboot method is supplied.
    @raise error.TestFail: If the guest fails to go down or a RESET QMP
            event is missing after system_reset.
    """
    if method == "shell":
        # Send a reboot command to the guest's shell
        session.sendline(vm.get_params().get("reboot_command"))
        logging.info("Reboot command sent. Waiting for guest to go down")
    elif method == "system_reset":
        # Sleep for a while before sending the command
        time.sleep(sleep_before_reset)
        # Clear the event list of all QMP monitors
        monitors = [m for m in vm.monitors if m.protocol == "qmp"]
        for m in monitors:
            m.clear_events()
        # Send a system_reset monitor command
        vm.monitor.cmd("system_reset")
        logging.info("Monitor command system_reset sent. Waiting for guest to "
                     "go down")
        # Look for RESET QMP events
        time.sleep(1)
        for m in monitors:
            if not m.get_event("RESET"):
                raise error.TestFail("RESET QMP event not received after "
                                     "system_reset (monitor '%s')" % m.name)
            else:
                logging.info("RESET QMP event received")
    else:
        # BUGFIX: previously this only logged the error and then waited (in
        # vain) for the guest to go down, yielding a misleading
        # "Guest refuses to go down" failure.  Fail fast instead.
        raise error.TestError("Unknown reboot method: %s" % method)
    # Wait for the session to become unresponsive and close it
    if not virt_utils.wait_for(lambda: not session.is_responsive(timeout=30),
                               120, 0, 1):
        raise error.TestFail("Guest refuses to go down")
    session.close()
    # Try logging into the guest until timeout expires
    logging.info("Guest is down. Waiting for it to go up again, timeout %ds",
                 timeout)
    session = vm.wait_for_login(nic_index, timeout=timeout)
    logging.info("Guest is up again")
    return session
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")
    # Start tcpdump if it isn't already running
    # The address cache maps MAC addresses to IPs learned from DHCP traffic.
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Discard a dead tcpdump process so it can be restarted below.
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'dst port 68'" % virt_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump '%s'", cmd)
        env["tcpdump"] = aexpect.Tail(command=cmd,
                                      output_func=_update_address_cache,
                                      output_params=(env["address_cache"], ))
        # If tcpdump dies almost immediately it most likely failed to start.
        if virt_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn(
                "Output:" + virt_utils.format_str_for_message(
                    env["tcpdump"].get_output()))
    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not virt_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test, destroying it" % vm.name)
            vm.destroy()
            del env[key]
    # Get Host cpu type
    if params.get("auto_cpu_model") == "yes":
        if not env.get("cpu_model"):
            env["cpu_model"] = virt_utils.get_cpu_model()
        params["cpu_model"] = env.get("cpu_model")
    kvm_ver_cmd = params.get("kvm_ver_cmd", "")
    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError, e:
            # Best effort only -- the test can proceed without the version.
            kvm_version = "Unknown"
def read_until_output_matches(self, patterns, filter=lambda x: x,
                              timeout=60, internal_timeout=None,
                              print_func=None):
    """
    Read using read_nonblocking until a match is found using match_patterns,
    or until timeout expires. Before attempting to search for a match, the
    data is filtered using the filter function provided.

    @brief: Read from child using read_nonblocking until a pattern
            matches.
    @param patterns: List of strings (regular expression patterns)
    @param filter: Function to apply to the data read from the child before
            attempting to match it against the patterns (should take and
            return a string)
    @param timeout: The duration (in seconds) to wait until a match is
            found
    @param internal_timeout: The timeout to pass to read_nonblocking
    @param print_func: A function to be used to print the data being read
            (should take a string parameter)
    @return: Tuple containing the match index and the data read so far
    @raise ExpectTimeoutError: Raised if timeout expires
    @raise ExpectProcessTerminatedError: Raised if the child process
            terminates while waiting for output
    @raise ExpectError: Raised if an unknown error occurs
    """
    fd = self._get_fd("expect")
    o = ""  # accumulated output read so far
    end_time = time.time() + timeout
    while True:
        try:
            r, w, x = select.select([fd], [], [],
                                    max(0, end_time - time.time()))
        except (select.error, TypeError):
            # select.error presumably means the fd went away (child
            # closed); break out and report termination below -- TODO
            # confirm when TypeError can actually occur here.
            break
        if not r:
            # select timed out without the fd becoming readable.
            raise ExpectTimeoutError(patterns, o)
        # Read data from child
        data = self.read_nonblocking(internal_timeout)
        if not data:
            break
        # Print it if necessary
        if print_func:
            for line in data.splitlines():
                print_func(line)
        # Look for patterns
        o += data
        match = self.match_patterns(filter(o), patterns)
        if match is not None:
            return match, o

    # Check if the child has terminated
    if virt_utils.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
        raise ExpectProcessTerminatedError(patterns, self.get_status(), o)
    else:
        # This shouldn't happen
        raise ExpectError(patterns, o)
def read_until_output_matches(self, patterns, filter=lambda x: x,
                              timeout=60, internal_timeout=None,
                              print_func=None):
    """
    Read using read_nonblocking until a match is found using match_patterns,
    or until timeout expires. Before attempting to search for a match, the
    data is filtered using the filter function provided.

    @brief: Read from child using read_nonblocking until a pattern
            matches.
    @param patterns: List of strings (regular expression patterns)
    @param filter: Function to apply to the data read from the child before
            attempting to match it against the patterns (should take and
            return a string)
    @param timeout: The duration (in seconds) to wait until a match is
            found
    @param internal_timeout: The timeout to pass to read_nonblocking
    @param print_func: A function to be used to print the data being read
            (should take a string parameter)
    @return: Tuple containing the match index and the data read so far
    @raise ExpectTimeoutError: Raised if timeout expires
    @raise ExpectProcessTerminatedError: Raised if the child process
            terminates while waiting for output
    @raise ExpectError: Raised if an unknown error occurs
    """
    fd = self._get_fd("expect")
    o = ""  # accumulated output read so far
    end_time = time.time() + timeout
    while True:
        try:
            r, w, x = select.select([fd], [], [],
                                    max(0, end_time - time.time()))
        except (select.error, TypeError):
            # select.error presumably means the fd went away (child
            # closed); break out and report termination below -- TODO
            # confirm when TypeError can actually occur here.
            break
        if not r:
            # select timed out without the fd becoming readable.
            raise ExpectTimeoutError(patterns, o)
        # Read data from child
        data = self.read_nonblocking(internal_timeout)
        if not data:
            break
        # Print it if necessary
        if print_func:
            for line in data.splitlines():
                print_func(line)
        # Look for patterns
        o += data
        match = self.match_patterns(filter(o), patterns)
        if match is not None:
            return match, o

    # Check if the child has terminated
    if virt_utils.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
        raise ExpectProcessTerminatedError(patterns, self.get_status(), o)
    else:
        # This shouldn't happen
        raise ExpectError(patterns, o)
def wait_for_get_address(self, nic_index_or_name, timeout=30,
                         internal_timeout=1):
    """
    Wait for a nic to acquire an IP address, then return it.

    @param nic_index_or_name: Index or name of the NIC to query.
    @param timeout: Total time to wait for the address to show up.
    @param internal_timeout: Polling interval passed to wait_for.
    @raise VMIPAddressMissingError: If no address is acquired in time.
    """
    def _poll_address():
        # Treat "address not known/verified yet" as a retryable
        # condition rather than an error.
        try:
            return self.get_address(nic_index_or_name)
        except (VMIPAddressMissingError, VMAddressVerificationError):
            return False

    acquired = virt_utils.wait_for(_poll_address, timeout, internal_timeout)
    if not acquired:
        raise VMIPAddressMissingError(self.virtnet[nic_index_or_name].mac)
    return self.get_address(nic_index_or_name)
def start(self):
    """
    Start this VM via virsh and wait until the domain is running.

    @raise virt_vm.VMStartError: If virsh fails to start the domain, or
            the domain is not alive within 60 seconds afterwards.
    """
    if not virsh_start(self.name, self.connect_uri):
        raise virt_vm.VMStartError(self.name,
                                   "libvirt domain failed to start")
    # virsh reported success; now wait for the domain to actually come up.
    domain_up = virt_utils.wait_for(
        func=self.is_alive, timeout=60,
        text="waiting for domain %s to start" % self.name)
    # wait_for returns None on timeout.
    if domain_up is None:
        raise virt_vm.VMStartError(self.name,
                                   "libvirt domain not active after start")
def start(self):
    """
    Starts this VM.

    @raise virt_vm.VMStartError: If virsh fails to start the domain, or
            the domain is not alive within 60 seconds afterwards.
    """
    if virsh_start(self.name, self.connect_uri):
        # Wait for the domain to be created
        has_started = virt_utils.wait_for(func=self.is_alive, timeout=60,
                                          text=("waiting for domain %s "
                                                "to start" % self.name))
        # wait_for returns None on timeout.
        if has_started is None:
            raise virt_vm.VMStartError(
                self.name, "libvirt domain not "
                "active after start")
    else:
        raise virt_vm.VMStartError(self.name, "libvirt domain failed "
                                   "to start")
def destroy(self, gracefully=True, free_mac_addresses=True): """ Destroy the VM. If gracefully is True, first attempt to shutdown the VM with a shell command. If that fails, send SIGKILL to the qemu process. @param gracefully: If True, an attempt will be made to end the VM using a shell command before trying to end the qemu process with a 'quit' or a kill signal. @param free_mac_addresses: If True, the MAC addresses used by the VM will be freed. """ try: # Is it already dead? if self.is_dead(): return logging.debug("Destroying VM") if gracefully and self.params.get("shutdown_command"): # Try to destroy with shell command logging.debug("Trying to shutdown VM with shell command") try: session = self.login() except (virt_utils.LoginError, virt_vm.VMError), e: logging.debug(e) else: try: # Send the shutdown command session.sendline(self.params.get("shutdown_command")) logging.debug("Shutdown command sent; waiting for VM " "to go down...") if virt_utils.wait_for(self.is_dead, 60, 1, 1): logging.debug("VM is down") return finally: session.close() virsh_destroy(self.name, self.connect_uri)
def wait_for_migration():
    # Poll the monitor until migration leaves the "active" state, or
    # fail the test when mig_timeout expires first.
    finished = virt_utils.wait_for(mig_finished, mig_timeout, 2, 2,
                                   "Waiting for migration to finish")
    if not finished:
        raise error.TestFail("Timeout expired while waiting for migration "
                             "to finish")
def migrate(vm, env=None, mig_timeout=3600, mig_protocol="tcp",
            mig_cancel=False, offline=False, stable_check=False,
            clean=False, save_path=None, dest_host='localhost',
            mig_port=None):
    """
    Migrate a VM locally and re-register it in the environment.

    @param vm: The VM to migrate.
    @param env: The environment dictionary.  If omitted, the migrated VM will
            not be registered.
    @param mig_timeout: timeout value for migration.
    @param mig_protocol: migration protocol
    @param mig_cancel: Test migrate_cancel or not when protocol is tcp.
    @param offline: If True, stop the source VM before migrating.
    @param stable_check: If True, save the state of source and destination
            VMs and fail on any difference.
    @param clean: If True (with stable_check), remove the state files after
            the comparison.
    @param save_path: Directory for the stable_check state files
            (defaults to "/tmp").
    @param dest_host: Destination host (defaults to 'localhost').
    @param mig_port: Port that will be used for migration.
    @return: The post-migration VM, in case of same host migration, True in
            case of multi-host migration.
    @raise error.TestFail: If migration fails, times out, cannot be
            cancelled, or the stable check finds a state mismatch.
    """
    def mig_finished():
        # Monitor output may be a human-readable string or a QMP dict.
        o = vm.monitor.info("migrate")
        if isinstance(o, str):
            return "status: active" not in o
        else:
            return o.get("status") != "active"

    def mig_succeeded():
        o = vm.monitor.info("migrate")
        if isinstance(o, str):
            return "status: completed" in o
        else:
            return o.get("status") == "completed"

    def mig_failed():
        o = vm.monitor.info("migrate")
        if isinstance(o, str):
            return "status: failed" in o
        else:
            return o.get("status") == "failed"

    def mig_cancelled():
        # Both spellings appear in the wild, so accept either.
        o = vm.monitor.info("migrate")
        if isinstance(o, str):
            return ("Migration status: cancelled" in o or
                    "Migration status: canceled" in o)
        else:
            return (o.get("status") == "cancelled" or
                    o.get("status") == "canceled")

    def wait_for_migration():
        if not virt_utils.wait_for(mig_finished, mig_timeout, 2, 2,
                                   "Waiting for migration to finish"):
            raise error.TestFail("Timeout expired while waiting for migration "
                                 "to finish")

    # Compute the state file paths up front so the finally clause below can
    # always reference them safely (previously they were only bound after a
    # successful migration, which could raise NameError during cleanup).
    # BUGFIX: honor the save_path argument instead of always using "/tmp"
    # (the original read "save_path = None or '/tmp'").
    save_path = save_path or "/tmp"
    save1 = os.path.join(save_path, "src")
    save2 = os.path.join(save_path, "dst")

    if dest_host == 'localhost':
        dest_vm = vm.clone()

    if (dest_host == 'localhost') and stable_check:
        # Pause the dest vm after creation
        dest_vm.params['extra_params'] = (dest_vm.params.get('extra_params',
                                                             '') + ' -S')

    if dest_host == 'localhost':
        dest_vm.create(migration_mode=mig_protocol, mac_source=vm)

    try:
        try:
            if mig_protocol == "tcp":
                if dest_host == 'localhost':
                    uri = "tcp:localhost:%d" % dest_vm.migration_port
                else:
                    uri = 'tcp:%s:%d' % (dest_host, mig_port)
            elif mig_protocol == "unix":
                uri = "unix:%s" % dest_vm.migration_file
            elif mig_protocol == "exec":
                uri = '"exec:nc localhost %s"' % dest_vm.migration_port

            if offline:
                vm.monitor.cmd("stop")
            vm.monitor.migrate(uri)

            if mig_cancel:
                time.sleep(2)
                vm.monitor.cmd("migrate_cancel")
                if not virt_utils.wait_for(mig_cancelled, 60, 2, 2,
                                           "Waiting for migration "
                                           "cancellation"):
                    raise error.TestFail("Failed to cancel migration")
                if offline:
                    vm.monitor.cmd("cont")
                if dest_host == 'localhost':
                    dest_vm.destroy(gracefully=False)
                return vm
            else:
                wait_for_migration()
                if (dest_host == 'localhost') and stable_check:
                    vm.save_to_file(save1)
                    dest_vm.save_to_file(save2)
                    # Fail if we see deltas
                    md5_save1 = utils.hash_file(save1)
                    md5_save2 = utils.hash_file(save2)
                    if md5_save1 != md5_save2:
                        raise error.TestFail("Mismatch of VM state before "
                                             "and after migration")
                if (dest_host == 'localhost') and offline:
                    dest_vm.monitor.cmd("cont")
        except Exception:
            # Don't leak the half-created destination VM on failure.
            if dest_host == 'localhost':
                dest_vm.destroy()
            raise
    finally:
        if (dest_host == 'localhost') and stable_check and clean:
            logging.debug("Cleaning the state files")
            if os.path.isfile(save1):
                os.remove(save1)
            if os.path.isfile(save2):
                os.remove(save2)

    # Report migration status
    if mig_succeeded():
        logging.info("Migration finished successfully")
    elif mig_failed():
        raise error.TestFail("Migration failed")
    else:
        raise error.TestFail("Migration ended with unknown status")

    if dest_host == 'localhost':
        if dest_vm.monitor.verify_status("paused"):
            logging.debug("Destination VM is paused, resuming it")
            dest_vm.monitor.cmd("cont")

    # Kill the source VM
    vm.destroy(gracefully=False)

    # Replace the source VM with the new cloned VM
    if (dest_host == 'localhost') and (env is not None):
        env.register_vm(vm.name, dest_vm)

    # Return the new cloned VM
    if dest_host == 'localhost':
        return dest_vm
    else:
        return vm
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
           migration_mode=None, mac_source=None):
    """
    Start the VM by running a qemu command.
    All parameters are optional. If name, params or root_dir are not
    supplied, the respective values stored as class attributes are used.

    @param name: The name of the object
    @param params: A dict containing VM params
    @param root_dir: Base directory for relative filenames
    @param migration_mode: If supplied, start VM for incoming migration
            using this protocol (either 'tcp', 'unix' or 'exec')
    @param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
            (e.g. 'gzip -c -d filename') if migration_mode is 'exec'
    @param mac_source: A VM object from which to copy MAC addresses. If not
            specified, new addresses will be generated.

    @raise VMCreateError: If qemu terminates unexpectedly
    @raise VMKVMInitError: If KVM initialization fails
    @raise VMHugePageError: If hugepage initialization fails
    @raise VMImageMissingError: If a CD image is missing
    @raise VMHashMismatchError: If a CD image hash has doesn't match the
            expected hash
    @raise VMBadPATypeError: If an unsupported PCI assignment type is
            requested
    @raise VMPAError: If no PCI assignable devices could be assigned
    """
    error.context("creating '%s'" % self.name)
    self.destroy(free_mac_addresses=False)

    # Fall back to the values stored on the instance for omitted args.
    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO images
    for cdrom in params.objects("cdroms"):
        cdrom_params = params.object_params(cdrom)
        iso = cdrom_params.get("cdrom")
        if iso:
            iso = virt_utils.get_path(root_dir, iso)
            if not os.path.exists(iso):
                raise virt_vm.VMImageMissingError(iso)
            compare = False
            if cdrom_params.get("md5sum_1m"):
                # Hashing only the first MB is a faster sanity check.
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "first MB of ISO file...")
                actual_hash = utils.hash_file(iso, 1048576, method="md5")
                expected_hash = cdrom_params.get("md5sum_1m")
                compare = True
            elif cdrom_params.get("md5sum"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "ISO file...")
                actual_hash = utils.hash_file(iso, method="md5")
                expected_hash = cdrom_params.get("md5sum")
                compare = True
            elif cdrom_params.get("sha1sum"):
                logging.debug("Comparing expected SHA1 sum with SHA1 sum "
                              "of ISO file...")
                actual_hash = utils.hash_file(iso, method="sha1")
                expected_hash = cdrom_params.get("sha1sum")
                compare = True
            if compare:
                if actual_hash == expected_hash:
                    logging.debug("Hashes match")
                else:
                    raise virt_vm.VMHashMismatchError(actual_hash,
                                                      expected_hash)

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/libvirt-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections
        redir_names = params.objects("redirs")
        host_ports = virt_utils.find_free_ports(5000, 6000,
                                                len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = params.object_params(redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Generate netdev/device IDs for all NICs
        self.netdev_id = []
        self.device_id = []
        for nic in params.objects("nics"):
            self.netdev_id.append(virt_utils.generate_random_id())
            self.device_id.append(virt_utils.generate_random_id())

        # Find available PCI devices
        self.pci_devices = []
        for device in params.objects("pci_devices"):
            self.pci_devices.append(device)

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            self.vnc_port = virt_utils.find_free_port(5900, 6100)

        # Find available spice port, if needed
        if params.get("spice"):
            self.spice_port = virt_utils.find_free_port(8000, 8100)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Generate or copy MAC addresses for all NICs
        num_nics = len(params.objects("nics"))
        for vlan in range(num_nics):
            nic_name = params.objects("nics")[vlan]
            nic_params = params.object_params(nic_name)
            # Prefer an explicitly configured MAC, then the source VM's.
            mac = (nic_params.get("nic_mac") or
                   mac_source and mac_source.get_mac_address(vlan))
            if mac:
                virt_utils.set_mac_address(self.instance, vlan, mac)
            else:
                virt_utils.generate_mac_address(self.instance, vlan)

        # Make qemu command
        install_command = self.__make_libvirt_command()

        logging.info("Running libvirt command:\n%s", install_command)
        utils.run(install_command, verbose=False)
        # Wait for the domain to be created
        virt_utils.wait_for(func=self.is_alive, timeout=60,
                            text=("waiting for domain %s to start" %
                                  self.name))

        # Establish a session with the serial console -- requires a version
        # of netcat that supports -U
        self.serial_console = aexpect.ShellSession(
            "nc -U %s" % self.get_serial_console_filename(),
            auto_close=False,
            output_func=virt_utils.log_line,
            output_params=("serial-%s.log" % name,))

    finally:
        # Always release the inter-thread creation lock.
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")
    # Start tcpdump if it isn't already running
    # The address cache maps MAC addresses to IPs learned from DHCP traffic.
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Discard a dead tcpdump process so it can be restarted below.
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'dst port 68'" % virt_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump '%s'", cmd)
        env["tcpdump"] = aexpect.Tail(command=cmd,
                                      output_func=_update_address_cache,
                                      output_params=(env["address_cache"], ))
        # If tcpdump dies almost immediately it most likely failed to start.
        if virt_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn(
                "Output:" + virt_utils.format_str_for_message(
                    env["tcpdump"].get_output()))
    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not virt_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test, destroying it" % vm.name)
            vm.destroy()
            del env[key]
    # Get the KVM kernel module version and write it as a keyval
    if os.path.exists("/dev/kvm"):
        try:
            kvm_version = open("/sys/module/kvm/version").read().strip()
        except Exception:
            # Fall back to the host kernel release string.
            kvm_version = os.uname()[2]
    else:
        kvm_version = "Unknown"
        logging.debug("KVM module not loaded")
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})
    # Get the KVM userspace version and write it as a keyval
    qemu_path = virt_utils.get_path(test.bindir,
                                    params.get("qemu_binary", "qemu"))
    version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
    matches = re.findall("[Vv]ersion .*?,", version_line)
    if matches:
        kvm_userspace_version = " ".join(matches[0].split()[1:]).strip(",")
    else:
        kvm_userspace_version = "Unknown"
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})
    if params.get("setup_hugepages") == "yes":
        h = virt_test_setup.HugePageConfig(params)
        h.setup()
        # libvirtd must be restarted to pick up the hugepage mount.
        if params.get("vm_type") == "libvirt":
            libvirt_vm.libvirtd_restart()
    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")
    # Clone master image from vms.
    if params.get("master_images_clone"):
        for vm_name in params.get("vms").split():
            vm = env.get_vm(vm_name)
            if vm:
                vm.destroy(free_mac_addresses=False)
                env.unregister_vm(vm_name)
            vm_params = params.object_params(vm_name)
            for image in vm_params.get("master_images_clone").split():
                virt_vm.clone_image(params, vm_name, image, test.bindir)
    # Preprocess all VMs and images
    if params.get("not_preprocess", "no") == "no":
        process(test, params, env, preprocess_image, preprocess_vm)
    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        logging.debug("Starting screendump thread")
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              args=(test, params, env))
        _screendump_thread.start()
else: video_file = os.path.join( test.debugdir, "%s-%s.ogg" % (vm.name, test.iteration)) video.start(screendump_dir, video_file) except Exception, detail: logging.info( "Param 'encode_video_files' specified, but video " "creation failed for vm %s: %s", vm.name, detail) if params.get("kill_vm") == "yes": kill_vm_timeout = float(params.get("kill_vm_timeout", 0)) if kill_vm_timeout: logging.debug("Param 'kill_vm' specified, waiting for VM to shut " "down before killing it") virt_utils.wait_for(vm.is_dead, kill_vm_timeout, 0, 1) else: logging.debug("Param 'kill_vm' specified, killing VM") vm.destroy(gracefully=params.get("kill_vm_gracefully") == "yes") def process_command(test, params, env, command, command_timeout, command_noncritical): """ Pre- or post- custom commands to be executed before/after a test is run @param test: An Autotest test object. @param params: A dict containing all VM and image parameters. @param env: The environment (a dict-like object). @param command: Command to be run. @param command_timeout: Timeout for command execution.
def preprocess_vm(test, params, env, name):
    """
    Preprocess a single VM object according to the instructions in params.
    Start the VM if requested and get a screendump.

    @param test: An Autotest test object.
    @param params: A dict containing VM preprocessing parameters.
    @param env: The environment (a dict-like object).
    @param name: The name of the VM object.
    """
    logging.debug("Preprocessing VM '%s'", name)
    vm = env.get_vm(name)
    vm_type = params.get('vm_type')
    if not vm:
        # Create and register a VM object of the configured type.
        logging.debug("VM object for '%s' does not exist, creating it", name)
        if vm_type == 'kvm':
            vm = kvm_vm.VM(name, params, test.bindir,
                           env.get("address_cache"))
        if vm_type == 'libvirt':
            vm = libvirt_vm.VM(name, params, test.bindir,
                               env.get("address_cache"))
        env.register_vm(name, vm)

    remove_vm = False
    if params.get("force_remove_vm") == "yes":
        logging.debug("'force_remove_vm' specified; removing VM...")
        remove_vm = True
    if remove_vm:
        vm.remove()

    # Decide whether the VM needs to be (re)started.
    start_vm = False
    if params.get("restart_vm") == "yes":
        logging.debug("Param 'restart_vm' specified, (re)starting VM")
        start_vm = True
    elif params.get("migration_mode"):
        logging.debug("Param 'migration_mode' specified, starting VM in "
                      "incoming migration mode")
        start_vm = True
    elif params.get("start_vm") == "yes":
        # need to deal with libvirt VM differently than qemu
        if vm_type == 'libvirt':
            if not vm.is_alive():
                logging.debug("VM is not alive; starting it...")
                start_vm = True
        else:
            if not vm.is_alive():
                logging.debug("VM is not alive, starting it")
                start_vm = True
            if vm.needs_restart(name=name, params=params,
                                basedir=test.bindir):
                logging.debug("Current VM specs differ from requested one; "
                              "restarting it")
                start_vm = True

    if start_vm:
        if vm_type == "libvirt" and params.get("type") != "unattended_install":
            vm.params = params
            vm.start()
            # Wait for the domain to be created
            virt_utils.wait_for(func=vm.is_alive, timeout=60,
                                text=("waiting for domain %s to start" %
                                      vm.name))
        else:
            # Start the VM (or restart it if it's already up)
            vm.create(name, params, test.bindir,
                      migration_mode=params.get("migration_mode"))
    else:
        # Don't start the VM, just update its params
        vm.params = params
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    Side effects: spawns a tcpdump child process, destroys VMs no longer
    requested, writes test keyvals, may configure hugepages/THP, may run a
    user-supplied pre-command, and may start a global screendump thread.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # Start tcpdump if it isn't already running
    # The address cache maps guest MAC addresses to IPs, filled by the
    # tcpdump output callback (_update_address_cache).
    if "address_cache" not in env:
        env["address_cache"] = {}
    # Drop a stale (dead) tcpdump handle so it can be respawned below.
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        # 'dst port 68' captures DHCP replies to guests, which reveal
        # MAC-to-IP assignments.
        cmd = "%s -npvi any 'dst port 68'" % virt_utils.find_command("tcpdump")
        logging.debug("Starting tcpdump '%s'", cmd)
        env["tcpdump"] = aexpect.Tail(
            command=cmd,
            output_func=_update_address_cache,
            output_params=(env["address_cache"],))
        # If tcpdump dies within ~1s, treat the startup as failed (non-fatal).
        if virt_utils.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + virt_utils.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    # NOTE: deleting during iteration is safe here because Python 2's
    # env.keys() returns a list snapshot.
    for key in env.keys():
        vm = env[key]
        if not virt_utils.is_vm(vm):
            continue
        if not vm.name in requested_vms:
            logging.debug("VM '%s' found in environment but not required for "
                          "test, destroying it" % vm.name)
            vm.destroy()
            del env[key]

    # Get Host cpu type
    # Cached in env so the (possibly expensive) detection runs once per job.
    if params.get("auto_cpu_model") == "yes":
        if not env.get("cpu_model"):
            env["cpu_model"] = virt_utils.get_cpu_model()
        params["cpu_model"] = env.get("cpu_model")

    # Get the KVM kernel module version and write it as a keyval
    if os.path.exists("/dev/kvm"):
        try:
            kvm_version = open("/sys/module/kvm/version").read().strip()
        except Exception:
            # Fall back to the running kernel release string.
            kvm_version = os.uname()[2]
    else:
        kvm_version = "Unknown"
        logging.debug("KVM module not loaded")
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    qemu_path = virt_utils.get_path(test.bindir,
                                    params.get("qemu_binary", "qemu"))
    # Parse the "version X.Y.Z," token out of qemu's first help line.
    version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
    matches = re.findall("[Vv]ersion .*?,", version_line)
    if matches:
        kvm_userspace_version = " ".join(matches[0].split()[1:]).strip(",")
    else:
        kvm_userspace_version = "Unknown"
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    if params.get("setup_hugepages") == "yes":
        h = virt_test_setup.HugePageConfig(params)
        h.setup()
        # libvirtd must be restarted to pick up the hugepage configuration.
        if params.get("vm_type") == "libvirt":
            libvirt_vm.libvirtd_restart()

    if params.get("setup_thp") == "yes":
        thp = virt_test_setup.TransparentHugePageConfig(test, params)
        thp.setup()

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Clone master images for VMs, if requested. Existing VM objects are
    # destroyed (keeping their MACs) and unregistered first so they get
    # recreated against the cloned images.
    if params.get("master_images_clone"):
        for vm_name in params.get("vms").split():
            vm = env.get_vm(vm_name)
            if vm:
                vm.destroy(free_mac_addresses=False)
                env.unregister_vm(vm_name)
            vm_params = params.object_params(vm_name)
            for image in vm_params.get("master_images_clone").split():
                virt_utils.clone_image(params, vm_name, image, test.bindir)

    # Preprocess all VMs and images
    if params.get("not_preprocess","no") == "no":
        process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        logging.debug("Starting screendump thread")
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              args=(test, params, env))
        _screendump_thread.start()
(vm.name, test.iteration)) else: video_file = os.path.join(test.debugdir, "%s-%s.ogg" % (vm.name, test.iteration)) video.start(screendump_dir, video_file) except Exception, detail: logging.info("Param 'encode_video_files' specified, but video " "creation failed for vm %s: %s", vm.name, detail) if params.get("kill_vm") == "yes": kill_vm_timeout = float(params.get("kill_vm_timeout", 0)) if kill_vm_timeout: logging.debug("Param 'kill_vm' specified, waiting for VM to shut " "down before killing it") virt_utils.wait_for(vm.is_dead, kill_vm_timeout, 0, 1) else: logging.debug("Param 'kill_vm' specified, killing VM") vm.destroy(gracefully = params.get("kill_vm_gracefully") == "yes") def process_command(test, params, env, command, command_timeout, command_noncritical): """ Pre- or post- custom commands to be executed before/after a test is run @param test: An Autotest test object. @param params: A dict containing all VM and image parameters. @param env: The environment (a dict-like object). @param command: Command to be run. @param command_timeout: Timeout for command execution.
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
           migration_mode=None, mac_source=None):
    """
    Start the VM by running a qemu command.
    All parameters are optional. If name, params or root_dir are not
    supplied, the respective values stored as class attributes are used.

    Any previously running instance is destroyed first (MAC addresses are
    kept so the new instance can reuse them). The creation steps run under
    an exclusive host-wide file lock so concurrent tests do not race for
    the same free ports.

    @param name: The name of the object
    @param params: A dict containing VM params
    @param root_dir: Base directory for relative filenames
    @param timeout: NOTE(review): not used anywhere in this body —
            presumably kept for interface compatibility; confirm
    @param migration_mode: If supplied, start VM for incoming migration
            using this protocol (either 'tcp', 'unix' or 'exec')
    @param mac_source: A VM object from which to copy MAC addresses. If not
            specified, new addresses will be generated.

    @raise VMCreateError: If qemu terminates unexpectedly
    @raise VMKVMInitError: If KVM initialization fails
    @raise VMHugePageError: If hugepage initialization fails
    @raise VMImageMissingError: If a CD image is missing
    @raise VMHashMismatchError: If a CD image hash doesn't match the
            expected hash
    @raise VMBadPATypeError: If an unsupported PCI assignment type is
            requested
    @raise VMPAError: If no PCI assignable devices could be assigned
    """
    error.context("creating '%s'" % self.name)

    # Tear down any previous instance; keep MACs for reuse by this one.
    self.destroy(free_mac_addresses=False)

    # Explicit arguments override the stored attributes; then re-read the
    # attributes so the locals below always hold the effective values.
    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO images
    for cdrom in params.objects("cdroms"):
        cdrom_params = params.object_params(cdrom)
        iso = cdrom_params.get("cdrom")
        # Xen PV installs use a generated ks.iso that has no known hash.
        if ((self.driver_type == self.LIBVIRT_XEN) and
            (params.get('hvm_or_pv') == 'pv') and
            (os.path.basename(iso) == 'ks.iso')):
            continue
        if iso:
            iso = virt_utils.get_path(root_dir, iso)
            if not os.path.exists(iso):
                raise virt_vm.VMImageMissingError(iso)
            compare = False
            # Prefer the cheap first-MB md5 check; fall back to a full
            # md5 or sha1 of the ISO if that's what the config provides.
            if cdrom_params.get("md5sum_1m"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "first MB of ISO file...")
                actual_hash = utils.hash_file(iso, 1048576, method="md5")
                expected_hash = cdrom_params.get("md5sum_1m")
                compare = True
            elif cdrom_params.get("md5sum"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "ISO file...")
                actual_hash = utils.hash_file(iso, method="md5")
                expected_hash = cdrom_params.get("md5sum")
                compare = True
            elif cdrom_params.get("sha1sum"):
                logging.debug("Comparing expected SHA1 sum with SHA1 sum "
                              "of ISO file...")
                actual_hash = utils.hash_file(iso, method="sha1")
                expected_hash = cdrom_params.get("sha1sum")
                compare = True
            if compare:
                if actual_hash == expected_hash:
                    logging.debug("Hashes match")
                else:
                    raise virt_vm.VMHashMismatchError(actual_hash,
                                                      expected_hash)

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfile = open("/tmp/libvirt-autotest-vm-create.lock", "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections: map each requested guest port to a
        # free host port.
        redir_names = params.objects("redirs")
        host_ports = virt_utils.find_free_ports(5000, 6000,
                                                len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = params.object_params(redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Generate netdev/device IDs for all NICs
        self.netdev_id = []
        self.device_id = []
        for nic in params.objects("nics"):
            self.netdev_id.append(virt_utils.generate_random_id())
            self.device_id.append(virt_utils.generate_random_id())

        # Find available PCI devices
        self.pci_devices = []
        for device in params.objects("pci_devices"):
            self.pci_devices.append(device)

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            self.vnc_port = virt_utils.find_free_port(5900, 6100)

        # Find available spice port, if needed
        if params.get("spice"):
            self.spice_port = virt_utils.find_free_port(8000, 8100)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Generate or copy MAC addresses for all NICs.
        # An explicit nic_mac param wins; otherwise copy from mac_source
        # (if given); otherwise generate a fresh address.
        num_nics = len(params.objects("nics"))
        for vlan in range(num_nics):
            nic_name = params.objects("nics")[vlan]
            nic_params = params.object_params(nic_name)
            mac = (nic_params.get("nic_mac") or
                   mac_source and mac_source.get_mac_address(vlan))
            if mac:
                virt_utils.set_mac_address(self.instance, vlan, mac)
            else:
                virt_utils.generate_mac_address(self.instance, vlan)

        # Make qemu command
        install_command = self.__make_libvirt_command()

        logging.info("Running libvirt command:\n%s", install_command)
        utils.run(install_command, verbose=False)
        # Wait for the domain to be created
        virt_utils.wait_for(func=self.is_alive, timeout=60,
                            text=("waiting for domain %s to start" %
                                  self.name))

        # Establish a session with the serial console -- requires a version
        # of netcat that supports -U
        self.serial_console = aexpect.ShellSession(
            "nc -U %s" % self.get_serial_console_filename(),
            auto_close=False,
            output_func=virt_utils.log_line,
            output_params=("serial-%s.log" % name,))
    finally:
        # Always release the host-wide lock, even if setup failed.
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()