def _start_tcpdump(self):
    port = self._params.get('shell_port')
    prompt = self._params.get('shell_prompt')
    address = self._params.get('ovirt_node_address')
    username = self._params.get('ovirt_node_user')
    password = self._params.get('ovirt_node_password')

    cmd = "%s -npvi any 'port 68'" % utils_misc.find_command("tcpdump")
    if self._params.get("remote_preprocess") == "yes":
        login_cmd = ("ssh -o UserKnownHostsFile=/dev/null -o "
                     "PreferredAuthentications=password -p %s %s@%s" %
                     (port, username, address))

        self._tcpdump = aexpect.ShellSession(
            login_cmd,
            output_func=_update_address_cache,
            output_params=(self,))

        remote.handle_prompts(self._tcpdump, username, password, prompt)
        self._tcpdump.sendline(cmd)
    else:
        self._tcpdump = aexpect.Tail(command=cmd,
                                     output_func=_tcpdump_handler,
                                     output_params=(self, "tcpdump.log"))

    if utils_misc.wait_for(lambda: not self._tcpdump.is_alive(),
                           0.1, 0.1, 1.0):
        logging.warn("Could not start tcpdump")
        logging.warn("Status: %s", self._tcpdump.get_status())
        msg = utils_misc.format_str_for_message(self._tcpdump.get_output())
        logging.warn("Output: %s", msg)
def serial_login(self, timeout=LOGIN_TIMEOUT, username=None, password=None):
    """
    Log into the guest via the serial console.
    If timeout expires while waiting for output from the guest (e.g. a
    password prompt or a shell prompt) -- fail.

    :param timeout: Time (seconds) before giving up logging into the guest.
    :return: ShellSession object on success and None on failure.
    """
    error.context("logging into '%s' via serial console" % self.name)
    if not username:
        username = self.params.get("username", "")
    if not password:
        password = self.params.get("password", "")
    prompt = self.params.get("shell_prompt", "[\#\$]")
    linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
    status_test_command = self.params.get("status_test_command", "")

    # Sometimes the serial console needs to be recreated.
    if not (self.serial_console and
            os.path.exists(self.serial_console.inpipe_filename)):
        self.create_serial_console()

    self.serial_console.set_linesep(linesep)
    self.serial_console.set_status_test_command(status_test_command)

    # Try to get a login prompt
    self.serial_console.sendline()

    remote.handle_prompts(self.serial_console, username, password,
                          prompt, timeout)
    return self.serial_console
def serial_login(self, timeout=LOGIN_TIMEOUT, username=None, password=None):
    """
    Log into the guest via the serial console.
    If timeout expires while waiting for output from the guest (e.g. a
    password prompt or a shell prompt) -- fail.

    @param timeout: Time (seconds) before giving up logging into the guest.
    @return: ShellSession object on success and None on failure.
    """
    error.context("logging into '%s' via serial console" % self.name)
    if not username:
        username = self.params.get("username", "")
    if not password:
        password = self.params.get("password", "")
    prompt = self.params.get("shell_prompt", "[\#\$]")
    linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
    status_test_command = self.params.get("status_test_command", "")

    self.serial_console.set_linesep(linesep)
    self.serial_console.set_status_test_command(status_test_command)

    # Try to get a login prompt
    self.serial_console.sendline()

    remote.handle_prompts(self.serial_console, username, password,
                          prompt, timeout)
    return self.serial_console
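
# A minimal, self-contained sketch of an eval-free equivalent of the
# `eval("'%s'" % ...)` line-separator conversion used in the serial_login()
# variants above. Assumptions: the params store the separator in escaped
# form such as r"\n", and Python 3 str semantics; this helper is
# illustrative only and is not part of the original module.
def _decode_linesep(raw=r"\n"):
    # Turn the escaped text "\n" into the real newline control character.
    return raw.encode("ascii").decode("unicode_escape")
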
def _start_tcpdump(self):
    port = self._params.get('shell_port')
    prompt = self._params.get('shell_prompt')
    address = self._params.get('ovirt_node_address')
    username = self._params.get('ovirt_node_user')
    password = self._params.get('ovirt_node_password')

    cmd_template = "%s -npvvvi any 'port 68 or port 546'"
    cmd = cmd_template % utils_misc.find_command("tcpdump")
    if self._params.get("remote_preprocess") == "yes":
        login_cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                     "-o StrictHostKeyChecking=no "
                     "-o PreferredAuthentications=password -p %s %s@%s" %
                     (port, username, address))

        self._tcpdump = aexpect.ShellSession(
            login_cmd,
            output_func=_update_address_cache,
            output_params=(self,))

        remote.handle_prompts(self._tcpdump, username, password, prompt)
        self._tcpdump.sendline(cmd)
    else:
        self._tcpdump = aexpect.Tail(command=cmd,
                                     output_func=_tcpdump_handler,
                                     output_params=(self, "tcpdump.log"))

    if utils_misc.wait_for(lambda: not self._tcpdump.is_alive(),
                           0.1, 0.1, 1.0):
        logging.warn("Could not start tcpdump")
        logging.warn("Status: %s", self._tcpdump.get_status())
        msg = utils_misc.format_str_for_message(self._tcpdump.get_output())
        logging.warn("Output: %s", msg)
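
# Note on the capture filter used above: UDP port 68 is the DHCPv4/BOOTP
# client port and UDP port 546 is the DHCPv6 client port, so
# 'port 68 or port 546' captures the address assignments guests receive over
# both IPv4 and IPv6. The earlier variant above watches only 'port 68', i.e.
# IPv4 leases.
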
def _start_remote(self):
    address, port, username, password, prompt = self._remote_opts
    cmd = "%s %s" % (self.command, self.options)
    logging.debug("Run '%s' on host '%s'", cmd, address)
    login_cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                 "-o StrictHostKeyChecking=no "
                 "-o PreferredAuthentications=password -p %s %s@%s" %
                 (port, username, address))
    self._process = aexpect.ShellSession(
        login_cmd,
        output_func=self._output_logger_handler)
    handle_prompts(self._process, username, password, prompt)
    self._process.sendline(cmd)
def _start_tcpdump(self):
    cmd_template = "%s -npvvvi any 'port 68 or port 546'"
    if self._params.get("remote_preprocess") == "yes":
        client = self._params.get('remote_shell_client', 'ssh')
        port = self._params.get('remote_shell_port', '22')
        prompt = self._params.get('remote_shell_prompt', '#')
        address = self._params.get('remote_node_address')
        username = self._params.get('remote_node_user')
        password = self._params.get('remote_node_password')
        rsession = None
        try:
            rsession = remote.remote_login(client, address, port,
                                           username, password, prompt)
            tcpdump_bin = rsession.cmd_output("which tcpdump")
            rsession.close()
        except process.CmdError:
            # If the login itself failed there is no session to close.
            if rsession is not None:
                rsession.close()
            raise exceptions.TestError("Can't find tcpdump binary!")

        cmd = cmd_template % tcpdump_bin.strip()
        logging.debug("Run '%s' on host '%s'", cmd, address)
        login_cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                     "-o StrictHostKeyChecking=no "
                     "-o PreferredAuthentications=password -p %s %s@%s" %
                     (port, username, address))

        self._tcpdump = aexpect.ShellSession(
            login_cmd,
            output_func=_update_address_cache,
            output_params=(self,))

        remote.handle_prompts(self._tcpdump, username, password, prompt)
        self._tcpdump.sendline(cmd)
    else:
        cmd = cmd_template % utils_path.find_command("tcpdump")
        self._tcpdump = aexpect.Tail(command=cmd,
                                     output_func=_tcpdump_handler,
                                     output_params=(self, "tcpdump.log"))

    if utils_misc.wait_for(lambda: not self._tcpdump.is_alive(),
                           0.1, 0.1, 1.0):
        logging.warn("Could not start tcpdump")
        logging.warn("Status: %s", self._tcpdump.get_status())
        msg = utils_misc.format_str_for_message(self._tcpdump.get_output())
        logging.warn("Output: %s", msg)
def _start_tcpdump(self):
    cmd_template = "%s -npvvvi any 'port 68 or port 546'"
    if self._params.get("remote_preprocess") == "yes":
        client = self._params.get('remote_shell_client', 'ssh')
        port = self._params.get('remote_shell_port', '22')
        prompt = self._params.get('remote_shell_prompt', '#')
        address = self._params.get('remote_node_address')
        username = self._params.get('remote_node_user')
        password = self._params.get('remote_node_password')
        rsession = None
        try:
            rsession = remote.remote_login(client, address, port,
                                           username, password, prompt)
            tcpdump_bin = rsession.cmd_output("which tcpdump")
            rsession.close()
        except process.CmdError:
            # If the login itself failed there is no session to close.
            if rsession is not None:
                rsession.close()
            raise exceptions.TestError("Can't find tcpdump binary!")

        cmd = cmd_template % tcpdump_bin.strip()
        logging.debug("Run '%s' on host '%s'", cmd, address)
        login_cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                     "-o StrictHostKeyChecking=no "
                     "-o PreferredAuthentications=password -p %s %s@%s" %
                     (port, username, address))

        self._tcpdump = aexpect.ShellSession(
            login_cmd,
            output_func=_update_address_cache,
            output_params=(self,))

        remote.handle_prompts(self._tcpdump, username, password, prompt)
        self._tcpdump.sendline(cmd)
    else:
        cmd = cmd_template % utils_path.find_command("tcpdump")
        self._tcpdump = aexpect.Tail(command=cmd,
                                     output_func=_tcpdump_handler,
                                     output_params=(self, "tcpdump.log"))

    if not utils_misc.wait_for(lambda: self._tcpdump.is_alive(),
                               1.0, 0.1, 0.1):
        logging.warn("Could not start tcpdump")
        logging.warn("Status: %s", self._tcpdump.get_status())
        msg = utils_misc.format_str_for_message(self._tcpdump.get_output())
        logging.warn("Output: %s", msg)
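
# The two _start_tcpdump() variants above end with opposite-looking health
# checks because of how wait_for() reports results: it polls the callable
# until it returns a truthy value and then returns that value, or returns
# None once the timeout expires. A minimal self-contained sketch of that
# contract (an assumption for illustration, not the real utils_misc
# implementation, which also accepts a `text` argument and logs it):
import time


def wait_for(func, timeout, first=0.0, step=1.0):
    # Poll `func` every `step` seconds, starting after `first` seconds,
    # for at most `timeout` seconds; return its truthy result or None.
    end_time = time.time() + timeout
    time.sleep(first)
    while time.time() < end_time:
        output = func()
        if output:
            return output
        time.sleep(step)
    return None


# So `wait_for(lambda: not p.is_alive(), 0.1, 0.1, 1.0)` is truthy only if
# the process died almost immediately, while
# `not wait_for(lambda: p.is_alive(), 1.0, 0.1, 0.1)` is True when the
# process never came up within one second -- both express "tcpdump failed".
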
def serial_login(self, timeout=LOGIN_TIMEOUT, username=None, password=None):
    """
    Log into the guest via the serial console.
    If timeout expires while waiting for output from the guest (e.g. a
    password prompt or a shell prompt) -- fail.

    :param timeout: Time (seconds) before giving up logging into the guest.
    :return: ShellSession object on success and None on failure.
    """
    error.context("logging into '%s' via serial console" % self.name)
    if not username:
        username = self.params.get("username", "")
    if not password:
        password = self.params.get("password", "")
    prompt = self.params.get("shell_prompt", "[\#\$]")
    linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n"))
    status_test_command = self.params.get("status_test_command", "")

    # Sometimes the serial console needs to be recreated.
    if not os.path.exists(self.serial_console.inpipe_filename):
        try:
            tmp_serial = self.serial_ports[0]
        except IndexError:
            raise self.VMConfigMissingError(self.name, "isa_serial")

        self.serial_console = aexpect.ShellSession(
            "nc -U %s" % self.get_serial_console_filename(tmp_serial),
            auto_close=False,
            output_func=utils_misc.log_line,
            output_params=("serial-%s-%s.log" % (tmp_serial, self.name),),
            prompt=self.params.get("shell_prompt", "[\#\$]"))
        del tmp_serial

    self.serial_console.set_linesep(linesep)
    self.serial_console.set_status_test_command(status_test_command)

    # Try to get a login prompt
    self.serial_console.sendline()

    remote.handle_prompts(self.serial_console, username, password,
                          prompt, timeout)
    return self.serial_console
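
# A hypothetical usage sketch for serial_login() above (the helper name and
# the `vm` argument are illustrative assumptions, not part of this module):
# log in over the serial console, run one command, and always close the
# session.
def _example_serial_check(vm):
    session = vm.serial_login(timeout=240)
    try:
        # cmd_output() is a standard aexpect.ShellSession method.
        return session.cmd_output("uname -r").strip()
    finally:
        session.close()
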
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # First, let's verify if this test does require root or not. If it
    # does and the test suite is running as a regular user, we shall just
    # throw a TestNAError exception, which will skip the test.
    if params.get('requires_root', 'no') == 'yes':
        utils_misc.verify_running_as_root()

    port = params.get('shell_port')
    prompt = params.get('shell_prompt')
    address = params.get('ovirt_node_address')
    username = params.get('ovirt_node_user')
    password = params.get('ovirt_node_password')

    setup_pb = False
    for nic in params.get('nics', "").split():
        nic_params = params.object_params(nic)
        if nic_params.get('netdst') == 'private':
            setup_pb = True
            params_pb = nic_params
            params['netdst_%s' % nic] = nic_params.get("priv_brname", 'atbr0')

    if setup_pb:
        brcfg = test_setup.PrivateBridgeConfig(params_pb)
        brcfg.setup()

    # Start tcpdump if it isn't already running
    if "address_cache" not in env:
        env["address_cache"] = {}
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'port 68'" % utils_misc.find_command("tcpdump")
        if params.get("remote_preprocess") == "yes":
            login_cmd = ("ssh -o UserKnownHostsFile=/dev/null -o "
                         "PreferredAuthentications=password -p %s %s@%s" %
                         (port, username, address))
            env["tcpdump"] = aexpect.ShellSession(
                login_cmd,
                output_func=_update_address_cache,
                output_params=(env["address_cache"],))
            remote.handle_prompts(env["tcpdump"], username, password, prompt)
            env["tcpdump"].sendline(cmd)
        else:
            env["tcpdump"] = aexpect.Tail(
                command=cmd,
                output_func=_tcpdump_handler,
                output_params=(env["address_cache"], "tcpdump.log"))

        if utils_misc.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + utils_misc.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not isinstance(vm, virt_vm.BaseVM):
            continue
        if vm.name not in requested_vms:
            vm.destroy()
            del env[key]

    if (params.get("auto_cpu_model") == "yes" and
            params.get("vm_type") == "qemu"):
        if not env.get("cpu_model"):
            env["cpu_model"] = utils_misc.get_qemu_best_cpu_model(params)
        params["cpu_model"] = env.get("cpu_model")

    kvm_ver_cmd = params.get("kvm_ver_cmd", "")
    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_version = "Unknown"
    else:
        # Get the KVM kernel module version and write it as a keyval
        if os.path.exists("/dev/kvm"):
            try:
                kvm_version = open("/sys/module/kvm/version").read().strip()
            except Exception:
                kvm_version = os.uname()[2]
        else:
            logging.warning("KVM module not loaded")
            kvm_version = "Unknown"
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    kvm_userspace_ver_cmd = params.get("kvm_userspace_ver_cmd", "")
    if kvm_userspace_ver_cmd:
        try:
            cmd_result = utils.run(kvm_userspace_ver_cmd)
            kvm_userspace_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_userspace_version = "Unknown"
    else:
        qemu_path = utils_misc.get_qemu_binary(params)
        version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
        matches = re.findall("[Vv]ersion .*?,", version_line)
        if matches:
            kvm_userspace_version = " ".join(
                matches[0].split()[1:]).strip(",")
        else:
            kvm_userspace_version = "Unknown"
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        h.setup()
        if params.get("vm_type") == "libvirt":
            libvirt_vm.libvirtd_restart()

    if params.get("setup_thp") == "yes":
        thp = test_setup.TransparentHugePageConfig(test, params)
        thp.setup()

    if params.get("setup_ksm") == "yes":
        ksm = test_setup.KSMConfig(params, env)
        ksm.setup(env)

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Clone master image from vms.
    base_dir = data_dir.get_data_dir()
    if params.get("master_images_clone"):
        for vm_name in params.get("vms").split():
            vm = env.get_vm(vm_name)
            if vm:
                vm.destroy(free_mac_addresses=False)
                env.unregister_vm(vm_name)

            vm_params = params.object_params(vm_name)
            for image in vm_params.get("master_images_clone").split():
                image_obj = qemu_storage.QemuImg(params, base_dir, image)
                image_obj.clone_image(params, vm_name, image, base_dir)

    # Preprocess all VMs and images
    if params.get("not_preprocess", "no") == "no":
        process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              name='ScreenDump',
                                              args=(test, params, env))
        _screendump_thread.start()
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    :param test: An Autotest test object.
    :param params: A dict containing all VM and image parameters.
    :param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # First, let's verify if this test does require root or not. If it
    # does and the test suite is running as a regular user, we shall just
    # throw a TestNAError exception, which will skip the test.
    if params.get('requires_root', 'no') == 'yes':
        utils_misc.verify_running_as_root()

    port = params.get('shell_port')
    prompt = params.get('shell_prompt')
    address = params.get('ovirt_node_address')
    username = params.get('ovirt_node_user')
    password = params.get('ovirt_node_password')

    setup_pb = False
    for nic in params.get('nics', "").split():
        nic_params = params.object_params(nic)
        if nic_params.get('netdst') == 'private':
            setup_pb = True
            params_pb = nic_params
            params['netdst_%s' % nic] = nic_params.get("priv_brname", 'atbr0')

    if setup_pb:
        brcfg = test_setup.PrivateBridgeConfig(params_pb)
        brcfg.setup()

    base_dir = data_dir.get_data_dir()
    if params.get("storage_type") == "iscsi":
        iscsidev = qemu_storage.Iscsidev(params, base_dir, "iscsi")
        params["image_name"] = iscsidev.setup()
        params["image_raw_device"] = "yes"

    # Start tcpdump if it isn't already running
    if "address_cache" not in env:
        env["address_cache"] = {}
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'port 68'" % utils_misc.find_command("tcpdump")
        if params.get("remote_preprocess") == "yes":
            login_cmd = ("ssh -o UserKnownHostsFile=/dev/null -o "
                         "PreferredAuthentications=password -p %s %s@%s" %
                         (port, username, address))
            env["tcpdump"] = aexpect.ShellSession(
                login_cmd,
                output_func=_update_address_cache,
                output_params=(env["address_cache"],))
            remote.handle_prompts(env["tcpdump"], username, password, prompt)
            env["tcpdump"].sendline(cmd)
        else:
            env["tcpdump"] = aexpect.Tail(
                command=cmd,
                output_func=_tcpdump_handler,
                output_params=(env["address_cache"], "tcpdump.log"))

        if utils_misc.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + utils_misc.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not isinstance(vm, virt_vm.BaseVM):
            continue
        if vm.name not in requested_vms:
            vm.destroy()
            del env[key]

    if (params.get("auto_cpu_model") == "yes" and
            params.get("vm_type") == "qemu"):
        if not env.get("cpu_model"):
            env["cpu_model"] = utils_misc.get_qemu_best_cpu_model(params)
        params["cpu_model"] = env.get("cpu_model")

    kvm_ver_cmd = params.get("kvm_ver_cmd", "")
    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_version = "Unknown"
    else:
        # Get the KVM kernel module version and write it as a keyval
        if os.path.exists("/dev/kvm"):
            try:
                kvm_version = open("/sys/module/kvm/version").read().strip()
            except Exception:
                kvm_version = os.uname()[2]
        else:
            logging.warning("KVM module not loaded")
            kvm_version = "Unknown"
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    kvm_userspace_ver_cmd = params.get("kvm_userspace_ver_cmd", "")
    if kvm_userspace_ver_cmd:
        try:
            cmd_result = utils.run(kvm_userspace_ver_cmd)
            kvm_userspace_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_userspace_version = "Unknown"
    else:
        qemu_path = utils_misc.get_qemu_binary(params)
        version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
        matches = re.findall("[Vv]ersion .*?,", version_line)
        if matches:
            kvm_userspace_version = " ".join(
                matches[0].split()[1:]).strip(",")
        else:
            kvm_userspace_version = "Unknown"
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        suggest_mem = h.setup()
        if suggest_mem is not None:
            params['mem'] = suggest_mem
        if params.get("vm_type") == "libvirt":
            utils_libvirtd.libvirtd_restart()

    if params.get("setup_thp") == "yes":
        thp = test_setup.TransparentHugePageConfig(test, params)
        thp.setup()

    if params.get("setup_ksm") == "yes":
        ksm = test_setup.KSMConfig(params, env)
        ksm.setup(env)

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # If you want "pci=nomsi" set before the test, set
    # "disable_pci_msi = yes" and "pci_msi_sensitive = yes".
    if params.get("pci_msi_sensitive", "no") == "yes":
        disable_pci_msi = params.get("disable_pci_msi", "no")
        image_filename = storage.get_image_filename(params,
                                                    data_dir.get_data_dir())
        grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
        kernel_cfg_pos_reg = params.get("kernel_cfg_pos_reg",
                                        r".*vmlinuz-\d+.*")
        msi_keyword = params.get("msi_keyword", " pci=nomsi")

        disk_obj = utils_disk.GuestFSModiDisk(image_filename)
        kernel_config_ori = disk_obj.read_file(grub_file)
        kernel_config = re.findall(kernel_cfg_pos_reg, kernel_config_ori)
        if not kernel_config:
            raise error.TestError("Cannot find the kernel config, reg is %s" %
                                  kernel_cfg_pos_reg)
        kernel_config_line = kernel_config[0]

        kernel_need_modify = False
        if disable_pci_msi == "yes":
            if not re.findall(msi_keyword, kernel_config_line):
                kernel_config_set = kernel_config_line + msi_keyword
                kernel_need_modify = True
        else:
            if re.findall(msi_keyword, kernel_config_line):
                kernel_config_set = re.sub(msi_keyword, "",
                                           kernel_config_line)
                kernel_need_modify = True

        if kernel_need_modify:
            for vm in env.get_all_vms():
                if vm:
                    vm.destroy()
                    env.unregister_vm(vm.name)
            disk_obj.replace_image_file_content(grub_file,
                                                kernel_config_line,
                                                kernel_config_set)
        logging.debug("Guest cmdline 'pci=nomsi' setting is: [ %s ]" %
                      disable_pci_msi)

    kernel_extra_params = params.get("kernel_extra_params")
    if kernel_extra_params:
        image_filename = storage.get_image_filename(params,
                                                    data_dir.get_data_dir())
        grub_file = params.get("grub_file", "/boot/grub2/grub.cfg")
        kernel_cfg_pos_reg = params.get("kernel_cfg_pos_reg",
                                        r".*vmlinuz-\d+.*")

        disk_obj = utils_disk.GuestFSModiDisk(image_filename)
        kernel_config_ori = disk_obj.read_file(grub_file)
        kernel_config = re.findall(kernel_cfg_pos_reg, kernel_config_ori)
        if not kernel_config:
            raise error.TestError("Cannot find the kernel config, reg is %s" %
                                  kernel_cfg_pos_reg)
        kernel_config_line = kernel_config[0]

        kernel_need_modify = False
        if not re.findall(kernel_extra_params, kernel_config_line):
            kernel_config_set = kernel_config_line + kernel_extra_params
            kernel_need_modify = True

        if kernel_need_modify:
            for vm in env.get_all_vms():
                if vm:
                    vm.destroy()
                    env.unregister_vm(vm.name)
            disk_obj.replace_image_file_content(grub_file,
                                                kernel_config_line,
                                                kernel_config_set)
        logging.debug("Guest cmdline extra_params setting is: [ %s ]" %
                      kernel_extra_params)

    # Clone master image from vms.
    base_dir = data_dir.get_data_dir()
    if params.get("master_images_clone"):
        for vm_name in params.get("vms").split():
            vm = env.get_vm(vm_name)
            if vm:
                vm.destroy(free_mac_addresses=False)
                env.unregister_vm(vm_name)

            vm_params = params.object_params(vm_name)
            for image in vm_params.get("master_images_clone").split():
                image_obj = qemu_storage.QemuImg(params, base_dir, image)
                image_obj.clone_image(params, vm_name, image, base_dir)

    # Preprocess all VMs and images
    if params.get("not_preprocess", "no") == "no":
        process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              name='ScreenDump',
                                              args=(test, params, env))
        _screendump_thread.start()

    return params
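
# A standalone sketch (made-up sample data, not part of the original module)
# of the grub-line edit performed by the pci_msi_sensitive and
# kernel_extra_params blocks above: locate the kernel boot line with the
# position regex and append the extra parameter only when it is missing.
import re


def _example_append_kernel_param(grub_text, pos_reg=r".*vmlinuz-\d+.*",
                                 extra=" pci=nomsi"):
    kernel_line = re.findall(pos_reg, grub_text)[0]
    if not re.findall(extra, kernel_line):
        grub_text = grub_text.replace(kernel_line, kernel_line + extra)
    return grub_text


# Example:
#     _example_append_kernel_param("linux /vmlinuz-3.10.0 ro quiet")
#     -> "linux /vmlinuz-3.10.0 ro quiet pci=nomsi"
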
def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    @param test: An Autotest test object.
    @param params: A dict containing all VM and image parameters.
    @param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # First, let's verify if this test does require root or not. If it
    # does and the test suite is running as a regular user, we shall just
    # throw a TestNAError exception, which will skip the test.
    if params.get('requires_root', 'no') == 'yes':
        utils_test.verify_running_as_root()

    port = params.get('shell_port')
    prompt = params.get('shell_prompt')
    address = params.get('ovirt_node_address')
    username = params.get('ovirt_node_user')
    password = params.get('ovirt_node_password')

    # Start tcpdump if it isn't already running
    if "address_cache" not in env:
        env["address_cache"] = {}
    if "tcpdump" in env and not env["tcpdump"].is_alive():
        env["tcpdump"].close()
        del env["tcpdump"]
    if "tcpdump" not in env and params.get("run_tcpdump", "yes") == "yes":
        cmd = "%s -npvi any 'port 68'" % utils_misc.find_command("tcpdump")
        if params.get("remote_preprocess") == "yes":
            login_cmd = ("ssh -o UserKnownHostsFile=/dev/null -o "
                         "PreferredAuthentications=password -p %s %s@%s" %
                         (port, username, address))
            env["tcpdump"] = aexpect.ShellSession(
                login_cmd,
                output_func=_update_address_cache,
                output_params=(env["address_cache"],))
            remote.handle_prompts(env["tcpdump"], username, password, prompt)
            env["tcpdump"].sendline(cmd)
        else:
            env["tcpdump"] = aexpect.Tail(
                command=cmd,
                output_func=_tcpdump_handler,
                output_params=(env["address_cache"], "tcpdump.log"))

        if utils_misc.wait_for(lambda: not env["tcpdump"].is_alive(),
                               0.1, 0.1, 1.0):
            logging.warn("Could not start tcpdump")
            logging.warn("Status: %s" % env["tcpdump"].get_status())
            logging.warn("Output:" + utils_misc.format_str_for_message(
                env["tcpdump"].get_output()))

    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not isinstance(vm, virt_vm.BaseVM):
            continue
        if vm.name not in requested_vms:
            vm.destroy()
            del env[key]

    if (params.get("auto_cpu_model") == "yes" and
            params.get("vm_type") == "qemu"):
        if not env.get("cpu_model"):
            env["cpu_model"] = utils_misc.get_qemu_best_cpu_model(params)
        params["cpu_model"] = env.get("cpu_model")

    kvm_ver_cmd = params.get("kvm_ver_cmd", "")
    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_version = "Unknown"
    else:
        # Get the KVM kernel module version and write it as a keyval
        if os.path.exists("/dev/kvm"):
            try:
                kvm_version = open("/sys/module/kvm/version").read().strip()
            except Exception:
                kvm_version = os.uname()[2]
        else:
            logging.warning("KVM module not loaded")
            kvm_version = "Unknown"
    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    kvm_userspace_ver_cmd = params.get("kvm_userspace_ver_cmd", "")
    if kvm_userspace_ver_cmd:
        try:
            cmd_result = utils.run(kvm_userspace_ver_cmd)
            kvm_userspace_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_userspace_version = "Unknown"
    else:
        qemu_path = utils_misc.get_path(test.bindir,
                                        params.get("qemu_binary", "qemu"))
        version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
        matches = re.findall("[Vv]ersion .*?,", version_line)
        if matches:
            kvm_userspace_version = " ".join(
                matches[0].split()[1:]).strip(",")
        else:
            kvm_userspace_version = "Unknown"
    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        h.setup()
        if params.get("vm_type") == "libvirt":
            libvirt_vm.libvirtd_restart()

    if params.get("setup_thp") == "yes":
        thp = test_setup.TransparentHugePageConfig(test, params)
        thp.setup()

    # Execute any pre_commands
    if params.get("pre_command"):
        process_command(test, params, env, params.get("pre_command"),
                        int(params.get("pre_command_timeout", "600")),
                        params.get("pre_command_noncritical") == "yes")

    # Clone master image from vms.
    base_dir = data_dir.get_data_dir()
    if params.get("master_images_clone"):
        for vm_name in params.get("vms").split():
            vm = env.get_vm(vm_name)
            if vm:
                vm.destroy(free_mac_addresses=False)
                env.unregister_vm(vm_name)

            vm_params = params.object_params(vm_name)
            for image in vm_params.get("master_images_clone").split():
                image_obj = qemu_storage.QemuImg(params, base_dir, image)
                image_obj.clone_image(params, vm_name, image, base_dir)

    # Preprocess all VMs and images
    if params.get("not_preprocess", "no") == "no":
        process(test, params, env, preprocess_image, preprocess_vm)

    # Start the screendump thread
    if params.get("take_regular_screendumps") == "yes":
        global _screendump_thread, _screendump_thread_termination_event
        _screendump_thread_termination_event = threading.Event()
        _screendump_thread = threading.Thread(target=_take_screendumps,
                                              name='ScreenDump',
                                              args=(test, params, env))
        _screendump_thread.start()