def preprocess(test, params, env):
    """
    Preprocess all VMs and images according to the instructions in params.
    Also, collect some host information, such as the KVM version.

    :param test: An Autotest test object.
    :param params: A dict containing all VM and image parameters.
    :param env: The environment (a dict-like object).
    """
    error.context("preprocessing")

    # First, let's verify if this test does require root or not. If it
    # does and the test suite is running as a regular user, we shall just
    # throw a TestNAError exception, which will skip the test.
    if params.get('requires_root', 'no') == 'yes':
        utils_misc.verify_running_as_root()

    port = params.get('shell_port')
    prompt = params.get('shell_prompt')
    address = params.get('ovirt_node_address')
    username = params.get('ovirt_node_user')
    password = params.get('ovirt_node_password')

    setup_pb = False
    for nic in params.get('nics', "").split():
        nic_params = params.object_params(nic)
        if nic_params.get('netdst') == 'private':
            setup_pb = True
            params_pb = nic_params
            params['netdst_%s' % nic] = nic_params.get("priv_brname", 'atbr0')

    if setup_pb:
        brcfg = test_setup.PrivateBridgeConfig(params_pb)
        brcfg.setup()

    base_dir = data_dir.get_data_dir()
    if params.get("storage_type") == "iscsi":
        iscsidev = qemu_storage.Iscsidev(params, base_dir, "iscsi")
        params["image_name"] = iscsidev.setup()
        params["image_raw_device"] = "yes"

    if params.get("storage_type") == "lvm":
        lvmdev = qemu_storage.LVMdev(params, base_dir, "lvm")
        params["image_name"] = lvmdev.setup()
        params["image_raw_device"] = "yes"
        env.register_lvmdev("lvm_%s" % params["main_vm"], lvmdev)

    if params.get("storage_type") == "nfs":
        image_nfs = nfs.Nfs(params)
        image_nfs.setup()
        image_name_only = os.path.basename(params["image_name"])
        params['image_name'] = os.path.join(image_nfs.mount_dir,
                                            image_name_only)
        for image_name in params.objects("images"):
            name_tag = "image_name_%s" % image_name
            if params.get(name_tag):
                image_name_only = os.path.basename(params[name_tag])
                params[name_tag] = os.path.join(image_nfs.mount_dir,
                                                image_name_only)

    # Start tcpdump if it isn't already running. It has to be started
    # here so that the test params are honored.
    env.start_tcpdump(params)

    # Destroy and remove VMs that are no longer needed in the environment
    requested_vms = params.objects("vms")
    for key in env.keys():
        vm = env[key]
        if not isinstance(vm, virt_vm.BaseVM):
            continue
        if vm.name not in requested_vms:
            vm.destroy()
            del env[key]

    if (params.get("auto_cpu_model") == "yes" and
            params.get("vm_type") == "qemu"):
        if not env.get("cpu_model"):
            env["cpu_model"] = utils_misc.get_qemu_best_cpu_model(params)
        params["cpu_model"] = env.get("cpu_model")

    kvm_ver_cmd = params.get("kvm_ver_cmd", "")

    if kvm_ver_cmd:
        try:
            cmd_result = utils.run(kvm_ver_cmd)
            kvm_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_version = "Unknown"
    else:
        # Get the KVM kernel module version and write it as a keyval
        if os.path.exists("/dev/kvm"):
            try:
                kvm_version = open("/sys/module/kvm/version").read().strip()
            except Exception:
                kvm_version = os.uname()[2]
        else:
            logging.warning("KVM module not loaded")
            kvm_version = "Unknown"

    logging.debug("KVM version: %s" % kvm_version)
    test.write_test_keyval({"kvm_version": kvm_version})

    # Get the KVM userspace version and write it as a keyval
    kvm_userspace_ver_cmd = params.get("kvm_userspace_ver_cmd", "")
    if kvm_userspace_ver_cmd:
        try:
            cmd_result = utils.run(kvm_userspace_ver_cmd)
            kvm_userspace_version = cmd_result.stdout.strip()
        except error.CmdError:
            kvm_userspace_version = "Unknown"
    else:
        qemu_path = utils_misc.get_qemu_binary(params)
        version_line = commands.getoutput("%s -help | head -n 1" % qemu_path)
        matches = re.findall("[Vv]ersion .*?,", version_line)
        if matches:
            kvm_userspace_version = " ".join(matches[0].split()[1:]).strip(",")
        else:
            kvm_userspace_version = "Unknown"

    logging.debug("KVM userspace version: %s" % kvm_userspace_version)
    test.write_test_keyval({"kvm_userspace_version": kvm_userspace_version})

    libvirtd_inst = utils_libvirtd.Libvirtd()

    if params.get("setup_hugepages") == "yes":
        h = test_setup.HugePageConfig(params)
        suggest_mem = h.setup()
        if suggest_mem is not None:
            params['mem'] = suggest_mem
        if params.get("vm_type") == "libvirt":
            libvirtd_inst.restart()

    if params.get("setup_thp") == "yes":
        thp = test_setup.TransparentHugePageConfig(test, params)
        thp.setup()

    if params.get("setup_ksm") == "yes":
        ksm = test_setup.KSMConfig(params, env)
        ksm.setup(env)

    if params.get("vm_type") == "libvirt":
        if params.get("setup_libvirt_polkit") == "yes":
            pol = test_setup.LibvirtPolkitConfig(params)
            try:
                pol.setup()
            except test_setup.PolkitWriteLibvirtdConfigError, e:
                logging.error(e)
            except test_setup.PolkitRulesSetupError, e:
                logging.error(e)
            except Exception, e:
                logging.error("Unexpected error: %s" % e)
    aexpect.kill_tail_threads()

    living_vms = [vm for vm in env.get_all_vms() if vm.is_alive()]
    # Close all monitor socket connections of living vm.
    for vm in living_vms:
        if hasattr(vm, "monitors"):
            for m in vm.monitors:
                try:
                    m.close()
                except Exception:
                    pass
        # Close the serial console session, as it'll help
        # keeping the number of filedescriptors used by virt-test honest.
        vm.cleanup_serial_console()

    libvirtd_inst = utils_libvirtd.Libvirtd()

    if params.get("setup_hugepages") == "yes":
        try:
            h = test_setup.HugePageConfig(params)
            h.cleanup()
            if params.get("vm_type") == "libvirt":
                libvirtd_inst.restart()
        except Exception, details:
            err += "\nHP cleanup: %s" % str(details).replace('\\n', '\n ')
            logging.error(details)

    if params.get("setup_thp") == "yes":
        try:
            thp = test_setup.TransparentHugePageConfig(test, params)
            thp.cleanup()