def test_cpu_vendor_intel(self):
    """get_cpu_vendor_name() must report 'intel' for a GenuineIntel /proc/cpuinfo."""
    # Canned /proc/cpuinfo content captured from an Intel Core i7 host.
    fake_cpuinfo = b"""processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 60 model name : Intel(R) Core(TM) i7-4810MQ CPU @ 2.80GHz stepping : 3 microcode : 0x24 cpu MHz : 1766.058 cache size : 6144 KB physical id : 0 siblings : 8 core id : 0 cpu cores : 4 apicid : 0 initial apicid : 0 fpu : yes fpu_exception : yes cpuid level : 13 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt ibpb ibrs stibp dtherm ida arat pln pts bugs : cpu_meltdown spectre_v1 spectre_v2 bogomips : 5586.93 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: """
    # Make every open() return the fake cpuinfo so the detector reads our data.
    patched_open = unittest.mock.patch(
        'builtins.open', return_value=self._get_file_mock(fake_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_vendor_name(), "intel")
def get_kvm_module_list():
    """
    Return the KVM kernel modules expected for the host architecture.

    On x86_64 the vendor-specific companion module (kvm-intel / kvm-amd)
    is included; aarch64 builds KVM into the kernel, so no module is
    needed.  Architectures not listed fall through and yield ``None``
    (behavior preserved from the original implementation).
    """
    if ARCH == 'x86_64':
        # Vendor name ("intel"/"amd") selects the companion module.
        vendor = cpu.get_cpu_vendor_name()
        return ["kvm", "kvm-%s" % vendor]
    if ARCH in ('ppc64', 'ppc64le'):
        # FIXME: Please correct it if anyone still want to use KVM-PR mode
        return ["kvm", "kvm-hv"]
    if ARCH in ('s390', 's390x'):
        return ["kvm"]
    if ARCH == "aarch64":
        return []
def get_kvm_module_list():
    """
    Return the KVM kernel modules expected for the host architecture.

    Uses ``cpu.get_vendor()`` when the installed avocado provides it,
    otherwise falls back to the older ``cpu.get_cpu_vendor_name()``.
    Architectures not listed fall through and yield ``None`` (behavior
    preserved from the original implementation).
    """
    if ARCH == 'x86_64':
        # Prefer the newer avocado.utils.cpu API, keep the old one working.
        if hasattr(cpu, 'get_vendor'):
            vendor = cpu.get_vendor()
        else:
            vendor = cpu.get_cpu_vendor_name()
        return ["kvm", "kvm-%s" % vendor]
    if ARCH in ('ppc64', 'ppc64le'):
        # FIXME: Please correct it if anyone still want to use KVM-PR mode
        return ["kvm", "kvm-hv"]
    if ARCH in ('s390', 's390x', 'mips64', 'loongarch64'):
        return ["kvm"]
    if ARCH == "aarch64":
        return []
def test_cpu_vendor_power9(self):
    """get_cpu_vendor_name() must report 'power9' for a POWER9 /proc/cpuinfo."""
    # Canned /proc/cpuinfo content captured from a POWER9 PowerNV host.
    fake_cpuinfo = b"""processor : 20 cpu : POWER9 (raw), altivec supported clock : 2050.000000MHz revision : 1.0 (pvr 004e 0100) timebase : 512000000 platform : PowerNV model : 8375-42A machine : PowerNV 8375-42A firmware : OPAL """
    patched_open = unittest.mock.patch(
        'builtins.open', return_value=self._get_file_mock(fake_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_vendor_name(), "power9")
def test_cpu_vendor_power8(self):
    """get_cpu_vendor_name() must report 'power8' for a POWER8E /proc/cpuinfo."""
    # Canned /proc/cpuinfo content captured from a POWER8E PowerNV host.
    fake_cpuinfo = b"""processor : 88 cpu : POWER8E (raw), altivec supported clock : 3325.000000MHz revision : 2.1 (pvr 004b 0201) timebase : 512000000 platform : PowerNV model : 8247-21L machine : PowerNV 8247-21L firmware : OPAL v3 """
    patched_open = unittest.mock.patch(
        'builtins.open', return_value=self._get_file_mock(fake_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_vendor_name(), "power8")
def run(test, params, env):
    """
    [seabios] seabios support IOMMU for virtio-blk
    [seabios] seabios support IOMMU for virtio-scsi
    [seabios] seabios support IOMMU for virtio-net

    this case will:
    1) Boot guest with virtio devices and iommu is on.
    2) Check 'info block'.
    3) Read and write data on data disks.
    4) Ping guest.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # vIOMMU with these virtio devices is only exercised on Intel hosts.
    if cpu.get_cpu_vendor_name() != 'intel':
        test.cancel("This case only support Intel platform")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)
    check_data_disks(test, params, env, vm, session)
    session.close()
    error_context.context("Ping guest!", logging.info)
    guest_ip = vm.get_address()
    status, output = utils_test.ping(guest_ip, count=10, timeout=20)
    if status:
        test.fail("Ping guest failed!")
    ratio = utils_test.get_loss_ratio(output)
    if ratio != 0:
        # BUG FIX: Test.fail() accepts a single message argument; the old
        # logging-style call test.fail("Loss ratio is %s", ratio) raised
        # TypeError instead of failing with the intended message.
        test.fail("Loss ratio is %s" % ratio)
    error_context.context("Check kernel crash message!", logging.info)
    vm.verify_kernel_crash()
def run(test, params, env):
    """
    ept test:
    1) Turn off ept on host
    2) Check if reading kvm_intel parameter crash host
    3) Launch a guest
    4) Check no error in guest
    5) Restore env, turn on ept

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # EPT is an Intel VT-x feature; the test is meaningless elsewhere.
    if cpu.get_cpu_vendor_name() != 'intel':
        test.cancel("This test is supposed to run on Intel host")
    unload_cmd = params["unload_cmd"]
    load_cmd = params["load_cmd"]
    read_cmd = params["read_cmd"]
    # Remember the original ept setting so it can be restored afterwards.
    ept_value = process.system_output(read_cmd % "ept")
    # BUG FIX: initialize vm before the try block; the finally clause used
    # to reference 'vm' unconditionally, so any failure before env.get_vm()
    # raised NameError in cleanup and masked the original error.
    vm = None
    try:
        process.system(unload_cmd)
        process.system(load_cmd % "0")
        # Reading another kvm_intel parameter must not crash the host.
        process.system(read_cmd % "vmentry_l1d_flush")
        params["start_vm"] = "yes"
        vm = env.get_vm(params["main_vm"])
        env_process.preprocess_vm(test, params, env, vm.name)
        timeout = float(params.get("login_timeout", 240))
        vm.wait_for_login(timeout=timeout)
        vm.verify_kernel_crash()
    finally:
        if vm is not None:
            vm.destroy()
        # Restore the original ept module parameter.
        process.system(unload_cmd)
        process.system(load_cmd % ept_value)
def run(test, params, env):
    """
    Boot guest with iommu_platform, then do ping test

    1) Boot a VM with iommu_platform=on
    2) add intel_iommu=on in guest kernel line
    3) reboot guest
    4) do ping test

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    # intel_iommu is Intel-specific; cancel on any other vendor.
    if cpu.get_cpu_vendor_name() != 'intel':
        test.cancel("This case only support Intel platform")
    login_timeout = int(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)
    ping_count = int(params.get("ping_count", 10))
    guest_ip = vm.get_address()
    try:
        status, output = utils_test.ping(guest_ip, ping_count,
                                         timeout=float(ping_count) * 1.5)
        if status != 0:
            test.fail("Ping returns non-zero value %s" % output)
        package_lost = utils_test.get_loss_ratio(output)
        if package_lost != 0:
            # BUG FIX: corrected spelling in the user-visible failure
            # message ("packeage" -> "package").
            test.fail("%s package lost when ping guest ip %s " %
                      (package_lost, guest_ip))
    finally:
        session.close()
def run(test, params, env):
    """
    Qemu reboot test:
    1) Get cpu model lists supported by host
    2) Check if current cpu model is in the supported lists, if no, cancel test
    3) Otherwise, boot guest with the cpu model
    4) Check cpu model name in guest
    5) Check cpu flags in guest(only for linux guest)
    6) Reboot guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    qemu_binary = utils_misc.get_qemu_binary(params)
    # Ask a throwaway qemu (no machine, no display) for its cpu definitions
    # over stdio QMP, keeping only the tagged query response.
    qmp_cmds = ['{"execute": "qmp_capabilities"}',
                '{"execute": "query-cpu-definitions", "id": "RAND91"}',
                '{"execute": "quit"}']
    cmd = ("echo -e '{0}' | {1} -qmp stdio -vnc none -M none | grep return |"
           "grep RAND91").format(r"\n".join(qmp_cmds), qemu_binary)
    raw = decode_to_text(process.system_output(cmd, timeout=10,
                                               ignore_status=True,
                                               shell=True, verbose=False))
    definitions = json.loads(raw)["return"]
    model = params["model"]
    model_pattern = params["model_pattern"]
    flags = params["flags"]
    # Spectre-mitigated model variants differ by vendor.
    if cpu.get_cpu_vendor_name() == 'intel':
        model_ib = "%s-IBRS" % model
        flag_ib = " ibpb ibrs"
        name_ib = ", IBRS( update)?"
    else:
        model_ib = "%s-IBPB" % model
        flag_ib = " ibpb"
        name_ib = " \\(with IBPB\\)"
    # Only models with no unavailable features are usable on this host.
    usable = [entry["name"] for entry in definitions
              if not entry["unavailable-features"]]
    if model_ib in usable:
        cpu_model = model_ib
        guest_model = model_pattern % name_ib
        flags += flag_ib
    elif model in usable:
        cpu_model = model
        guest_model = model_pattern % ""
    else:
        test.cancel("This host doesn't support cpu model %s" % model)
    params["cpu_model"] = cpu_model
    params["start_vm"] = "yes"
    vm_name = params['main_vm']
    env_process.preprocess_vm(test, params, env, vm_name)
    vm = env.get_vm(vm_name)
    error_context.context("Try to log into guest", logging.info)
    session = vm.wait_for_login()
    error_context.context("Check cpu model inside guest", logging.info)
    model_out = session.cmd_output(params["get_model_cmd"])
    if not re.search(guest_model, model_out):
        test.fail("Guest cpu model is not right")
    if params["os_type"] == "linux":
        error_context.context("Check cpu flags inside guest", logging.info)
        guest_flags = session.cmd_output(params["check_flag_cmd"]).split()
        missing = [flag for flag in flags.split() if flag not in guest_flags]
        if missing:
            test.fail("Flag %s not in guest" % missing)
    if params.get("reboot_method"):
        error_context.context("Reboot guest '%s'." % vm.name, logging.info)
        vm.reboot(session=session)
    vm.verify_kernel_crash()
    session.close()
def set_condition(vm_name, condn, reset=False, guestbt=None):
    """
    Set domain to given state or reset it.

    :param vm_name: name of the domain to operate on
    :param condn: condition to apply ("avocado_test", "stress", "save",
                  "managedsave", "suspend", "hotplug", "host_smt")
    :param reset: False to apply the condition, True to undo it
    :param guestbt: background avocado test handle to join when resetting
                    the "avocado_test" condition
    :return: the background test handle for "avocado_test", else None
    """
    bt = None
    if not reset:
        # --- apply the requested condition ---
        if condn == "avocado_test":
            testlist = utils_test.get_avocadotestlist(params)
            bt = utils_test.run_avocado_bg(vm, params, test, testlist)
            if not bt:
                test.cancel("guest stress failed to start")
            # Allow stress to start
            time.sleep(condn_sleep_sec)
            return bt
        elif condn == "stress":
            utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
        elif condn in ["save", "managedsave"]:
            # No action
            pass
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "hotplug":
            # Hotplug vcpus up to the maximum and verify all counters.
            result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {
                'max_config': max_vcpu,
                'max_live': max_vcpu,
                'cur_config': current_vcpu,
                'cur_live': max_vcpu,
                'guest_live': max_vcpu
            }
            result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
        elif condn == "host_smt":
            # Only POWER9 hosts may change SMT while guests are live.
            if cpuutil.get_cpu_vendor_name() == 'power9':
                result = process.run("ppc64_cpu --smt=4", shell=True)
            else:
                test.cancel(
                    "Host SMT changes not allowed during guest live")
        else:
            logging.debug("No operation for the domain")
    else:
        # --- undo / recover from the condition ---
        if condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                test.error("No save file for domain restore")
        elif condn == "managedsave":
            result = virsh.managedsave(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "suspend":
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "avocado_test":
            # Wait for the background guest stress test to finish.
            guestbt.join()
        elif condn == "stress":
            utils_test.unload_stress("stress_in_vms", params=params,
                                     vms=[vm])
        elif condn == "hotplug":
            # Unplug back down to the current vcpu count and verify.
            result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {
                'max_config': max_vcpu,
                'max_live': current_vcpu,
                'cur_config': current_vcpu,
                'cur_live': current_vcpu,
                'guest_live': current_vcpu
            }
            result = cpu.check_vcpu_value(vm, exp_vcpu, option="--live")
        elif condn == "host_smt":
            result = process.run("ppc64_cpu --smt=2", shell=True)
            # Change back the host smt
            result = process.run("ppc64_cpu --smt=4", shell=True)
            # Work around due to known cgroup issue after cpu hot(un)plug
            # sequence
            root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
            machine_cpuset_paths = []
            if os.path.isdir(
                    os.path.join(root_cpuset_path, "machine.slice")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine.slice"))
            if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine"))
            if not machine_cpuset_paths:
                logging.warning("cgroup cpuset might not recover properly "
                                "for guests after host smt changes, "
                                "restore it manually")
            root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
            for path in machine_cpuset_paths:
                machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                # check if file content differs
                cmd = "diff %s %s" % (root_cpuset_cpus,
                                      machine_cpuset_cpus)
                if process.system(cmd, verbose=True, ignore_status=True):
                    cmd = "cp %s %s" % (root_cpuset_cpus,
                                        machine_cpuset_cpus)
                    process.system(cmd, verbose=True)
        else:
            logging.debug("No need recover the domain")
    return bt
def create_host_os_cfg(options):
    """
    Generate the host.cfg variants file describing the host OS and CPU.

    Distro name/version/release/arch come from command-line overrides when
    given, otherwise from distro detection; CPU vendor/family/version are
    probed via avocado's cpu utils when the running avocado provides the
    corresponding API (older versions may not — hence the hasattr checks).

    :param options: parsed command-line options (read via get_opt)
    """
    def _forced_or_detected(forced, detected):
        # Prefer the user-forced value; fall back to the detected one.
        if forced:
            return forced
        else:
            return detected
    host_os_cfg_path = data_dir.get_backend_cfg_path(
        get_opt(options, 'vt.type'), 'host.cfg')
    with open(host_os_cfg_path, 'w') as cfg:
        detected = distro.detect()
        name = host_os_get_distro_name(options, detected)
        version = _forced_or_detected(
            get_opt(options, 'vt_host_distro_version'),
            "m%s" % detected.version)
        release = _forced_or_detected(
            get_opt(options, 'vt_host_distro_release'),
            "u%s" % detected.release)
        arch = _forced_or_detected(get_opt(options, 'vt_host_distro_arch'),
                                   "Host_arch_%s" % detected.arch)
        # Newer avocado exposes get_vendor(); older only the legacy name.
        vendor = cpu.get_vendor() if hasattr(
            cpu, 'get_vendor') else cpu.get_cpu_vendor_name()
        family = None
        if hasattr(cpu, 'get_family'):
            try:
                family = cpu.get_family()
            except Exception:
                # Family detection is best-effort; omit the variant on error.
                pass
        cpu_version = cpu.get_version() if hasattr(cpu,
                                                   'get_version') else None
        # Replace special chars with _ to avoid bootstrap failure
        cpu_version = re.sub(r'[^\w-]', '_',
                             cpu_version) if cpu_version else cpu_version
        cfg.write("variants:\n")
        cfg.write(" - @Host:\n")
        cfg.write(" variants:\n")
        cfg.write(" - @%s:\n" % name)
        cfg.write(" variants:\n")
        cfg.write(" - @%s:\n" % version)
        cfg.write(" variants:\n")
        cfg.write(" - @%s:\n" % release)
        cfg.write(" variants:\n")
        cfg.write(" - @%s:\n" % arch)
        if vendor:
            cfg.write("variants:\n")
            cfg.write(" - @HostCpuVendor:\n")
            cfg.write(" variants:\n")
            cfg.write(" - @%s:\n" % vendor)
        if family:
            cfg.write("variants:\n")
            cfg.write(" - @HostCpuFamily:\n")
            cfg.write(" variants:\n")
            cfg.write(" - @%s:\n" % family)
        if cpu_version:
            cfg.write(" variants:\n")
            cfg.write(" - @HostCpuVersion:\n")
            cfg.write(" variants:\n")
            cfg.write(" - @%s:\n" % cpu_version)
    # Report whether values came from detection, the CLI, or a mix.
    count = [
        get_opt(options, 'vt_host_distro_name'),
        get_opt(options, 'vt_host_distro_version'),
        get_opt(options, 'vt_host_distro_release'),
        get_opt(options, 'vt_host_distro_arch')
    ].count(None)
    if count == 4:
        source = "distro detection"
    elif count == 0:
        source = "command line parameters"
    else:
        source = "distro detection and command line parameters"
    LOG.debug("Config file %s generated from %s", host_os_cfg_path, source)
def get_kvm_module_list():
    """
    Return the KVM kernel modules expected for the host architecture.

    Unlisted architectures fall through and yield ``None`` (behavior
    preserved from the original implementation).
    """
    if ARCH == 'x86_64':
        # x86 additionally needs the vendor module (kvm-intel / kvm-amd).
        host_cpu_type = cpu.get_cpu_vendor_name()
        return ["kvm", "kvm-%s" % host_cpu_type]
    if ARCH in ('ppc64', 'ppc64le'):
        return ["kvm"]
def run(test, params, env):
    """
    Qemu reboot test:
    1) Start qemu to get cpu model supported by host
    3) Boot guest with the cpu model
    4) Check cpu model name in guest
    5) Check cpu flags in guest(only for linux guest)
    6) Reboot guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error_context.context("Start qemu to get support cpu model", logging.info)
    vm = env.get_vm(params["main_vm"])
    # Query the monitor for the host's cpu definitions, then drop this VM;
    # a fresh one is created below with the chosen model.
    definitions = vm.monitor.info("cpu-definitions")
    vm.destroy()
    model = params["model"]
    model_pattern = params["model_pattern"]
    flags = params["flags"]
    # Spectre-mitigated model variants differ by vendor.
    if cpu.get_cpu_vendor_name() == 'intel':
        model_ib = "%s-IBRS" % model
        flag_ib = " ibpb ibrs"
        name_ib = ", IBRS( update)?"
    else:
        model_ib = "%s-IBPB" % model
        flag_ib = " ibpb"
        name_ib = " \\(with IBPB\\)"
    # Only models with no unavailable features are usable on this host.
    usable = [entry["name"] for entry in definitions
              if not entry["unavailable-features"]]
    if model_ib in usable:
        cpu_model = model_ib
        guest_model = model_pattern % name_ib
        flags += flag_ib
    elif model in usable:
        cpu_model = model
        guest_model = model_pattern % ""
    else:
        test.cancel("This host doesn't support cpu model %s" % model)
    params["paused_after_start_vm"] = "no"
    params["cpu_model"] = cpu_model
    vm.create(params=params)
    vm.verify_alive()
    error_context.context("Try to log into guest", logging.info)
    session = vm.wait_for_login()
    error_context.context("Check cpu model inside guest", logging.info)
    model_out = session.cmd_output(params["get_model_cmd"])
    if not re.search(guest_model, model_out):
        test.fail("Guest cpu model is not right")
    if params["os_type"] == "linux":
        error_context.context("Check cpu flags inside guest", logging.info)
        guest_flags = session.cmd_output(params["check_flag_cmd"]).split()
        missing = [flag for flag in flags.split() if flag not in guest_flags]
        if missing:
            test.fail("Flag %s not in guest" % missing)
    if params.get("reboot_method"):
        error_context.context("Reboot guest '%s'." % vm.name, logging.info)
        vm.reboot(session=session)
    vm.verify_kernel_crash()
    session.close()