def setUp(self):
    """
    Read the core/socket/thread topology via lscpu, raise printk
    verbosity, and set the SMT level to the maximum the processor
    generation supports.
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    self.nfail = 0
    # One lscpu pipeline template for all three topology fields.
    lscpu_fmt = "lscpu | grep '%s'| awk '{print $%s}'"
    self.CORES = process.system_output(
        lscpu_fmt % ("Core(s) per socket:", "4"), shell=True)
    self.SOCKETS = process.system_output(
        lscpu_fmt % ("Socket(s):", "2"), shell=True)
    self.THREADS = process.system_output(
        lscpu_fmt % ("Thread(s) per core:", "4"), shell=True)
    self.T_CORES = int(self.CORES) * int(self.SOCKETS)
    self.log.info(" Cores = %s and threads = %s "
                  % (self.T_CORES, self.THREADS))
    # Raise console log level so SMT transitions show up in dmesg.
    process.system("echo 8 > /proc/sys/kernel/printk", shell=True,
                   ignore_status=True)
    arch = cpu.get_cpu_arch().lower()
    # Default 4-way SMT; power8 supports 8, power6 only 2.
    self.max_smt = {'power8': 8, 'power6': 2}.get(arch, 4)
    process.system_output("ppc64_cpu --smt=%s" % self.max_smt, shell=True)
    self.path = "/sys/devices/system/cpu"
def setUp(self):
    """
    Verifies if powerpc-utils is installed, and gets current SMT value.

    Cancels when the host is not ppc64, when powerpc-utils cannot be
    installed, or when the machine is not SMT capable / is in a mixed
    ST+SMT state.
    """
    if 'ppc' not in distro.detect().arch:
        self.cancel("Processor is not ppc64")
    if SoftwareManager().check_installed("powerpc-utils") is False:
        if SoftwareManager().install("powerpc-utils") is False:
            self.cancel("powerpc-utils is not installing")
    self.smt_str = "ppc64_cpu --smt"
    smt_op = process.system_output(self.smt_str, shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # the substring checks and str-split below would raise TypeError.
    if isinstance(smt_op, bytes):
        smt_op = smt_op.decode("utf-8")
    if "is not SMT capable" in smt_op:
        self.cancel("Machine is not SMT capable")
    if "Inconsistent state" in smt_op:
        self.cancel("Machine has mix of ST and SMT cores")
    # "SMT=8" -> "8"; "SMT is off" -> "off"
    self.curr_smt = smt_op.strip().split("=")[-1].split()[-1]
    self.smt_subcores = 0
    if os.path.exists("/sys/devices/system/cpu/subcores_per_core"):
        self.smt_subcores = 1
    self.failures = 0
    self.failure_message = "\n"
    self.smt_values = {1: "off"}
    self.key = 0
    self.value = ""
    self.max_smt_value = 4
    if cpu.get_cpu_arch().lower() == 'power9':
        # Radix-mode power9 caps SMT at 4; hash MMU allows 8.
        if 'Hash' in genio.read_file('/proc/cpuinfo').rstrip('\t\r\n\0'):
            self.max_smt_value = 8
    if cpu.get_cpu_arch().lower() == 'power8':
        self.max_smt_value = 8
    if cpu.get_cpu_arch().lower() == 'power6':
        self.max_smt_value = 2
def setUp(self):
    """
    Gather the CPU topology (cores, sockets, threads) with lscpu and
    bump the SMT mode to the processor generation's maximum.
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    self.nfail = 0
    self.CORES = process.system_output(
        "lscpu | grep 'Core(s) per socket:'" "| awk '{print $4}'",
        shell=True)
    self.SOCKETS = process.system_output(
        "lscpu | grep 'Socket(s):'" "| awk '{print $2}'",
        shell=True)
    self.THREADS = process.system_output(
        "lscpu | grep 'Thread(s) per core" ":'| awk '{print $4}'",
        shell=True)
    self.T_CORES = int(self.CORES) * int(self.SOCKETS)
    self.log.info(" Cores = %s and threads = %s "
                  % (self.T_CORES, self.THREADS))
    # Make kernel messages visible on the console during the test.
    process.system("echo 8 > /proc/sys/kernel/printk", shell=True,
                   ignore_status=True)
    arch = cpu.get_cpu_arch().lower()
    if arch == 'power8':
        self.max_smt = 8
    elif arch == 'power6':
        self.max_smt = 2
    else:
        self.max_smt = 4
    process.system_output("ppc64_cpu --smt=%s" % self.max_smt, shell=True)
    self.path = "/sys/devices/system/cpu"
def setUp(self):
    """
    Read the CPU topology via lscpu, raise printk verbosity through
    genio, and set the SMT level to the generation maximum.
    """
    if distro.detect().arch not in ['ppc64', 'ppc64le']:
        self.cancel("Only supported in powerpc system")
    self.loop = int(self.params.get('test_loop', default=100))
    self.nfail = 0
    lscpu_fmt = "lscpu | grep '%s'| awk '{print $%s}'"
    self.CORES = process.system_output(
        lscpu_fmt % ("Core(s) per socket:", "4"), shell=True)
    self.SOCKETS = process.system_output(
        lscpu_fmt % ("Socket(s):", "2"), shell=True)
    self.THREADS = process.system_output(
        lscpu_fmt % ("Thread(s) per core:", "4"), shell=True)
    self.T_CORES = int(self.CORES) * int(self.SOCKETS)
    self.log.info(" Cores = %s and threads = %s "
                  % (self.T_CORES, self.THREADS))
    # Show all kernel messages on the console while the test runs.
    genio.write_one_line('/proc/sys/kernel/printk', "8")
    self.max_smt = {'power8': 8, 'power6': 2}.get(
        cpu.get_cpu_arch().lower(), 4)
    process.system("ppc64_cpu --smt=%s" % self.max_smt, shell=True)
    self.path = "/sys/devices/system/cpu"
def setUp(self):
    """
    Verifies if powerpc-utils is installed, and gets current SMT value.

    Cancels when the host is not ppc64, when powerpc-utils cannot be
    installed, or when the machine is not SMT capable.
    """
    command = "uname -p"
    processor = process.system_output(command, ignore_status=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # "'ppc' not in <bytes>" would raise TypeError under Python 3.
    if isinstance(processor, bytes):
        processor = processor.decode("utf-8")
    if 'ppc' not in processor:
        self.cancel("Processor is not ppc64")
    if SoftwareManager().check_installed("powerpc-utils") is False:
        if SoftwareManager().install("powerpc-utils") is False:
            self.cancel("powerpc-utils is not installing")
    smt_op = process.system_output("ppc64_cpu --smt")
    if isinstance(smt_op, bytes):
        smt_op = smt_op.decode("utf-8")
    if "is not SMT capable" in smt_op:
        self.cancel("Machine is not SMT capable")
    # Extract the last token after '=' (e.g. "SMT=8" -> "8").
    curr_smt = process.system_output(
        "ppc64_cpu --smt | awk -F'=' '{print $NF}' | awk '{print $NF}'",
        shell=True)
    if isinstance(curr_smt, bytes):
        curr_smt = curr_smt.decode("utf-8")
    self.curr_smt = curr_smt
    self.smt_subcores = 0
    if os.path.exists("/sys/devices/system/cpu/subcores_per_core"):
        self.smt_subcores = 1
    self.failures = 0
    self.failure_message = "\n"
    self.smt_values = {1: "off"}
    self.key = 0
    self.value = ""
    self.max_smt_value = 4
    if cpu.get_cpu_arch().lower() == 'power8':
        self.max_smt_value = 8
    if cpu.get_cpu_arch().lower() == 'power6':
        self.max_smt_value = 2
def setUp(self):
    """
    Verifies if powerpc-utils is installed, and gets current SMT value.

    Cancels when the host is not ppc64, when powerpc-utils cannot be
    installed, or when the machine is not SMT capable / is in a mixed
    ST+SMT state.
    """
    if 'ppc' not in distro.detect().arch:
        self.cancel("Processor is not ppc64")
    if SoftwareManager().check_installed("powerpc-utils") is False:
        if SoftwareManager().install("powerpc-utils") is False:
            self.cancel("powerpc-utils is not installing")
    self.smt_str = "ppc64_cpu --smt"
    smt_op = process.system_output(self.smt_str, shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # the substring checks and str-split below would raise TypeError.
    if isinstance(smt_op, bytes):
        smt_op = smt_op.decode("utf-8")
    if "is not SMT capable" in smt_op:
        self.cancel("Machine is not SMT capable")
    if "Inconsistent state" in smt_op:
        self.cancel("Machine has mix of ST and SMT cores")
    # "SMT=8" -> "8"; "SMT is off" -> "off"
    self.curr_smt = smt_op.strip().split("=")[-1].split()[-1]
    self.smt_subcores = 0
    if os.path.exists("/sys/devices/system/cpu/subcores_per_core"):
        self.smt_subcores = 1
    self.failures = 0
    self.failure_message = "\n"
    self.smt_values = {1: "off"}
    self.key = 0
    self.value = ""
    self.max_smt_value = 4
    if cpu.get_cpu_arch().lower() == 'power8':
        self.max_smt_value = 8
    if cpu.get_cpu_arch().lower() == 'power6':
        self.max_smt_value = 2
def setUp(self):
    """
    Build 'HTX'.

    Installs the build prerequisites for the detected distro, fetches
    and compiles HTX from the open-power GitHub master branch, installs
    it, and (when the 'smt_change' parameter is set) records the SMT
    values to cycle through and the current SMT level.
    """
    uname = process.system_output('uname -a', shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # "'ppc64' not in <bytes>" would raise TypeError under Python 3.
    if isinstance(uname, bytes):
        uname = uname.decode("utf-8")
    if 'ppc64' not in uname:
        self.cancel("Supported only on Power Architecture")
    detected_distro = distro.detect()
    self.mdt_file = self.params.get('mdt_file', default='mdt.mem')
    self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
    self.smt = self.params.get('smt_change', default=False)
    packages = ['git', 'gcc', 'make']
    if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
        packages.extend(['gcc-c++', 'ncurses-devel', 'tar'])
    elif detected_distro.name == "Ubuntu":
        packages.extend(
            ['libncurses5', 'g++', 'ncurses-dev', 'libncurses-dev'])
    elif detected_distro.name == 'SuSE':
        packages.extend(['libncurses5', 'gcc-c++', 'ncurses-devel', 'tar'])
    else:
        self.cancel("Test not supported in %s" % detected_distro.name)
    smm = SoftwareManager()
    for pkg in packages:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Can not install %s" % pkg)
    url = "https://github.com/open-power/HTX/archive/master.zip"
    tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
    archive.extract(tarball, self.teststmpdir)
    htx_path = os.path.join(self.teststmpdir, "HTX-master")
    os.chdir(htx_path)
    # These exercisers do not build on all setups; strip them from the
    # Makefile before compiling.
    exercisers = ["hxecapi_afu_dir", "hxedapl", "hxecapi", "hxeocapi"]
    for exerciser in exercisers:
        process.run("sed -i 's/%s//g' %s/bin/Makefile" % (exerciser,
                                                          htx_path))
    build.make(htx_path, extra_args='all')
    build.make(htx_path, extra_args='tar')
    process.run('tar --touch -xvzf htx_package.tar.gz')
    os.chdir('htx_package')
    if process.system('./installer.sh -f'):
        self.fail("Installation of htx fails:please refer job.log")
    if self.smt:
        self.max_smt_value = 8
        if cpu.get_cpu_arch().lower() == 'power7':
            self.max_smt_value = 4
        if cpu.get_cpu_arch().lower() == 'power6':
            self.max_smt_value = 2
        self.smt_values = ["off", "on"]
        for i in range(2, self.max_smt_value + 1):
            self.smt_values.append(str(i))
        curr_smt = process.system_output(
            "ppc64_cpu --smt | awk -F'=' '{print $NF}' | awk "
            "'{print $NF}'", shell=True)
        if isinstance(curr_smt, bytes):
            curr_smt = curr_smt.decode("utf-8")
        self.curr_smt = curr_smt
def setUp(self):
    """
    Build 'HTX'.

    Ubuntu-only variant: installs build dependencies, builds the HTX
    Debian package from GitHub master, installs it, validates the MDT
    file and the block devices to exercise, and optionally records SMT
    data when 'smt_change' is set.
    """
    uname = process.system_output('uname -a', shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # "'ppc64' not in <bytes>" would raise TypeError under Python 3.
    if isinstance(uname, bytes):
        uname = uname.decode("utf-8")
    if 'ppc64' not in uname:
        self.cancel("Platform does not supports")
    if distro.detect().name != 'Ubuntu':
        self.cancel("Distro does not support")
    self.mdt_file = self.params.get('mdt_file', default='mdt.hd')
    self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
    self.block_devices = self.params.get('disk', default=None)
    if self.block_devices is None:
        self.cancel("Needs the block devices to run the HTX")
    self.block_device = []
    for disk in self.block_devices.split():
        # Keep only the device basename (e.g. /dev/sdb -> sdb).
        self.block_device.append(disk.rsplit("/")[-1])
    self.block_device = " ".join(self.block_device)
    packages = [
        'git', 'gcc', 'make', 'libncurses5', 'g++', 'libdapl-dev',
        'ncurses-dev', 'libncurses-dev', 'libcxl-dev'
    ]
    smm = SoftwareManager()
    for pkg in packages:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Can not install %s" % pkg)
    url = "https://github.com/open-power/HTX/archive/master.zip"
    tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
    archive.extract(tarball, self.teststmpdir)
    htx_path = os.path.join(self.teststmpdir, "HTX-master")
    os.chdir(htx_path)
    build.run_make(htx_path, extra_args='all')
    build.run_make(htx_path, extra_args='deb')
    # Remove any stale install before installing the fresh package.
    process.run('dpkg -r htxubuntu')
    process.run('dpkg --purge htxubuntu')
    process.run('dpkg -i htxubuntu.deb')
    if not os.path.exists("/usr/lpp/htx/mdt/%s" % self.mdt_file):
        self.cancel("MDT file %s not found" % self.mdt_file)
    self.smt = self.params.get('smt_change', default=False)
    if self.smt:
        self.max_smt_value = 8
        if cpu.get_cpu_arch().lower() == 'power7':
            self.max_smt_value = 4
        if cpu.get_cpu_arch().lower() == 'power6':
            self.max_smt_value = 2
        self.smt_values = ["off", "on"]
        for i in range(2, self.max_smt_value + 1):
            self.smt_values.append(str(i))
        curr_smt = process.system_output(
            "ppc64_cpu --smt | awk -F'=' '{print $NF}' | awk "
            "'{print $NF}'", shell=True)
        if isinstance(curr_smt, bytes):
            curr_smt = curr_smt.decode("utf-8")
        self.curr_smt = curr_smt
def setUp(self):
    """
    Build 'HTX'.

    Ubuntu-only variant: installs build dependencies, builds and
    installs the HTX Debian package from GitHub master, validates the
    MDT file and block devices, and optionally records SMT data when
    'smt_change' is set.
    """
    uname = process.system_output('uname -a', shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # "'ppc64' not in <bytes>" would raise TypeError under Python 3.
    if isinstance(uname, bytes):
        uname = uname.decode("utf-8")
    if 'ppc64' not in uname:
        self.cancel("Platform does not supports")
    if distro.detect().name != 'Ubuntu':
        self.cancel("Distro does not support")
    self.mdt_file = self.params.get('mdt_file', default='mdt.hd')
    self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
    self.block_devices = self.params.get('disk', default=None)
    if self.block_devices is None:
        self.cancel("Needs the block devices to run the HTX")
    self.block_device = []
    for disk in self.block_devices.split():
        # Keep only the device basename (e.g. /dev/sdb -> sdb).
        self.block_device.append(disk.rsplit("/")[-1])
    self.block_device = " ".join(self.block_device)
    packages = ['git', 'gcc', 'make', 'libncurses5', 'g++', 'libdapl-dev',
                'ncurses-dev', 'libncurses-dev', 'libcxl-dev']
    smm = SoftwareManager()
    for pkg in packages:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Can not install %s" % pkg)
    url = "https://github.com/open-power/HTX/archive/master.zip"
    tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
    archive.extract(tarball, self.teststmpdir)
    htx_path = os.path.join(self.teststmpdir, "HTX-master")
    os.chdir(htx_path)
    build.run_make(htx_path, extra_args='all')
    build.run_make(htx_path, extra_args='deb')
    # Remove any stale install before installing the fresh package.
    process.run('dpkg -r htxubuntu')
    process.run('dpkg --purge htxubuntu')
    process.run('dpkg -i htxubuntu.deb')
    if not os.path.exists("/usr/lpp/htx/mdt/%s" % self.mdt_file):
        self.cancel("MDT file %s not found" % self.mdt_file)
    self.smt = self.params.get('smt_change', default=False)
    if self.smt:
        self.max_smt_value = 8
        if cpu.get_cpu_arch().lower() == 'power7':
            self.max_smt_value = 4
        if cpu.get_cpu_arch().lower() == 'power6':
            self.max_smt_value = 2
        self.smt_values = ["off", "on"]
        for i in range(2, self.max_smt_value + 1):
            self.smt_values.append(str(i))
        curr_smt = process.system_output(
            "ppc64_cpu --smt | awk -F'=' '{print $NF}' | awk "
            "'{print $NF}'", shell=True)
        if isinstance(curr_smt, bytes):
            curr_smt = curr_smt.decode("utf-8")
        self.curr_smt = curr_smt
def setUp(self):
    """
    Build 'HTX'.

    Installs build prerequisites for rhel-family or Ubuntu, builds HTX
    from GitHub master, installs it, and optionally records SMT data
    when 'smt_change' is set.
    """
    uname = process.system_output('uname -a', shell=True)
    # Fix: system_output() returns bytes on recent avocado releases, so
    # "'ppc64' not in <bytes>" would raise TypeError under Python 3.
    if isinstance(uname, bytes):
        uname = uname.decode("utf-8")
    if 'ppc64' not in uname:
        self.cancel("Supported only on Power Architecture")
    detected_distro = distro.detect()
    self.mdt_file = self.params.get('mdt_file', default='mdt.mem')
    self.time_limit = int(self.params.get('time_limit', default=2)) * 3600
    self.smt = self.params.get('smt_change', default=False)
    packages = ['git', 'gcc', 'make']
    if detected_distro.name in ['centos', 'fedora', 'rhel', 'redhat']:
        packages.extend(['gcc-c++', 'ncurses-devel', 'dapl-devel',
                         'libcxl-devel'])
    elif detected_distro.name == "Ubuntu":
        packages.extend(['libncurses5', 'g++', 'libdapl-dev',
                         'ncurses-dev', 'libncurses-dev', 'libcxl-dev'])
    else:
        self.cancel("Test not supported in %s" % detected_distro.name)
    smm = SoftwareManager()
    for pkg in packages:
        if not smm.check_installed(pkg) and not smm.install(pkg):
            self.cancel("Can not install %s" % pkg)
    url = "https://github.com/open-power/HTX/archive/master.zip"
    tarball = self.fetch_asset("htx.zip", locations=[url], expire='7d')
    archive.extract(tarball, self.teststmpdir)
    htx_path = os.path.join(self.teststmpdir, "HTX-master")
    os.chdir(htx_path)
    build.make(htx_path, extra_args='all')
    build.make(htx_path, extra_args='tar')
    process.run('tar --touch -xvzf htx_package.tar.gz')
    os.chdir('htx_package')
    if process.system('./installer.sh -f'):
        self.fail("Installation of htx fails:please refer job.log")
    if self.smt:
        self.max_smt_value = 8
        if cpu.get_cpu_arch().lower() == 'power7':
            self.max_smt_value = 4
        if cpu.get_cpu_arch().lower() == 'power6':
            self.max_smt_value = 2
        self.smt_values = ["off", "on"]
        for i in range(2, self.max_smt_value + 1):
            self.smt_values.append(str(i))
        curr_smt = process.system_output(
            "ppc64_cpu --smt | awk -F'=' '{print $NF}' | awk "
            "'{print $NF}'", shell=True)
        if isinstance(curr_smt, bytes):
            curr_smt = curr_smt.decode("utf-8")
        self.curr_smt = curr_smt
def test(self):
    """
    Validate the number of cpu idle states against device tree.

    Picks a random online CPU on each of 9 iterations, collects the
    idle-state names reported by cpupower via sysfs, and compares them
    against the list read from the device tree.
    """
    for var in range(1, 10):
        cpu_num = random.choice(cpu.cpu_online_list())
        self.log.info("--------CPU: %s--------" % cpu_num)
        states = process.system_output("cpupower -c %s idle-info --silent"
                                       " | grep 'Number of idle states:' |"
                                       "awk '{print $5}'" % cpu_num,
                                       shell=True)
        cpu_idle_states = []
        # State 0 is the snooze/poll state; start from state 1.
        for i in range(1, int(states)):
            val = process.system_output("cat /sys/devices/system/cpu/"
                                        "cpu%s/cpuidle/state%s/"
                                        "name" % (cpu_num, i))
            if 'power8' in cpu.get_cpu_arch():
                val = self.set_idle_states(val)
            cpu_idle_states.append(val)
        devicetree_list = self.read_from_device_tree()
        # Fix: cmp() was removed in Python 3; "cmp(a, b) == 0" is simply
        # an equality test.
        if cpu_idle_states == devicetree_list:
            self.log.info("PASS : Validated the idle states")
        else:
            self.log.info(" cpupower tool : %s and device tree"
                          ": %s" % (cpu_idle_states, devicetree_list))
            self.fail("FAIL: Please check the idle states")
def test_cpu_arch_s390(self):
    """get_cpu_arch() reports "s390" for an IBM/S390 cpuinfo (bytes)."""
    proc_cpuinfo = b"""vendor_id : IBM/S390
# processors : 2
bogomips per cpu: 2913.00
max thread id : 0
features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities : 0 1 2 3 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 64 65 66 67 68 69 70 71 72 73 75 76 77 78 81 82 131 132
cache0 : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1 : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2 : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3 : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4 : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5 : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = 00, identification = 3FC047, machine = 2827
processor 1: version = 00, identification = 3FC047, machine = 2827
cpu number : 0
cpu MHz dynamic : 5504
cpu MHz static : 5504
cpu number : 1
cpu MHz dynamic : 5504
cpu MHz static : 5504
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "s390")
def test_cpu_arch_x86_64(self):
    """get_cpu_arch() reports "x86_64" for a 64-bit Intel cpuinfo (text)."""
    proc_cpuinfo = u"""processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel(R) Core(TM) i7-4810MQ CPU @ 2.80GHz
stepping : 3
microcode : 0x24
cpu MHz : 1766.058
cache size : 6144 KB
physical id : 0
siblings : 8
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt ibpb ibrs stibp dtherm ida arat pln pts
bugs : cpu_meltdown spectre_v1 spectre_v2
bogomips : 5586.93
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "x86_64")
def test_cpu_arch_s390(self):
    """get_cpu_arch() reports "s390" for an IBM/S390 cpuinfo (text)."""
    proc_cpuinfo = u"""vendor_id : IBM/S390
# processors : 2
bogomips per cpu: 2913.00
max thread id : 0
features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te sie
facilities : 0 1 2 3 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 57 64 65 66 67 68 69 70 71 72 73 75 76 77 78 81 82 131 132
cache0 : level=1 type=Data scope=Private size=96K line_size=256 associativity=6
cache1 : level=1 type=Instruction scope=Private size=64K line_size=256 associativity=4
cache2 : level=2 type=Data scope=Private size=1024K line_size=256 associativity=8
cache3 : level=2 type=Instruction scope=Private size=1024K line_size=256 associativity=8
cache4 : level=3 type=Unified scope=Shared size=49152K line_size=256 associativity=12
cache5 : level=4 type=Unified scope=Shared size=393216K line_size=256 associativity=24
processor 0: version = 00, identification = 3FC047, machine = 2827
processor 1: version = 00, identification = 3FC047, machine = 2827
cpu number : 0
cpu MHz dynamic : 5504
cpu MHz static : 5504
cpu number : 1
cpu MHz dynamic : 5504
cpu MHz static : 5504
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "s390")
def test_cpu_arch_x86_64(self):
    """get_cpu_arch() reports "x86_64" for a 64-bit Intel cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 60
model name : Intel(R) Core(TM) i7-4810MQ CPU @ 2.80GHz
stepping : 3
microcode : 0x24
cpu MHz : 1766.058
cache size : 6144 KB
physical id : 0
siblings : 8
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt ibpb ibrs stibp dtherm ida arat pln pts
bugs : cpu_meltdown spectre_v1 spectre_v2
bogomips : 5586.93
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "x86_64")
def test_cpu_arch_i386(self):
    """get_cpu_arch() reports "i386" for a 32-bit Intel cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 13
model name : Intel(R) Pentium(R) M processor 2.00GHz
stepping : 8
microcode : 0x20
cpu MHz : 2000.000
cache size : 2048 KB
physical id : 0
siblings : 1
core id : 0
cpu cores : 1
apicid : 0
initial apicid : 0
fdiv_bug : no
f00f_bug : no
coma_bug : no
fpu : yes
fpu_exception : yes
cpuid level : 2
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov clflush dts acpi mmx fxsr sse sse2 ss tm pbe nx bts cpuid est tm2
bugs : cpu_meltdown spectre_v1 spectre_v2
bogomips : 3990.09
clflush size : 64
cache_alignment : 64
address sizes : 32 bits physical, 32 bits virtual
power management:
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "i386")
def test_cpu_arch_i386(self):
    """get_cpu_arch() reports "i386" for a 32-bit Intel cpuinfo (text)."""
    proc_cpuinfo = u"""processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 13
model name : Intel(R) Pentium(R) M processor 2.00GHz
stepping : 8
microcode : 0x20
cpu MHz : 2000.000
cache size : 2048 KB
physical id : 0
siblings : 1
core id : 0
cpu cores : 1
apicid : 0
initial apicid : 0
fdiv_bug : no
f00f_bug : no
coma_bug : no
fpu : yes
fpu_exception : yes
cpuid level : 2
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov clflush dts acpi mmx fxsr sse sse2 ss tm pbe nx bts cpuid est tm2
bugs : cpu_meltdown spectre_v1 spectre_v2
bogomips : 3990.09
clflush size : 64
cache_alignment : 64
address sizes : 32 bits physical, 32 bits virtual
power management:
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "i386")
def setUp(self):
    """
    Build 'ndctl' and setup the binary.

    Depending on the 'package' parameter, either builds ndctl/daxctl
    from the upstream GitHub master branch or installs the distro
    packages. Cancels unless running on POWER with SuSE or rhel, and
    unless at least one NVDIMM region/bus is present.
    """
    if "powerpc" not in cpu.get_cpu_arch():
        self.cancel("Test supported only on POWER arch")
    deps = []
    self.dist = distro.detect()
    self.package = self.params.get('package', default='distro')
    if self.dist.name not in ['SuSE', 'rhel']:
        self.cancel('Unsupported OS %s' % self.dist.name)
    self.smm = SoftwareManager()
    if self.package == 'upstream':
        deps.extend(['gcc', 'make', 'automake', 'autoconf'])
        if self.dist.name == 'SuSE':
            deps.extend(['libtool', 'libkmod-devel', 'libudev-devel',
                         'systemd-devel', 'libuuid-devel-static',
                         'libjson-c-devel', 'keyutils-devel',
                         'kmod-bash-completion'])
        elif self.dist.name == 'rhel':
            # Fix: 'libtool' was listed twice in this list; the
            # duplicate entry has been dropped.
            deps.extend(['libtool', 'kmod-devel', 'libuuid-devel',
                         'json-c-devel', 'systemd-devel',
                         'keyutils-libs-devel', 'jq', 'parted'])
        for pkg in deps:
            if not self.smm.check_installed(pkg) and not \
                    self.smm.install(pkg):
                self.cancel('%s is needed for the test to be run' % pkg)
        locations = ["https://github.com/pmem/ndctl/archive/master.zip"]
        tarball = self.fetch_asset("ndctl.zip", locations=locations,
                                   expire='7d')
        archive.extract(tarball, self.teststmpdir)
        os.chdir("%s/ndctl-master" % self.teststmpdir)
        process.run('./autogen.sh', sudo=True, shell=True)
        process.run("./configure CFLAGS='-g -O2' --prefix=/usr "
                    "--disable-docs "
                    "--sysconfdir=/etc --libdir="
                    "/usr/lib64", shell=True, sudo=True)
        build.make(".")
        self.ndctl = os.path.abspath('./ndctl/ndctl')
        self.daxctl = os.path.abspath('./daxctl/daxctl')
    else:
        deps.extend(['ndctl'])
        if self.dist.name == 'rhel':
            deps.extend(['daxctl'])
        for pkg in deps:
            if not self.smm.check_installed(pkg) and not \
                    self.smm.install(pkg):
                self.cancel('%s is needed for the test to be run' % pkg)
        self.ndctl = 'ndctl'
        self.daxctl = 'daxctl'
    self.plib = pmem.PMem(self.ndctl, self.daxctl)
    if not self.plib.check_buses():
        self.cancel("Test needs atleast one region")
def setUp(self):
    """
    Setup checks :
    0. Processor should be ppc64.
    1. Perf package
    2. 24x7 is present
    3. Performance measurement is enabled in lpar through BMC
    """
    smm = SoftwareManager()
    detected_distro = distro.detect()
    processor = process.system_output("uname -m",
                                      ignore_status=True).decode("utf-8")
    if 'ppc' not in processor:
        # Fix: os.uname() is a tuple-like object, so "'ppc' in
        # os.uname()" tested element *equality* (never true for
        # 'ppc64le'); the intended substring check is on the machine
        # field.
        if 'unknown' in processor and 'ppc' not in os.uname().machine:
            self.cancel("Processor is not ppc64")
    deps = ['gcc', 'make']
    if 'Ubuntu' in detected_distro.name:
        deps.extend(
            ['linux-tools-common', 'linux-tools-%s' % platform.uname()[2]])
    elif detected_distro.name in ['debian']:
        deps.extend(['linux-perf', 'perf-tools-unstable'])
    elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.extend(['perf'])
    else:
        self.cancel("Install the package for perf supported by %s"
                    % detected_distro.name)
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    self.cpu_arch = cpu.get_cpu_arch().lower()
    self.perf_args = "perf stat -v -C 0 -e"
    if self.cpu_arch == 'power8':
        self.perf_stat = "%s hv_24x7/HPM_0THRD_NON_IDLE_CCYC" % self.perf_args
    if self.cpu_arch == 'power9':
        self.perf_stat = "%s hv_24x7/CPM_TLBIE" % self.perf_args
    self.event_sysfs = "/sys/bus/event_source/devices/hv_24x7"
    # Check if 24x7 is present
    if os.path.exists("%s" % self.event_sysfs):
        self.log.info('hv_24x7 present')
    else:
        self.cancel("%s doesn't exist.This feature is supported"
                    " only on LPAR" % self.event_sysfs)
    # Performance measurement has to be enabled in lpar through BMC
    # Check if its enabled
    result_perf = process.run("%s,domain=2,core=1/ sleep 1"
                              % self.perf_stat, ignore_status=True)
    if "not supported" in result_perf.stderr.decode("utf-8"):
        self.cancel("Please enable lpar to allow collecting"
                    " the 24x7 counters info")
    if "You may not have permission to collect stats." in \
            result_perf.stderr.decode("utf-8"):
        self.cancel("Please enable lpar to allow collecting"
                    " the 24x7 counters info")
def test_cpu_arch_risc_v(self):
    """get_cpu_arch() reports "riscv" for a RISC-V cpuinfo (bytes)."""
    proc_cpuinfo = b"""hart : 1
isa : rv64imafdc
mmu : sv39
uarch : sifive,rocket0
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "riscv")
def test_cpu_arch_risc_v(self):
    """get_cpu_arch() reports "riscv" for a RISC-V cpuinfo (bytes)."""
    proc_cpuinfo = b"""hart : 1
isa : rv64imafdc
mmu : sv39
uarch : sifive,rocket0
"""
    patched_open = unittest.mock.patch(
        'builtins.open', return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "riscv")
def test_cpu_arch_risc_v(self):
    """get_cpu_arch() reports "riscv" for a RISC-V cpuinfo (text)."""
    proc_cpuinfo = u"""hart : 1
isa : rv64imafdc
mmu : sv39
uarch : sifive,rocket0
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "riscv")
def setUp(self):
    """
    Setup checks :
    0. Processor should be ppc64.
    1. Perf package
    2. 24x7 is not supported on guest
    3. 24x7 is present
    4. Performance measurement is enabled in lpar through BMC
    """
    smm = SoftwareManager()
    detected_distro = distro.detect()
    processor = process.system_output("uname -p", ignore_status=True)
    # Fix: system_output()/run() return bytes on recent avocado
    # releases; decode before substring checks to avoid TypeError.
    if isinstance(processor, bytes):
        processor = processor.decode("utf-8")
    if 'ppc' not in processor:
        self.cancel("Processor is not ppc64")
    deps = ['gcc', 'make']
    if 'Ubuntu' in detected_distro.name:
        deps.extend(['linux-tools-common',
                     'linux-tools-%s' % platform.uname()[2]])
    elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.extend(['perf'])
    else:
        self.cancel("Install the package for perf supported by %s"
                    % detected_distro.name)
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    self.perf_args = "perf stat -v -C 0 -e"
    self.perf_stat = "%s hv_24x7/HPM_0THRD_NON_IDLE_CCYC" % self.perf_args
    self.event_sysfs = "/sys/bus/event_source/devices/hv_24x7"
    self.cpu_arch = cpu.get_cpu_arch().lower()
    # Check if this is a guest
    # 24x7 is not suported on guest
    cpu_output = process.run("cat /proc/cpuinfo")
    stdout = cpu_output.stdout
    if isinstance(stdout, bytes):
        stdout = stdout.decode("utf-8")
    if "emulated by" in stdout:
        self.cancel("This test is not supported on guest")
    # Check if 24x7 is present
    if os.path.exists("%s" % self.event_sysfs):
        self.log.info('hv_24x7 present')
    else:
        self.cancel("%s doesn't exist.This feature is supported"
                    "on only lpar" % self.event_sysfs)
    # Performance measurement has to be enabled in lpar through BMC
    # Check if its enabled
    # Refer https://bugzilla.linux.ibm.com/show_bug.cgi?id=139404#c21
    result_perf = process.run("%s,domain=2,core=1/ sleep 1"
                              % self.perf_stat, ignore_status=True)
    stderr = result_perf.stderr
    if isinstance(stderr, bytes):
        stderr = stderr.decode("utf-8")
    # Fix: the original string literal carried a backslash line
    # continuation, embedding the indentation inside the message
    # ("collect<spaces>stats"), so it could never match perf's output.
    if "You may not have permission to collect stats" in stderr:
        self.cancel("Please enable lpar to allow collecting"
                    "the 24x7 counters info")
def test_cpu_arch_arm_v8(self):
    """get_cpu_arch() reports "aarch64" for an ARMv8 cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 0
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 1
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "aarch64")
def setUp(self):
    """
    Install the PCP packages needed for the perfevent PMDA; cancel on
    unsupported distributions.
    """
    smg = SoftwareManager()
    self.cpu_arch = cpu.get_cpu_arch().lower()
    self.dist = distro.detect()
    # Guard clause instead of if/else: only rpm-based distros ship PCP.
    if self.dist.name not in ['centos', 'fedora', 'rhel', 'SuSE']:
        self.cancel("PCP is not supported on %s" % self.dist.name)
    pkgs = ['pcp', 'pcp-pmda-perfevent']
    for pkg in pkgs:
        if not smg.check_installed(pkg) and not smg.install(pkg):
            self.cancel("Package %s is missing/could not be installed"
                        % pkg)
def test_cpu_arch_arm_v8(self):
    """get_cpu_arch() reports "aarch64" for an ARMv8 cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 0
BogoMIPS : 200.00
Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
CPU implementer : 0x43
CPU architecture: 8
CPU variant : 0x1
CPU part : 0x0a1
CPU revision : 1
"""
    patched_open = unittest.mock.patch(
        'avocado.utils.cpu.open',
        return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "aarch64")
def test_cpu_arch_ppc64_le_power9(self):
    """get_cpu_arch() reports "power9" for a POWER9 cpuinfo (text)."""
    proc_cpuinfo = u"""processor : 20
cpu : POWER9 (raw), altivec supported
clock : 2050.000000MHz
revision : 1.0 (pvr 004e 0100)
timebase : 512000000
platform : PowerNV
model : 8375-42A
machine : PowerNV 8375-42A
firmware : OPAL
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "power9")
def test_cpu_arch_ppc64_le_power8(self):
    """get_cpu_arch() reports "power8" for a POWER8E cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 88
cpu : POWER8E (raw), altivec supported
clock : 3325.000000MHz
revision : 2.1 (pvr 004b 0201)
timebase : 512000000
platform : PowerNV
model : 8247-21L
machine : PowerNV 8247-21L
firmware : OPAL v3
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "power8")
def test_cpu_arch_ppc64_le_power9(self):
    """get_cpu_arch() reports "power9" for a POWER9 cpuinfo (bytes)."""
    proc_cpuinfo = b"""processor : 20
cpu : POWER9 (raw), altivec supported
clock : 2050.000000MHz
revision : 1.0 (pvr 004e 0100)
timebase : 512000000
platform : PowerNV
model : 8375-42A
machine : PowerNV 8375-42A
firmware : OPAL
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "power9")
def test_cpu_arch_ppc64_le_power8(self):
    """get_cpu_arch() reports "power8" for a POWER8E cpuinfo (text)."""
    proc_cpuinfo = u"""processor : 88
cpu : POWER8E (raw), altivec supported
clock : 3325.000000MHz
revision : 2.1 (pvr 004b 0201)
timebase : 512000000
platform : PowerNV
model : 8247-21L
machine : PowerNV 8247-21L
firmware : OPAL v3
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "power8")
def test_cpu_arch_arm_v7(self):
    """get_cpu_arch() reports "arm" for an ARMv7 cpuinfo (bytes)."""
    proc_cpuinfo = b"""Processor : ARMv7 Processor rev 2 (v7l)
BogoMIPS : 994.65
Features : swp half thumb fastmult vfp edsp thumbee neon vfpv3
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x2
CPU part : 0xc08
CPU revision : 2
Hardware : herring
Revision : 0034
Serial : 3534268a5e0700ec
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "arm")
def test_cpu_arch_arm_v7(self):
    """get_cpu_arch() reports "arm" for an ARMv7 cpuinfo (text)."""
    proc_cpuinfo = u"""Processor : ARMv7 Processor rev 2 (v7l)
BogoMIPS : 994.65
Features : swp half thumb fastmult vfp edsp thumbee neon vfpv3
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x2
CPU part : 0xc08
CPU revision : 2
Hardware : herring
Revision : 0034
Serial : 3534268a5e0700ec
"""
    patched_open = mock.patch('avocado.utils.cpu.open',
                              return_value=self._get_file_mock(proc_cpuinfo))
    with patched_open:
        self.assertEqual(cpu.get_cpu_arch(), "arm")
def setUp(self):
    '''
    Install the basic packages to support perf, collect the list of
    nest events for later validation and clear dmesg.
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    detected_distro = distro.detect()
    distro_name = detected_distro.name
    # Guard clauses: PowerNV ppc64le hosts newer than Power8 only.
    if detected_distro.arch != 'ppc64le':
        self.cancel('This test is not supported on %s architecture'
                    % detected_distro.arch)
    if cpu.get_cpu_arch().lower() == 'power8':
        self.cancel('This test not applies to Power8')
    if 'PowerNV' not in genio.read_file('/proc/cpuinfo').rstrip(
            '\t\r\n\0'):
        self.cancel('This test applies only to PowerNV')
    deps = ['gcc', 'make']
    if 'Ubuntu' in distro_name:
        deps.extend(['linux-tools-common',
                     'linux-tools-%s' % platform.uname()[2]])
    elif distro_name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.extend(['perf'])
    else:
        self.cancel("Install the package for perf supported \
            by %s" % detected_distro.name)
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    # Collect nest events, skipping the pm_nest entries.
    self.list_of_nest_events = [
        entry.split(' ')[2]
        for entry in process.get_perf_events('nest_')
        if 'pm_nest' not in entry.split(' ')[2]]
    # Clear the dmesg, by that we can capture the delta at the end of
    # the test.
    process.run("dmesg -c", sudo=True)
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_feature(vm, feature="", vcpu=0):
        """
        Checks the given feature is present
        :param vm: VM Name
        :param feature: feature to be verified
        :param vcpu: vcpu number to pin guest test
        :return: true on success, test fail on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # remove -v once guest xive support is available
            # right now power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            # Compile and run a probe binary containing a raw ISA 3.0
            # instruction (0x7c0005e6); pinned to a vcpu via taskset
            utils_package.package_install('gcc', session)
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        # For "isa2.7" the probe instruction is EXPECTED to fail (the guest
        # must not execute ISA 3.0 instructions); every other feature check
        # must succeed
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature, guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)

    # Host CPU family (e.g. "power9") must contain the configured version
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            # pin the feature probe to the highest hotplugged vcpu
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads,
                                           add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            # Start failure is only fatal when the test expects success
            if not status_error:
                test.fail("%s" % detail)
            else:
                pass
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        # Optional extra workload/lifecycle operation after the checks
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep few secs before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        # Always restore the original (pre-test) domain XML
        org_xml.sync()
def output_check(nodeinfo_output):
    """
    Cross-check every field of `virsh nodeinfo` output against the host:
    CPU model, CPU count, frequency, topology (sockets/cores/threads) and
    memory size, using sysfs, /proc/cpuinfo, lscpu and virsh capabilities.

    Raises error.TestFail on any mismatch (frequency mismatch is only
    logged, see comment below).
    """
    # Check CPU model
    cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
    cpu_model_os = utils.get_current_kernel_arch()
    if not re.match(cpu_model_nodeinfo, cpu_model_os):
        raise error.TestFail(
            "Virsh nodeinfo output didn't match CPU model")

    # Check number of CPUs, nodeinfo CPUs represent online threads in the
    # system, check all online cpus in sysfs
    cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
    cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
    cpus_online = utils.run(cmd, ignore_status=True).stdout.strip()
    cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
    cpus_total = utils.run(cmd, ignore_status=True).stdout.strip()
    # cpu0 may lack an 'online' file (it cannot be offlined on some
    # systems); count it in manually
    if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
        cpus_online = str(int(cpus_online) + 1)
        cpus_total = str(int(cpus_total) + 1)
    logging.debug("host online cpus are %s", cpus_online)
    logging.debug("host total cpus are %s", cpus_total)
    if cpus_nodeinfo != cpus_online:
        # On power, nodeinfo reports all threads, not just online ones
        if 'power' in cpu_util.get_cpu_arch():
            if cpus_nodeinfo != cpus_total:
                raise error.TestFail("Virsh nodeinfo output of CPU(s) on"
                                     " ppc did not match all threads in "
                                     "the system")
        else:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "number of CPU(s)")

    # Check CPU frequency, frequency is under clock for ppc
    cpu_frequency_nodeinfo = _check_nodeinfo(
        nodeinfo_output, 'CPU frequency', 3)
    cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock' | head -n1 | "
           "awk -F: '{print $2}' | awk -F. '{print $1}'")
    cmd_result = utils.run(cmd, ignore_status=True)
    cpu_frequency_os = cmd_result.stdout.strip()
    logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                  cpu_frequency_nodeinfo, cpu_frequency_os)
    #
    # Matching CPU Frequency is not an exact science in todays modern
    # processors and OS's. CPU's can have their execution speed varied
    # based on current workload in order to save energy and keep cool.
    # Thus since we're getting the values at disparate points in time,
    # we cannot necessarily do a pure comparison.
    # So, let's get the absolute value of the difference and ensure
    # that it's within 20 percent of each value to give us enough of
    # a "fudge" factor to declare "close enough". Don't return a failure
    # just print a debug message and move on.
    diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
    if float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or \
       float(diffval) / float(cpu_frequency_os) > 0.20:
        logging.debug("Virsh nodeinfo output didn't match CPU "
                      "frequency within 20 percent")

    # Get CPU topology from virsh capabilities xml
    cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
    logging.debug("Cpu topology in virsh capabilities output: %s",
                  cpu_topology)

    # Check CPU socket(s)
    cpu_sockets_nodeinfo = int(
        _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
    # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
    # total sockets in the system, so get total sockets in one node and
    # check with it
    node_info = utils_misc.NumaInfo()
    node_online_list = node_info.get_online_nodes()
    cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
    cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
    cmd_result = utils.run(cmd, ignore_status=True)
    total_sockets_in_node = int(cmd_result.stdout.strip())
    if total_sockets_in_node != cpu_sockets_nodeinfo:
        raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                             "socket(s) of host OS")
    if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
        raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                             "socket(s) of virsh capabilities output")

    # Check Core(s) per socket
    cores_per_socket_nodeinfo = _check_nodeinfo(
        nodeinfo_output, 'Core(s) per socket', 4)
    cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
    cmd_result = utils.run(cmd, ignore_status=True)
    cores_per_socket_os = cmd_result.stdout.strip()
    if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
        raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                             "per socket of host OS")
    if cores_per_socket_nodeinfo != cpu_topology['cores']:
        raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                             "per socket of virsh capabilities output")

    # Check Thread(s) per core
    threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                'Thread(s) per core', 4)
    if threads_per_core_nodeinfo != cpu_topology['threads']:
        raise error.TestFail("Virsh nodeinfo output didn't match Thread(s) "
                             "per core of virsh capabilities output")

    # Check Memory size: sum MemTotal over all online NUMA nodes
    memory_size_nodeinfo = int(
        _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
    memory_size_os = 0
    for i in node_online_list:
        node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
        memory_size_os += int(node_memory)
    logging.debug('The host total memory from nodes is %s', memory_size_os)
    if memory_size_nodeinfo != memory_size_os:
        raise error.TestFail("Virsh nodeinfo output didn't match "
                             "Memory size")
def set_condition(vm_name, condn, reset=False, guestbt=None):
    """
    Set domain to given state or reset it.

    :param vm_name: domain name to operate on
    :param condn: condition to apply/undo: "stress", "save",
                  "managedsave", "suspend", "hotplug" or "host_smt"
    :param reset: False applies the condition, True undoes it
    :param guestbt: background stress task to join when resetting "stress"
    :return: background task handle for "stress", otherwise None

    NOTE(review): relies on enclosing-scope names (vm, params, test,
    max_vcpu, current_vcpu, condn_sleep_sec) — must stay nested in its
    original run() function.
    """
    bt = None
    if not reset:
        if condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
            # Allow stress to start
            time.sleep(condn_sleep_sec)
            return bt
        elif condn in ["save", "managedsave"]:
            # No action
            pass
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "hotplug":
            result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                        'cur_config': current_vcpu, 'cur_live': max_vcpu,
                        'guest_live': max_vcpu}
            result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                    option="--live")
        elif condn == "host_smt":
            if cpu.get_cpu_arch() == 'power9':
                result = process.run("ppc64_cpu --smt=4", shell=True)
            else:
                test.cancel("Host SMT changes not allowed during guest "
                            "live")
        else:
            logging.debug("No operation for the domain")
    else:
        # reset=True: undo the condition applied above
        if condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(),
                                     vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                test.error("No save file for domain restore")
        elif condn == "managedsave":
            result = virsh.managedsave(vm_name, ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
            time.sleep(condn_sleep_sec)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "suspend":
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif condn == "stress":
            guestbt.join(ignore_status=True)
        elif condn == "hotplug":
            result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                    ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            exp_vcpu = {'max_config': max_vcpu,
                        'max_live': current_vcpu,
                        'cur_config': current_vcpu,
                        'cur_live': current_vcpu,
                        'guest_live': current_vcpu}
            result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                    option="--live")
        elif condn == "host_smt":
            result = process.run("ppc64_cpu --smt=2", shell=True)
            # Change back the host smt
            result = process.run("ppc64_cpu --smt=4", shell=True)
            # Work around due to known cgroup issue after cpu hot(un)plug
            # sequence
            root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
            machine_cpuset_paths = []
            if os.path.isdir(
                    os.path.join(root_cpuset_path, "machine.slice")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine.slice"))
            if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                machine_cpuset_paths.append(
                    os.path.join(root_cpuset_path, "machine"))
            if not machine_cpuset_paths:
                logging.warning("cgroup cpuset might not recover properly "
                                "for guests after host smt changes, "
                                "restore it manually")
            root_cpuset_cpus = os.path.join(root_cpuset_path,
                                            "cpuset.cpus")
            for path in machine_cpuset_paths:
                machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                # check if file content differs
                cmd = "diff %s %s" % (root_cpuset_cpus,
                                      machine_cpuset_cpus)
                if process.system(cmd, verbose=True, ignore_status=True):
                    cmd = "cp %s %s" % (root_cpuset_cpus,
                                        machine_cpuset_cpus)
                    process.system(cmd, verbose=True)
        else:
            logging.debug("No need recover the domain")
    return bt
# # See LICENSE for more details. # # Copyright: 2019 IBM # Author: Shirisha <*****@*****.**> import os import platform from avocado import Test from avocado import main from avocado import skipUnless from avocado.utils import archive from avocado.utils import cpu, build, distro, process, genio from avocado.utils.software_manager import SoftwareManager IS_POWER8 = 'power8' in cpu.get_cpu_arch().lower() class PerfWatchPoint(Test): @skipUnless(IS_POWER8, 'Supported only on Power8') def setUp(self): ''' Install the basic packages to support perf ''' # Check for basic utilities smm = SoftwareManager() detected_distro = distro.detect() self.distro_name = detected_distro.name if detected_distro.arch != 'ppc64le': self.cancel('This test is not supported on %s architecture' % detected_distro.arch)
def run(test, params, env):
    """
    Test memory device (dimm) attach/detach for a libvirt domain.

    NOTE(review): the original docstring said "Test rbd disk device",
    which does not match the body — the code below prepares domain XML
    (maxMemory/slots, NUMA cells, hugepages), attaches/detaches memory
    devices (cold- and hot-plug), and verifies the result through the
    qemu command line, domain XML, guest meminfo and numastat.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare memory device xml and modify domain xml.
    3.Attach/hotplug memory devices and start the domain.
    4.Perform test operation (save/restore, managedsave, libvirtd
      restart, detach) and verify.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    # NOTE(review): the nested check_dom_xml() declares these `global`,
    # so it reads/writes module-level globals, not these locals.
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size
        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepage, string type
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        # Build one grep pipeline over `ps` matching the expected
        # slots/maxmem/memory-backend options of the qemu process
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            if libvirt_version.version_compare(7, 3, 0):
                # Newer libvirt passes JSON to qemu
                cmd = cmd + " | grep " + '\\"discard-data\\":true'
            else:
                cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = 'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (" | grep 'memory-backend-file,id=%s' | "
                            "grep 'size=%s" % (cmd_str, size))
                else:
                    cmd_str = 'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (" | grep 'memory-backend-ram,id=%s' | "
                            "grep 'size=%s" % (cmd_str, size))
                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node and
                                    node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                # close the quote opened in the 'size=%s patterns above
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"
        # Run the command
        result = process.run(cmd, shell=True, verbose=True,
                             ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem), 30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")
        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt
            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if set memory align to 256
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)
        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])
        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size
        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            # 262144 KiB == 256 MiB alignment boundary
            if dom_mem[key] % 262144:
                logging.error('%s not align to 256', key)
                if key == 'currentMemory':
                    continue
                all_align = False
        if not all_align:
            test.fail('Memory not align to 256')
        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')
        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option, debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if max_mem:
            vmxml.max_mem = int(max_mem)
        if cur_mem:
            vmxml.current_mem = int(cur_mem)
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass
        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem
        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking
        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(
        params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'
    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")
    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        # Hugepage size/count differ per POWER generation
        cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family')\
            else cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        [x.update({'size': pg_size}) for x in huge_pages]
        setup_hugepages(int(pg_size), shp_num=huge_page_num)
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel(
            "Memory hotplug not supported in current libvirt version.")
    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')
    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)
    try:
        # Drop caches first for host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())
        # Start the domain any way if attach memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None
        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            # NOTE(review): xrange — this code targets Python 2 (or a
            # compat shim); verify before running under Python 3
            for x in xrange(at_times):
                # If any error excepted, command error status should be
                # checked in the last time
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit,
                    tg_node, node_mask, mem_model, mem_discard,
                    device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)
        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)
        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = ("memory hotplug isn't supported by this "
                                  "QEMU binary")
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)
        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)
        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                   mem_addr, tg_sizeunit,
                                                   pg_unit, tg_node,
                                                   node_mask, mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem
        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)
        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)
        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")
        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")
        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait 10s for vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)
        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait 10s for vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)
        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)
        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                   mem_addr, tg_sizeunit,
                                                   pg_unit, tg_node,
                                                   node_mask, mem_model,
                                                   mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name, dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name, device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        # Strip surrounding single quotes from cfg values
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(
                        dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to "
                                  "connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed',
                                f.read())
                        if not flag:
                            # The attached memory is used by vm, and it could
                            # not be unplugged.The result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
            # Remove dmesg temp file
            if os.path.exists(dmesg_file):
                os.remove(dmesg_file)
    except xcepts.LibvirtXMLError:
        if define_error:
            pass
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = 
params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [ vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num ] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." % (pin_cpu_list, host_cpu_count)) cpus_list = utils.cpu_online_map() logging.info("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) # Do not apply S3/S4 on power if 'power' not in cpu_util.get_cpu_arch(): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: 
logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # 
tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu if vcpu_unplug: # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain 
manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option)
def run(test, params, env): """ Test hpt resizing """ vm_name = params.get('main_vm') vm = env.get_vm(vm_name) status_error = 'yes' == params.get('status_error', 'no') error_msg = eval(params.get('error_msg', '[]')) hpt_attrs = eval(params.get('hpt_attrs', '{}')) hpt_order_path = params.get('hpt_order_path', '') cpu_attrs = eval(params.get('cpu_attrs', '{}')) numa_cell = eval(params.get('numa_cell', '{}')) hugepage = 'yes' == params.get('hugepage', 'no') maxpagesize = int(params.get('maxpagesize', 0)) check_hp = 'yes' == params.get('check_hp', 'no') qemu_check = params.get('qemu_check', '') skip_p8 = 'yes' == params.get('skip_p8', 'no') def set_hpt(vmxml, sync, **attrs): """ Set resizing value to vm xml :param vmxml: xml of vm to be manipulated :param sync: whether to sync vmxml after :param attrs: attrs to set to hpt xml """ if vmxml.xmltreefile.find('/features'): features_xml = vmxml.features else: features_xml = vm_xml.VMFeaturesXML() hpt_xml = vm_xml.VMFeaturesHptXML() for attr in attrs: setattr(hpt_xml, attr, attrs[attr]) features_xml.hpt = hpt_xml vmxml.features = features_xml logging.debug(vmxml) if sync: vmxml.sync() def set_cpu(vmxml, **attrs): """ Set cpu attrs for vmxml according to given attrs :param vmxml: xml of vm to be manipulated :param attrs: attrs to set to cpu xml """ if vmxml.xmltreefile.find('cpu'): cpu = vmxml.cpu else: cpu = vm_xml.VMCPUXML() if 'numa_cell' in attrs: cpu.xmltreefile.create_by_xpath('/numa') cpu.numa_cell = attrs['numa_cell'] for key in attrs: setattr(cpu, key, attrs[key]) vmxml.cpu = cpu vmxml.sync() def set_memory(vmxml): """ Set memory attributes in vm xml """ vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848)) vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16)) vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB') logging.debug(numa_cell) if numa_cell: # Remove cpu topology to avoid that it doesn't match vcpu count if vmxml.get_cpu_topology(): new_cpu = vmxml.cpu new_cpu.del_topology() vmxml.cpu 
= new_cpu vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1 vmxml.sync() def check_hpt_order(session, resizing=''): """ Return htp order in hpt_order file by default If 'resizing' is disabled, test updating htp_order """ if not hpt_order_path: test.cancel('No hpt order path provided.') hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) if resizing == 'disabled': cmd_result = session.cmd_status_output( 'echo %d > %s' % (hpt_order + 1, hpt_order_path)) result = process.CmdResult(stderr=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result, True) libvirt.check_result(result, error_msg) return hpt_order def check_hp_in_vm(session, page_size): """ Check if hugepage size is correct inside vm :param session: the session of the running vm :param page_size: the expected pagesize to be checked inside vm """ expect = False if int(page_size) == 65536 else True meminfo = session.cmd_output('cat /proc/meminfo|grep Huge') logging.info('meminfo: \n%s', meminfo) pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024) logging.info('"%s" should %s be found in meminfo output', pattern, '' if expect else 'not') result = expect == bool(re.search(pattern, meminfo)) if not result: test.fail('meminfo output not meet expectation') # Check PAGE_SIZE in another way if not expect: conf_page_size = session.cmd_output('getconf PAGE_SIZE') logging.debug('Output of "getconf PAGE_SIZE": %s', conf_page_size) if int(conf_page_size) != int(page_size): test.fail( 'PAGE_SIZE not correct, should be %r, actually is %r' % (page_size, conf_page_size)) bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: arch = platform.machine() vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) resizing = hpt_attrs.get('resizing') # Test on ppc64le hosts if arch.lower() == 'ppc64le': cpu_arch = cpu.get_cpu_arch() logging.debug('cpu_arch is: %s', cpu_arch) if skip_p8 and 
cpu_arch == 'power8': test.cancel('This case is not for POWER8') if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0): test.cancel('Qemu version is too low, ' 'does not support maxpagesize setting') if maxpagesize == 16384 and cpu_arch == 'power9': test.cancel('Power9 does not support 16M pagesize.') set_hpt(vmxml, True, **hpt_attrs) if cpu_attrs or numa_cell: if numa_cell: cpu_attrs['numa_cell'] = numa_cell set_cpu(vmxml, **cpu_attrs) if hugepage: vm_mem = vmxml.max_mem host_hp_size = utils_memory.get_huge_page_size() # Make 100m extra memory just to be safe hp_count = max((vm_mem + 102400) // host_hp_size, 1200) vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True) # Set up hugepage env mnt_source, hp_path, fstype = 'hugetlbfs', '/dev/hugepages', 'hugetlbfs' if not os.path.isdir(hp_path): process.run('mkdir %s' % hp_path, verbose=True) utils_memory.set_num_huge_pages(hp_count) if utils_misc.is_mounted(mnt_source, hp_path, fstype, verbose=True): utils_misc.umount(mnt_source, hp_path, fstype, verbose=True) utils_misc.mount(mnt_source, hp_path, fstype, verbose=True) # Restart libvirtd service to make sure mounted hugepage # be recognized utils_libvirtd.libvirtd_restart() if resizing == 'enabled': set_memory(vmxml) logging.debug('vmxml: \n%s', vmxml) # Start vm and check if start succeeds result = virsh.start(vm_name, debug=True) libvirt.check_exit_status(result, expect_error=status_error) # if vm is not suposed to start, terminate test if status_error: libvirt.check_result(result, error_msg) return libvirt.check_qemu_cmd_line(qemu_check) session = vm.wait_for_login() hpt_order = check_hpt_order(session, resizing) # Check hugepage inside vm if check_hp: check_hp_in_vm(session, maxpagesize * 1024) if resizing == 'enabled': mem_xml = utils_hotplug.create_mem_xml( tg_size=int(params.get('mem_size', 2048000)), tg_sizeunit=params.get('size_unit', 'KiB'), tg_node=int(params.get('mem_node', 0)), mem_model=params.get('mem_model', 'dimm')) logging.debug(mem_xml) # 
Attach memory device to the guest for 12 times # that will reach the maxinum memory limitation for i in range(12): virsh.attach_device(vm_name, mem_xml.xml, debug=True, ignore_status=False) xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(xml_after_attach) # Check dumpxml of the guest, # check if each device has its alias for i in range(12): pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i logging.debug('Searching for %s', pattern) if not re.search(pattern, str( xml_after_attach.xmltreefile)): test.fail('Missing memory alias: %s' % pattern) # Test on non-ppc64le hosts else: set_hpt(vmxml, sync=False, **hpt_attrs) result = virsh.define(vmxml.xml) libvirt.check_exit_status(result, status_error) libvirt.check_result(result, error_msg) finally: bk_xml.sync() if hugepage: utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs') utils_memory.set_num_huge_pages(0)
def setUp(self): """ Setup checks : 0. Processor should be ppc64. 1. Perf package 2. 24x7 is not supported on guest 3. 24x7 is present 4. Performance measurement is enabled in LPAR through BMC """ smm = SoftwareManager() detected_distro = distro.detect() if 'ppc64' not in detected_distro.arch: self.cancel("Processor is not PowerPC") deps = ['gcc', 'make'] if 'Ubuntu' in detected_distro.name: deps.extend( ['linux-tools-common', 'linux-tools-%s' % platform.uname()[2]]) elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos']: deps.extend(['perf', 'numactl']) else: self.cancel("Install the package for perf supported by %s" % detected_distro.name) for package in deps: if not smm.check_installed(package) and not smm.install(package): self.cancel('%s is needed for the test to be run' % package) self.cpu_arch = cpu.get_cpu_arch().lower() self.perf_args = "perf stat -v -C 0 -e" if self.cpu_arch == 'power8': self.perf_stat = "%s hv_24x7/HPM_0THRD_NON_IDLE_CCYC" % self.perf_args if self.cpu_arch == 'power9': self.perf_stat = "%s hv_24x7/CPM_TLBIE" % self.perf_args self.event_sysfs = "/sys/bus/event_source/devices/hv_24x7" # Check if this is a guest # 24x7 is not suported on guest if "emulated by" in cpu._get_cpu_info(): self.cancel("This test is not supported on guest") # Check if 24x7 is present if os.path.exists(self.event_sysfs): self.log.info('hv_24x7 present') else: self.cancel("%s doesn't exist.This test is supported" " only on PowerVM" % self.event_sysfs) # Performance measurement has to be enabled in lpar through BMC # Check if its enabled result_perf = process.run("%s,domain=2,core=1/ sleep 1" % self.perf_stat, ignore_status=True) if "not supported" in result_perf.stderr.decode("utf-8"): self.cancel("Please enable LPAR to allow collecting" " the 24x7 counters info") # Getting the number of cores output = process.run("lscpu") for line in output.stdout.decode("utf-8").split('\n'): if 'Core(s) per socket:' in line: self.cores = int(line.split(':')[1].strip()) 
# Getting the number of chips available in the machine self.chip = memory.numa_nodes() # Collect all hv_24x7 events self.list_of_hv_24x7_events = [] for line in process.get_command_output_matching( 'perf list', 'hv_24x7'): line = line.split(',')[0].split('/')[1] self.list_of_hv_24x7_events.append(line) # Clear the dmesg, by that we can capture the delta at the end of the test. process.run("dmesg -c", sudo=True)
def run(test, params, env): """ Test different hmi injections with guest :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def set_condn(action, recover=False): """ Set/reset guest state/action :param action: Guest state change/action :param recover: whether to recover given state default: False """ if not recover: if action == "pin_vcpu": for i in range(cur_vcpu): virsh.vcpupin(vm_name, i, hmi_cpu, "--live", ignore_status=False, debug=True) virsh.emulatorpin(vm_name, hmi_cpu, "live", ignore_status=False, debug=True) elif action == "filetrans": utils_test.run_file_transfer(test, params, env) elif action == "save": save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") result = virsh.save(vm_name, save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) time.sleep(10) if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) os.remove(save_file) elif action == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) time.sleep(10) result = virsh.resume(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) return host_version = params.get("host_version") guest_version = params.get("guest_version", "") max_vcpu = int(params.get("ppchmi_vcpu_max", '1')) cur_vcpu = int(params.get("ppchmi_vcpu_cur", "1")) cores = int(params.get("ppchmi_cores", '1')) sockets = int(params.get("ppchmi_sockets", '1')) threads = int(params.get("ppchmi_threads", '1')) status_error = "yes" == params.get("status_error", "no") condition = params.get("condn", "") inject_code = params.get("inject_code", "") scom_base = params.get("scom_base", "") hmi_name = params.get("hmi_name", "") hmi_iterations = int(params.get("hmi_iterations", 1)) if host_version not in cpu.get_cpu_arch(): 
test.cancel("Unsupported Host cpu version") vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sm = SoftwareManager() if not sm.check_installed("opal-utils") and not sm.install("opal-utils"): test.cancel("opal-utils package install failed") cpus_list = cpu.cpu_online_list() cpu_idle_state = cpu.get_cpuidle_state() cpu.set_cpuidle_state() # Lets use second available host cpu hmi_cpu = cpus_list[1] pir = int(open('/sys/devices/system/cpu/cpu%s/pir' % hmi_cpu).read().strip(), 16) if host_version == 'power9': coreid = (((pir) >> 2) & 0x3f) nodeid = (((pir) >> 8) & 0x7f) & 0xf hmi_scom_addr = hex(((coreid & 0x1f + 0x20) << 24) | int(scom_base, 16)) if host_version == 'power8': coreid = (((pir) >> 3) & 0xf) nodeid = (((pir) >> 7) & 0x3f) hmi_scom_addr = hex(((coreid & 0xf) << 24) | int(scom_base, 16)) hmi_cmd = "putscom -c %s %s %s" % (nodeid, hmi_scom_addr, inject_code) vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) org_xml = vmxml.copy() # Destroy the vm vm.destroy() try: session = None bgt = None libvirt_xml.VMXML.set_vm_vcpus(vm_name, max_vcpu, cur_vcpu, sockets=sockets, cores=cores, threads=threads, add_topology=True) if guest_version: libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version) vm.start() # Lets clear host and guest dmesg process.system("dmesg -C", verbose=False) session = vm.wait_for_login() session.cmd("dmesg -C") # Set condn if "vcpupin" in condition: set_condn("pin_vcpu") if "stress" in condition: utils_test.load_stress("stress_in_vms", params=params, vms=[vm]) if "save" in condition: set_condn("save") if "suspend" in condition: set_condn("suspend") # hmi inject logging.debug("Injecting %s HMI on cpu %s", hmi_name, hmi_cpu) logging.debug("HMI Command: %s", hmi_cmd) process.run(hmi_cmd) # Check host and guest dmesg host_dmesg = process.run("dmesg -c", verbose=False).stdout_text guest_dmesg = session.cmd_output("dmesg") if "Unrecovered" in host_dmesg: test.fail("Unrecovered host hmi\n%s", host_dmesg) else: logging.debug("Host 
dmesg: %s", host_dmesg) logging.debug("Guest dmesg: %s", guest_dmesg) if "save" in condition: set_condn("save") if "suspend" in condition: set_condn("suspend") finally: if "stress" in condition: utils_test.unload_stress("stress_in_vms", params=params, vms=[vm]) if session: session.close() org_xml.sync() cpu.set_cpuidle_state(setstate=cpu_idle_state)
def setUp(self): ''' Build VA Test ''' # Check for basic utilities smm = SoftwareManager() self.scenario_arg = int(self.params.get('scenario_arg', default=1)) self.n_chunks = nr_pages = self.n_chunks2 = self.def_chunks = 0 self.hsizes = [1024, 2] page_chunker = memory.meminfo.Hugepagesize.m if distro.detect().arch in ['ppc64', 'ppc64le']: if 'power8' in cpu.get_cpu_arch(): self.hsizes = [16384, 16] if self.scenario_arg not in range(1, 13): self.cancel("Test need to skip as scenario will be 1-12") elif self.scenario_arg in [7, 8, 9]: self.log.info("Using alternate hugepages") if memory.meminfo.Hugepagesize.m == self.hsizes[0]: page_chunker = self.hsizes[1] else: page_chunker = self.hsizes[0] self.exist_pages = memory.get_num_huge_pages() if self.scenario_arg in [10, 11, 12]: self.log.info("Using Multiple hugepages") if memory.meminfo.Hugepagesize.m != self.hsizes[0]: self.hsizes.reverse() # Leaving half size for default pagesize total_mem = (0.9 * memory.meminfo.MemFree.m) / 2 self.def_chunks = int(total_mem / 16384) for hp_size in self.hsizes: nr_pgs = int((total_mem / 2) / hp_size) genio.write_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(hp_size * 1024), str(nr_pgs)) n_pages = genio.read_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(self.hsizes[0] * 1024)).rstrip("\n") n_pages2 = genio.read_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(self.hsizes[1] * 1024)).rstrip("\n") self.n_chunks = (int(n_pages) * self.hsizes[0]) / 16384 self.n_chunks2 = (int(n_pages2) * self.hsizes[1]) / 16384 if self.scenario_arg not in [1, 2, 10, 11, 12]: max_hpages = int((0.9 * memory.meminfo.MemFree.m) / page_chunker) if self.scenario_arg in [3, 4, 5, 6]: memory.set_num_huge_pages(max_hpages) nr_pages = memory.get_num_huge_pages() else: genio.write_file( '/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % str(page_chunker * 1024), str(max_hpages)) nr_pages = genio.read_file( 
'/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepage' 's' % str(page_chunker * 1024)).rstrip("\n") self.n_chunks = (int(nr_pages) * page_chunker) / 16384 for packages in ['gcc', 'make']: if not smm.check_installed(packages) and not smm.install(packages): self.cancel('%s is needed for the test to be run' % packages) shutil.copyfile(os.path.join(self.datadir, 'va_test.c'), os.path.join(self.teststmpdir, 'va_test.c')) shutil.copyfile(os.path.join(self.datadir, 'Makefile'), os.path.join(self.teststmpdir, 'Makefile')) build.make(self.teststmpdir)
def run(test, params, env): """ Different cpu compat mode scenario tests :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def check_feature(vm, feature="", vcpu=0): """ Checks the given feature is present :param vm: VM Name :param feature: feature to be verified :param vcpu: vcpu number to pin guest test :return: true on success, test fail on failure """ session = vm.wait_for_login() if 'power8' in feature: cmd = 'lscpu|grep -i "Model name:.*power8"' elif 'xive' in feature: # remove -v once guest xive support is available # right now power9 guest supports only xics cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible" elif 'xics' in feature: cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible" elif 'power9' in feature: cmd = 'lscpu|grep -i "Model name:.*power9"' elif 'hpt' in feature: cmd = 'grep "MMU.*: Hash" /proc/cpuinfo' elif 'rpt' in feature: cmd = 'grep "MMU.*: Radix" /proc/cpuinfo' elif 'isa' in feature: utils_package.package_install('gcc', session) cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");" cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu status, output = session.cmd_status_output(cmd) logging.debug(output) session.close() if feature != "isa2.7": if status != 0: test.fail("Feature: %s check failed inside " "%s guest on %s host" % (feature, guest_version, host_version)) else: if status == 0: test.fail("isa3.0 instruction succeeds in " "%s guest on %s host" % (guest_version, host_version)) return True vm_name = params.get("main_vm") vm = env.get_vm(vm_name) pin_vcpu = 0 host_version = params.get("host_version") guest_version = params.get("guest_version") max_vcpu = params.get("cpucompat_vcpu_max", "") cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1")) cores = int(params.get("topology_cores", '1')) sockets = int(params.get("topology_sockets", '1')) threads = int(params.get("topology_threads", '1')) 
status_error = "yes" == params.get("status_error", "no") condn = params.get("condn", "") guest_features = params.get("guest_features", "") if guest_features: guest_features = guest_features.split(',') if guest_version: guest_features.append(guest_version) if host_version not in cpu.get_cpu_arch(): test.cancel("Unsupported Host cpu version") vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name) org_xml = vmxml.copy() # Destroy the vm vm.destroy() try: # Set cpu model if max_vcpu: pin_vcpu = int(max_vcpu) - 1 libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu, sockets=sockets, cores=cores, threads=threads, add_topology=True) libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version) logging.debug(virsh.dumpxml(vm_name)) try: vm.start() except virt_vm.VMStartError as detail: if not status_error: test.fail("%s" % detail) else: pass if max_vcpu: virsh.setvcpus(vm_name, int(max_vcpu), "--live", ignore_status=False, debug=True) if not cpu.check_if_vm_vcpu_match(int(max_vcpu), vm): test.fail("Vcpu hotplug failed") if not status_error: for feature in guest_features: check_feature(vm, feature, vcpu=pin_vcpu) if condn == "filetrans": utils_test.run_file_transfer(test, params, env) elif condn == "stress": bt = utils_test.run_avocado_bg(vm, params, test) if not bt: test.cancel("guest stress failed to start") elif condn == "save": save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") result = virsh.save(vm_name, save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) # Just sleep few secs before guest recovery time.sleep(2) if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) os.remove(save_file) elif condn == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) # Just sleep few secs before guest recovery time.sleep(2) result = virsh.resume(vm_name, 
ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result) else: pass finally: org_xml.sync()
def set_condition(vm_name, condn, reset=False, guestbt=None): """ Set domain to given state or reset it. """ bt = None if not reset: if condn == "avocadotest": bt = utils_test.run_avocado_bg(vm, params, test) if not bt: test.cancel("guest stress failed to start") # Allow stress to start time.sleep(condn_sleep_sec) return bt elif condn == "stress": utils_test.load_stress("stress_in_vms", params=params, vms=[vm]) elif condn in ["save", "managedsave"]: # No action pass elif condn == "suspend": result = virsh.suspend(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif condn == "hotplug": result = virsh.setvcpus(vm_name, max_vcpu, "--live", ignore_status=True, debug=True) libvirt.check_exit_status(result) exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu, 'cur_config': current_vcpu, 'cur_live': max_vcpu, 'guest_live': max_vcpu} result = utils_hotplug.check_vcpu_value(vm, exp_vcpu, option="--live") elif condn == "host_smt": if cpu.get_cpu_arch() == 'power9': result = process.run("ppc64_cpu --smt=4", shell=True) else: test.cancel("Host SMT changes not allowed during guest live") else: logging.debug("No operation for the domain") else: if condn == "save": save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save") result = virsh.save(vm_name, save_file, ignore_status=True, debug=True) libvirt.check_exit_status(result) time.sleep(condn_sleep_sec) if os.path.exists(save_file): result = virsh.restore(save_file, ignore_status=True, debug=True) libvirt.check_exit_status(result) os.remove(save_file) else: test.error("No save file for domain restore") elif condn == "managedsave": result = virsh.managedsave(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) time.sleep(condn_sleep_sec) result = virsh.start(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif condn == "suspend": result = virsh.resume(vm_name, ignore_status=True, debug=True) libvirt.check_exit_status(result) elif condn == 
"avocadotest": guestbt.join(ignore_status=True) elif condn == "stress": utils_test.unload_stress("stress_in_vms", params=params, vms=[vm]) elif condn == "hotplug": result = virsh.setvcpus(vm_name, current_vcpu, "--live", ignore_status=True, debug=True) libvirt.check_exit_status(result) exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu, 'cur_config': current_vcpu, 'cur_live': current_vcpu, 'guest_live': current_vcpu} result = utils_hotplug.check_vcpu_value(vm, exp_vcpu, option="--live") elif condn == "host_smt": result = process.run("ppc64_cpu --smt=2", shell=True) # Change back the host smt result = process.run("ppc64_cpu --smt=4", shell=True) # Work around due to known cgroup issue after cpu hot(un)plug # sequence root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset") machine_cpuset_paths = [] if os.path.isdir(os.path.join(root_cpuset_path, "machine.slice")): machine_cpuset_paths.append(os.path.join(root_cpuset_path, "machine.slice")) if os.path.isdir(os.path.join(root_cpuset_path, "machine")): machine_cpuset_paths.append(os.path.join(root_cpuset_path, "machine")) if not machine_cpuset_paths: logging.warning("cgroup cpuset might not recover properly " "for guests after host smt changes, " "restore it manually") root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus") for path in machine_cpuset_paths: machine_cpuset_cpus = os.path.join(path, "cpuset.cpus") # check if file content differs cmd = "diff %s %s" % (root_cpuset_cpus, machine_cpuset_cpus) if process.system(cmd, verbose=True, ignore_status=True): cmd = "cp %s %s" % (root_cpuset_cpus, machine_cpuset_cpus) process.system(cmd, verbose=True) else: logging.debug("No need recover the domain") return bt
def run(test, params, env): """ Domain CPU management testing. 1. Prepare a domain for testing, install qemu-guest-ga if needed. 2. Plug vcpu for the domain. 3. Checking: 3.1. Virsh vcpucount. 3.2. Virsh vcpuinfo. 3.3. Current vcpu number in domain xml. 3.4. Virsh vcpupin and vcpupin in domain xml. 3.5. The vcpu number in domain. 3.6. Virsh cpu-stats. 4. Repeat step 3 to check again. 5. Control domain(save, managedsave, s3, s4, migrate, etc.). 6. Repeat step 3 to check again. 7. Recover domain(restore, wakeup, etc.). 8. Repeat step 3 to check again. 9. Unplug vcpu for the domain. 10. Repeat step 3 to check again. 11. Repeat step 5 to control domain(As BZ#1088216 not fix, skip save/managedsave/migrate related actions). 12. Repeat step 3 to check again. 13. Repeat step 7 to recover domain. 14. Repeat step 3 to check again. 15. Recover test environment. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vm_operation = params.get("vm_operation", "null") vcpu_max_num = params.get("vcpu_max_num") vcpu_current_num = params.get("vcpu_current_num") vcpu_plug = "yes" == params.get("vcpu_plug", "no") vcpu_plug_num = params.get("vcpu_plug_num") vcpu_unplug = "yes" == params.get("vcpu_unplug", "no") vcpu_unplug_num = params.get("vcpu_unplug_num") setvcpu_option = params.get("setvcpu_option", "") agent_channel = "yes" == params.get("agent_channel", "yes") install_qemuga = "yes" == params.get("install_qemuga", "no") start_qemuga = "yes" == params.get("start_qemuga", "no") restart_libvirtd = "yes" == params.get("restart_libvirtd", "no") setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no") status_error = "yes" == params.get("status_error", "no") pin_before_plug = "yes" == params.get("pin_before_plug", "no") pin_after_plug = "yes" == params.get("pin_after_plug", "no") pin_before_unplug = "yes" == params.get("pin_before_unplug", "no") pin_after_unplug = "yes" == params.get("pin_after_unplug", "no") pin_vcpu = params.get("pin_vcpu") pin_cpu_list = 
params.get("pin_cpu_list", "x") check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no") # Init expect vcpu count values expect_vcpu_num = [vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num, vcpu_current_num] if check_after_plug_fail: expect_vcpu_num_bk = list(expect_vcpu_num) # Init expect vcpu pin values expect_vcpupin = {} # Init cpu-list for vcpupin host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case for" " the cpu-list=%s. But current number of cpu" " on host is %s." % (pin_cpu_list, host_cpu_count)) cpus_list = utils.cpu_online_map() logging.info("Active cpus in host are %s", cpus_list) cpu_seq_str = "" for i in range(len(cpus_list) - 1): if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]): cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1]) break if pin_cpu_list == "x": pin_cpu_list = cpus_list[-1] if pin_cpu_list == "x-y": if cpu_seq_str: pin_cpu_list = cpu_seq_str else: pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0]) elif pin_cpu_list == "x,y": pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1]) elif pin_cpu_list == "x-y,^z": if cpu_seq_str: pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1] else: pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1], cpus_list[0]) else: # Just use the value get from cfg pass need_mkswap = False # Back up domain XML vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() try: # Customize domain vcpu number if vm.is_alive(): vm.destroy() if agent_channel: vmxml.set_agent_channel() else: vmxml.remove_agent_channels() vmxml.sync() vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num)) # Do not apply S3/S4 on power if 'power' not in cpu_util.get_cpu_arch(): vmxml.set_pm_suspend(vm_name, "yes", "yes") vm.start() # Create swap partition/file if nessesary if vm_operation == "s4": need_mkswap = not vm.has_swap() if need_mkswap: 
logging.debug("Creating swap partition") vm.create_swap_partition() # Prepare qemu guest agent if install_qemuga: vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga) vm.setenforce(0) else: # Remove qemu-guest-agent for negative test vm.remove_package('qemu-guest-agent') # Run test check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin) # plug vcpu if vcpu_plug: # Pin vcpu if pin_before_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} result = virsh.setvcpus(vm_name, vcpu_plug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) check_setvcpus_result(result, status_error) if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_plug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num if not status_error: if not online_new_vcpu(vm, vcpu_plug_num): raise error.TestFail("Fail to enable new added cpu") # Pin vcpu if pin_after_plug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if status_error and check_after_plug_fail: check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option) if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # 
tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: %s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_plug_num expect_vcpu_num[4] = vcpu_plug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_plug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Unplug vcpu # Since QEMU 2.2.0, by default all current vcpus are non-hotpluggable # when VM started , and it required that vcpu 0(id=1) is always # present and non-hotpluggable, which means we can't hotunplug these # vcpus directly. So we can either hotplug more vcpus before we do # hotunplug, or modify the 'hotpluggable' attribute to 'yes' of the # vcpus except vcpu 0, to make sure libvirt can find appropriate # hotpluggable vcpus to reach the desired target vcpu count. For # simple prepare step, here we choose to hotplug more vcpus. 
if vcpu_unplug: if setvcpu_option == "--live": logging.info("Hotplug vcpu to the maximum count to make sure" " all these new plugged vcpus are hotunpluggable") result = virsh.setvcpus(vm_name, vcpu_max_num, '--live', debug=True) libvirt.check_exit_status(result) # Pin vcpu if pin_before_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) # As the vcpu will unplug later, so set expect_vcpupin to empty expect_vcpupin = {} result = virsh.setvcpus(vm_name, vcpu_unplug_num, setvcpu_option, readonly=setvcpu_readonly, ignore_status=True, debug=True) try: check_setvcpus_result(result, status_error) except error.TestNAError: raise error.TestWarn("Skip unplug vcpu as it is not supported") if setvcpu_option == "--config": expect_vcpu_num[2] = vcpu_unplug_num elif setvcpu_option == "--guest": # vcpuset '--guest' only affect vcpu number in guest expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num # Pin vcpu if pin_after_unplug: result = virsh.vcpupin(vm_name, pin_vcpu, pin_cpu_list, ignore_status=True, debug=True) libvirt.check_exit_status(result) expect_vcpupin = {pin_vcpu: pin_cpu_list} if not status_error: if restart_libvirtd: utils_libvirtd.libvirtd_restart() # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Control domain manipulate_domain(vm_name, vm_operation) if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option) # Recover domain manipulate_domain(vm_name, vm_operation, recover=True) # Resume domain from S4 status may takes long time(QEMU bug), # here we wait for 10 mins then skip the remaining part of # tests if domain not resume successfully try: vm.wait_for_login(timeout=600) except Exception, e: raise error.TestWarn("Skip remaining test steps as domain" " not resume in 10 mins: 
%s" % e) # For hotplug/unplug vcpu without '--config flag, after # suspend domain to disk(shut off) and re-start it, the # current live vcpu number will recover to orinial value if vm_operation == 's4': if setvcpu_option.count("--config"): expect_vcpu_num[3] = vcpu_unplug_num expect_vcpu_num[4] = vcpu_unplug_num elif setvcpu_option.count("--guest"): expect_vcpu_num[4] = vcpu_unplug_num else: expect_vcpu_num[3] = vcpu_current_num expect_vcpu_num[4] = vcpu_current_num if vm_operation != "null": # Check vcpu number and related commands check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin, setvcpu_option)
# # See LICENSE for more details. # # Copyright: 2019 IBM # Author: Nageswara R Sastry <*****@*****.**> import os import platform import shutil from avocado import Test from avocado import main from avocado import skipUnless from avocado.utils import cpu, distro, process, genio from avocado.utils.software_manager import SoftwareManager CPU_ARCH = cpu.get_cpu_arch().lower() IS_POWER9 = 'power9' in CPU_ARCH IS_POWER8 = 'power8' in CPU_ARCH class PerfRawevents(Test): """ Tests raw events on Power8 and Power9 along with named events """ # Initializing fail command list fail_cmd = list() def copy_files(self, filename): shutil.copyfile(self.get_data(filename),