def setUp(self):
    '''
    Install the basic packages to support perf
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    detected_distro = distro.detect()
    self.distro_name = detected_distro.name
    # e.g. 'power8' -> '8': keep only the generation digits
    self.cpu_family = cpu.get_family()[5:]
    if detected_distro.arch != 'ppc64le':
        self.cancel('This test is not supported on %s architecture'
                    % detected_distro.arch)
    deps = ['gcc', 'make']
    if self.distro_name in ['Ubuntu']:
        deps.extend(['linux-tools-common',
                     'linux-tools-%s' % platform.uname()[2]])
    elif self.distro_name in ['debian']:
        deps.extend(['linux-tools-%s' % platform.uname()[2][3]])
    elif self.distro_name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.extend(['perf'])
    else:
        self.cancel("Install the package for perf supported by %s"
                    % detected_distro.name)
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    for filename in ['name_events_p8', 'raw_codes_p8', 'name_events_p9',
                     'raw_codes_p9', 'name_events_p10', 'raw_codes_p10']:
        self.copy_files(filename)
    os.chdir(self.teststmpdir)
    # Clear the dmesg to capture the delta at the end of the test.
    process.run("dmesg -C")
def setUp(self):
    smm = SoftwareManager()
    detected_distro = distro.detect()
    packages = ['make', 'gcc']
    if detected_distro.name in ["Ubuntu", 'debian']:
        packages.append("gfortran")
    elif detected_distro.name == "SuSE":
        packages.extend(["gcc-fortran", "libgfortran4"])
    else:
        packages.append("gcc-gfortran")
    for package in packages:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    url = "https://github.com/xianyi/OpenBLAS/archive/develop.zip"
    tarball = self.fetch_asset("OpenBLAS-develop.zip", locations=[url],
                               expire='7d')
    archive.extract(tarball, self.workdir)
    openblas_dir = os.path.join(self.workdir, "OpenBLAS-develop")
    openblas_bin_dir = os.path.join(openblas_dir, 'bin')
    os.mkdir(openblas_bin_dir)
    cmd = "gcc --version"
    output = process.system_output(cmd, shell=True).decode("utf-8")
    gcc_ver = re.search(r'\d+', output.splitlines()[0])
    proc_type = cpu.get_family()
    self.target = ""
    if int(gcc_ver.group()) < 10 and 'power10' in proc_type:
        self.target = "TARGET=POWER9"
        build.make(openblas_dir, extra_args='FC=gfortran '
                   'ISMAXKERNEL=imax.S ISMINKERNEL=imin.S TARGET=POWER9')
    else:
        build.make(openblas_dir, extra_args='FC=gfortran '
                   'ISMAXKERNEL=imax.S ISMINKERNEL=imin.S')
    build.make(openblas_dir,
               extra_args='PREFIX=%s install' % openblas_bin_dir)
    self.test_dir = os.path.join(openblas_dir, "test")
def test(self): """ Validate the number of cpu idle states against device tree """ for var in range(1, 10): cpu_num = random.choice(cpu.cpu_online_list()) self.log.info("--------CPU: %s--------" % cpu_num) states = process.system_output("cpupower -c %s idle-info --silent" " | grep 'Number of idle states:' |" "awk '{print $5}'" % cpu_num, shell=True).decode("utf-8") cpu_idle_states = [] for i in range(1, int(states)): val = process.system_output("cat /sys/devices/system/cpu/" "cpu%s/cpuidle/state%s/" "name" % (cpu_num, i)).decode("utf-8") if 'power8' in cpu.get_family(): val = self.set_idle_states(val) cpu_idle_states.append(val) devicetree_list = self.read_from_device_tree() res = self.cmp(cpu_idle_states, devicetree_list) if res == 0: self.log.info("PASS : Validated the idle states") else: self.log.info(" cpupower tool : %s and device tree" ": %s" % (cpu_idle_states, devicetree_list)) self.fail("FAIL: Please check the idle states")
def setUp(self): """ Install necessary packages to build the linux module """ if 'power' not in cpu.get_family(): self.cancel('Test Only supported on Power') pkgs = ['gcc', 'make', 'kernel-devel'] smm = SoftwareManager() for package in pkgs: if not smm.check_installed(package) and not smm.install(package): self.cancel('%s is needed for the test to be run' % package) tarball = self.fetch_asset("ipistorm.zip", locations=[ "https://github.com/antonblanchard/ipistorm" "/archive/master.zip"], expire='7d') archive.extract(tarball, self.teststmpdir) teststmpdir = os.path.join(self.teststmpdir, "ipistorm-master") os.chdir(teststmpdir) kernel_version = platform.uname()[2] if not os.path.exists(os.path.join("/lib/modules", kernel_version)): self.cancel( "Modules of running kernel missing to build ipistorm module") build.make(teststmpdir) if not os.path.isfile(os.path.join(teststmpdir, 'ipistorm.ko')): self.fail("No ipistorm.ko found, module build failed") int_op = genio.read_file("/proc/interrupts") if "XIVE" not in int_op: self.cancel("Test is supported only with XIVE")
def setUp(self): """ Setup checks : 0. Processor should be ppc64. 1. Perf package 2. 24x7 is present 3. Performance measurement is enabled in lpar through BMC """ smm = SoftwareManager() detected_distro = distro.detect() processor = process.system_output("uname -m", ignore_status=True).decode("utf-8") if 'ppc' not in processor: if 'unknown' in processor and 'ppc' not in os.uname(): self.cancel("Processor is not ppc64") deps = ['gcc', 'make'] if 'Ubuntu' in detected_distro.name: deps.extend( ['linux-tools-common', 'linux-tools-%s' % platform.uname()[2]]) elif detected_distro.name in ['debian']: deps.extend(['linux-perf', 'perf-tools-unstable']) elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos']: deps.extend(['perf']) else: self.cancel("Install the package for perf supported by %s" % detected_distro.name) for package in deps: if not smm.check_installed(package) and not smm.install(package): self.cancel('%s is needed for the test to be run' % package) self.cpu_family = cpu.get_family() self.perf_args = "perf stat -v -e" if self.cpu_family == 'power8': self.perf_stat = "%s hv_24x7/HPM_0THRD_NON_IDLE_CCYC" % self.perf_args if self.cpu_family == 'power9': self.perf_stat = "%s hv_24x7/CPM_TLBIE" % self.perf_args if self.cpu_family == 'power10': self.perf_stat = "%s hv_24x7/CPM_TLBIE_FIN" % self.perf_args self.event_sysfs = "/sys/bus/event_source/devices/hv_24x7" # Check if 24x7 is present if os.path.exists("%s" % self.event_sysfs): self.log.info('hv_24x7 present') else: self.cancel("%s doesn't exist.This feature is supported" " only on LPAR" % self.event_sysfs) # Performance measurement has to be enabled in lpar through BMC # Check if its enabled result_perf = process.run("%s,domain=2,core=1/ sleep 1" % self.perf_stat, ignore_status=True) if "operations is limited" in result_perf.stderr.decode("utf-8"): self.cancel("Please enable lpar to allow collecting" " the 24x7 counters info") if "You may not have permission to collect stats." in result_perf.stderr.decode( "utf-8"): self.cancel("Please enable lpar to allow collecting" " the 24x7 counters info")
def test_power9_get_family(self):
    with unittest.mock.patch('avocado.utils.cpu.get_arch',
                             return_value='powerpc'):
        with unittest.mock.patch(
                'builtins.open',
                return_value=self._get_data_mock('power9')):
            self.assertEqual(cpu.get_family(), "power9")
def test_intel_get_family(self):
    with unittest.mock.patch('avocado.utils.cpu.get_arch',
                             return_value='x86_64'):
        with unittest.mock.patch('avocado.utils.cpu.get_vendor',
                                 return_value='intel'):
            with unittest.mock.patch(
                    'builtins.open',
                    return_value=self._get_file_mock(b'broadwell')):
                self.assertEqual(cpu.get_family(), "broadwell")
def test(self):
    cpu_family = cpu.get_family()
    if cpu_family == 'power8':
        self.run_event('raw_codes_p8', 'raw')
        self.error_check()
        self.run_event('name_events_p8', 'name')
        self.error_check()
    elif cpu_family == 'power9':
        self.run_event('raw_codes_p9', 'raw')
        self.error_check()
    else:
        self.cancel('This test is not supported on %s' % cpu_family)
def setUp(self):
    smg = SoftwareManager()
    self.cpu_family = cpu.get_family()
    self.dist = distro.detect()
    if self.dist.name in ['centos', 'fedora', 'rhel', 'SuSE']:
        pkgs = ['pcp', 'pcp-pmda-perfevent']
    elif self.dist.name in ['Ubuntu', 'debian']:
        pkgs = ['pcp']
    else:
        self.cancel("PCP is not supported on %s" % self.dist.name)
    for pkg in pkgs:
        if not smg.check_installed(pkg) and not smg.install(pkg):
            self.cancel("Package %s is missing/could not be installed"
                        % pkg)
def setUp(self):
    '''
    Install the basic packages to support perf
    '''
    # Check for basic utilities
    smm = SoftwareManager()
    detected_distro = distro.detect()
    distro_name = detected_distro.name
    if detected_distro.arch != 'ppc64le':
        self.cancel('This test is not supported on %s architecture'
                    % detected_distro.arch)
    if cpu.get_family() == 'power8':
        self.cancel('This test does not apply to Power8')
    if 'PowerNV' not in genio.read_file('/proc/cpuinfo').rstrip('\t\r\n\0'):
        self.cancel('This test applies only to PowerNV')
    deps = ['gcc', 'make']
    if 'Ubuntu' in distro_name:
        deps.extend(['linux-tools-common',
                     'linux-tools-%s' % platform.uname()[2]])
    elif distro_name in ['rhel', 'SuSE', 'fedora', 'centos']:
        deps.extend(['perf'])
    else:
        self.cancel("Install the package for perf supported by %s"
                    % detected_distro.name)
    for package in deps:
        if not smm.check_installed(package) and not smm.install(package):
            self.cancel('%s is needed for the test to be run' % package)
    # Collect nest events
    self.list_of_nest_events = []
    for line in process.get_command_output_matching('perf list', 'nest_'):
        line = line.split(' ')[2]
        if 'pm_nest' in line:
            continue
        self.list_of_nest_events.append(line)
    # Clear the dmesg so we can capture the delta at the end of the test.
    process.run("dmesg -c", sudo=True)
def test_s390x_get_family(self):
    with unittest.mock.patch('avocado.utils.cpu.get_arch',
                             return_value='s390'):
        with unittest.mock.patch('avocado.utils.cpu.get_version',
                                 return_value='8561'):
            self.assertEqual(cpu.get_family(), "z15")
def run(test, params, env):
    """
    Test hpt resizing
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = eval(params.get('error_msg', '[]'))
    hpt_attrs = eval(params.get('hpt_attrs', '{}'))
    hpt_order_path = params.get('hpt_order_path', '')
    cpu_attrs = eval(params.get('cpu_attrs', '{}'))
    numa_cell = eval(params.get('numa_cell', '{}'))
    hugepage = 'yes' == params.get('hugepage', 'no')
    maxpagesize = int(params.get('maxpagesize', 0))
    check_hp = 'yes' == params.get('check_hp', 'no')
    qemu_check = params.get('qemu_check', '')
    skip_p8 = 'yes' == params.get('skip_p8', 'no')

    def set_hpt(vmxml, sync, **attrs):
        """
        Set resizing value to vm xml

        :param vmxml: xml of vm to be manipulated
        :param sync: whether to sync vmxml after
        :param attrs: attrs to set to hpt xml
        """
        if vmxml.xmltreefile.find('/features'):
            features_xml = vmxml.features
        else:
            features_xml = vm_xml.VMFeaturesXML()
        hpt_xml = vm_xml.VMFeaturesHptXML()
        for attr in attrs:
            setattr(hpt_xml, attr, attrs[attr])
        features_xml.hpt = hpt_xml
        vmxml.features = features_xml
        logging.debug(vmxml)
        if sync:
            vmxml.sync()

    def set_cpu(vmxml, **attrs):
        """
        Set cpu attrs for vmxml according to given attrs

        :param vmxml: xml of vm to be manipulated
        :param attrs: attrs to set to cpu xml
        """
        if vmxml.xmltreefile.find('cpu'):
            cpu = vmxml.cpu
        else:
            cpu = vm_xml.VMCPUXML()
        if 'numa_cell' in attrs:
            cpu.xmltreefile.create_by_xpath('/numa')
            attrs['numa_cell'] = cpu.dicts_to_cells(attrs['numa_cell'])
        for key in attrs:
            setattr(cpu, key, attrs[key])
        vmxml.cpu = cpu
        vmxml.sync()

    def set_memory(vmxml):
        """
        Set memory attributes in vm xml
        """
        vmxml.max_mem_rt = int(params.get('max_mem_rt', 30670848))
        vmxml.max_mem_rt_slots = int(params.get('max_mem_rt_slots', 16))
        vmxml.max_mem_rt_unit = params.get('max_mem_rt_unit', 'KiB')
        logging.debug(numa_cell)
        if numa_cell:
            # Remove cpu topology to avoid that it doesn't match vcpu count
            if vmxml.get_cpu_topology():
                new_cpu = vmxml.cpu
                new_cpu.del_topology()
                vmxml.cpu = new_cpu
            vmxml.vcpu = max([int(cell['cpus'][-1]) for cell in numa_cell]) + 1
        vmxml.sync()

    def check_hpt_order(session, resizing=''):
        """
        Return hpt order in hpt_order file by default.
        If 'resizing' is disabled, test updating hpt_order.
        """
        if not hpt_order_path:
            test.cancel('No hpt order path provided.')
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        if resizing == 'disabled':
            cmd_result = session.cmd_status_output(
                'echo %d > %s' % (hpt_order + 1, hpt_order_path))
            result = process.CmdResult(stderr=cmd_result[1],
                                       exit_status=cmd_result[0])
            libvirt.check_exit_status(result, True)
            libvirt.check_result(result, error_msg)
        return hpt_order

    def check_hp_in_vm(session, page_size):
        """
        Check if hugepage size is correct inside vm

        :param session: the session of the running vm
        :param page_size: the expected pagesize to be checked inside vm
        """
        expect = False if int(page_size) == 65536 else True
        meminfo = session.cmd_output('cat /proc/meminfo|grep Huge')
        logging.info('meminfo: \n%s', meminfo)
        pattern = 'Hugepagesize:\s+%d\s+kB' % int(page_size / 1024)
        logging.info('"%s" should %s be found in meminfo output',
                     pattern, '' if expect else 'not')
        result = expect == bool(re.search(pattern, meminfo))
        if not result:
            test.fail('meminfo output not meet expectation')

        # Check PAGE_SIZE in another way
        if not expect:
            conf_page_size = session.cmd_output('getconf PAGE_SIZE')
            logging.debug('Output of "getconf PAGE_SIZE": %s',
                          conf_page_size)
            if int(conf_page_size) != int(page_size):
                test.fail('PAGE_SIZE not correct, should be %r, '
                          'actually is %r' % (page_size, conf_page_size))

    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        arch = platform.machine()
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        resizing = hpt_attrs.get('resizing')

        # Test on ppc64le hosts
        if arch.lower() == 'ppc64le':
            cpu_arch = cpu.get_family() if hasattr(cpu, 'get_family') \
                else cpu.get_cpu_arch()
            logging.debug('cpu_arch is: %s', cpu_arch)
            if skip_p8 and cpu_arch == 'power8':
                test.cancel('This case is not for POWER8')
            if maxpagesize and not utils_misc.compare_qemu_version(3, 1, 0):
                test.cancel('Qemu version is too low, '
                            'does not support maxpagesize setting')
            if maxpagesize == 16384 and cpu_arch == 'power9':
                test.cancel('Power9 does not support 16M pagesize.')
            set_hpt(vmxml, True, **hpt_attrs)
            if cpu_attrs or numa_cell:
                if numa_cell:
                    cpu_attrs['numa_cell'] = numa_cell
                set_cpu(vmxml, **cpu_attrs)
            if hugepage:
                vm_mem = vmxml.max_mem
                host_hp_size = utils_memory.get_huge_page_size()
                # Make 100m extra memory just to be safe
                hp_count = max((vm_mem + 102400) // host_hp_size, 1200)
                vm_xml.VMXML.set_memoryBacking_tag(vm_name, hpgs=True)
                # Set up hugepage env
                mnt_source, hp_path, fstype = \
                    'hugetlbfs', '/dev/hugepages', 'hugetlbfs'
                if not os.path.isdir(hp_path):
                    process.run('mkdir %s' % hp_path, verbose=True)
                utils_memory.set_num_huge_pages(hp_count)
                if utils_misc.is_mounted(mnt_source, hp_path, fstype,
                                         verbose=True):
                    utils_misc.umount(mnt_source, hp_path, fstype,
                                      verbose=True)
                utils_misc.mount(mnt_source, hp_path, fstype, verbose=True)
                # Restart libvirtd service to make sure mounted hugepage
                # is recognized
                utils_libvirtd.libvirtd_restart()
            if resizing == 'enabled':
                set_memory(vmxml)
            logging.debug('vmxml: \n%s', vmxml)

            # Start vm and check if start succeeds
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result, expect_error=status_error)
            # If vm is not supposed to start, terminate test
            if status_error:
                libvirt.check_result(result, error_msg)
                return
            libvirt.check_qemu_cmd_line(qemu_check)
            session = vm.wait_for_login()
            hpt_order = check_hpt_order(session, resizing)

            # Check hugepage inside vm
            if check_hp:
                check_hp_in_vm(session, maxpagesize * 1024)

            if resizing == 'enabled':
                mem_xml = utils_hotplug.create_mem_xml(
                    tg_size=int(params.get('mem_size', 2048000)),
                    tg_sizeunit=params.get('size_unit', 'KiB'),
                    tg_node=int(params.get('mem_node', 0)),
                    mem_model=params.get('mem_model', 'dimm'))
                logging.debug(mem_xml)

                # Attach memory device to the guest 12 times,
                # which reaches the maximum memory limitation
                for i in range(12):
                    virsh.attach_device(vm_name, mem_xml.xml,
                                        debug=True, ignore_status=False)
                xml_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name)
                logging.debug(xml_after_attach)

                # Check dumpxml of the guest,
                # check if each device has its alias
                for i in range(12):
                    pattern = "alias\s+name=[\'\"]dimm%d[\'\"]" % i
                    logging.debug('Searching for %s', pattern)
                    if not re.search(pattern,
                                     str(xml_after_attach.xmltreefile)):
                        test.fail('Missing memory alias: %s' % pattern)

        # Test on non-ppc64le hosts
        else:
            set_hpt(vmxml, sync=False, **hpt_attrs)
            result = virsh.define(vmxml.xml)
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, error_msg)

    finally:
        bk_xml.sync()
        if hugepage:
            utils_misc.umount('hugetlbfs', '/dev/hugepages', 'hugetlbfs')
            utils_memory.set_num_huge_pages(0)
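Both this snippet and the memory hotplug test further down guard the call with `hasattr`, since older avocado releases spelled the lookup `cpu.get_cpu_arch()` and only newer ones expose `cpu.get_family()`. A standalone sketch of that compatibility shim (the helper name is ours):

from avocado.utils import cpu

def detect_power_family():
    # Newer avocado releases expose cpu.get_family(); older ones only
    # had cpu.get_cpu_arch(), which returns a comparable 'powerN' string.
    if hasattr(cpu, 'get_family'):
        return cpu.get_family()
    return cpu.get_cpu_arch()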
def setUp(self): """ Setup checks : 0. Processor should be ppc64. 1. Perf package 2. 24x7 is not supported on guest 3. 24x7 is present 4. Performance measurement is enabled in LPAR through BMC """ smm = SoftwareManager() detected_distro = distro.detect() if 'ppc64' not in detected_distro.arch: self.cancel("Processor is not PowerPC") deps = ['gcc', 'make'] if 'Ubuntu' in detected_distro.name: deps.extend( ['linux-tools-common', 'linux-tools-%s' % platform.uname()[2]]) elif detected_distro.name in ['rhel', 'SuSE', 'fedora', 'centos']: deps.extend(['perf', 'numactl']) else: self.cancel("Install the package for perf supported by %s" % detected_distro.name) for package in deps: if not smm.check_installed(package) and not smm.install(package): self.cancel('%s is needed for the test to be run' % package) cpu_family = cpu.get_family() perf_args = "perf stat -v -e" if cpu_family == 'power8': perf_stat = "%s hv_24x7/HPM_0THRD_NON_IDLE_CCYC" % perf_args elif cpu_family == 'power9': perf_stat = "%s hv_24x7/CPM_TLBIE" % perf_args elif cpu_family == 'power10': perf_stat = "%s hv_24x7/CPM_TLBIE_FIN" % perf_args event_sysfs = "/sys/bus/event_source/devices/hv_24x7" # Check if this is a guest # 24x7 is not suported on guest if "emulated by" in cpu._get_info(): self.cancel("This test is not supported on guest") # Check if 24x7 is present if os.path.exists(event_sysfs): self.log.info('hv_24x7 present') else: self.cancel("%s doesn't exist.This test is supported" " only on PowerVM" % event_sysfs) # Performance measurement has to be enabled in lpar through BMC # Check if its enabled result_perf = process.run("%s,domain=2,core=1/ sleep 1" % perf_stat, ignore_status=True) if "operations is limited" in result_perf.stderr.decode("utf-8"): self.cancel("Please enable LPAR to allow collecting" " the 24x7 counters info") # Getting the number of cores output = process.run("lscpu") for line in output.stdout.decode("utf-8").split('\n'): if 'Core(s) per socket:' in line: self.cores = int(line.split(':')[1].strip()) # Getting the number of chips available in the machine self.chip = memory.numa_nodes() # Collect all hv_24x7 events self.list_of_hv_24x7_events = [] for lne in process.get_command_output_matching('perf list', 'hv_24x7'): lne = lne.split(',')[0].split('/')[1] self.list_of_hv_24x7_events.append(lne) # Clear the dmesg to capture the delta at the end of the test. process.run("dmesg -C", sudo=True)
def create_host_os_cfg(options):
    def _forced_or_detected(forced, detected):
        if forced:
            return forced
        else:
            return detected

    host_os_cfg_path = data_dir.get_backend_cfg_path(
        get_opt(options, 'vt.type'), 'host.cfg')
    with open(host_os_cfg_path, 'w') as cfg:
        detected = distro.detect()
        name = host_os_get_distro_name(options, detected)
        version = _forced_or_detected(
            get_opt(options, 'vt_host_distro_version'),
            "m%s" % detected.version)
        release = _forced_or_detected(
            get_opt(options, 'vt_host_distro_release'),
            "u%s" % detected.release)
        arch = _forced_or_detected(
            get_opt(options, 'vt_host_distro_arch'),
            "Host_arch_%s" % detected.arch)
        vendor = cpu.get_vendor() if hasattr(cpu, 'get_vendor') \
            else cpu.get_cpu_vendor_name()
        family = None
        if hasattr(cpu, 'get_family'):
            try:
                family = cpu.get_family()
            except Exception:
                pass
        cpu_version = cpu.get_version() if hasattr(cpu, 'get_version') \
            else None
        # Replace special chars with _ to avoid bootstrap failure
        cpu_version = re.sub(r'[^\w-]', '_', cpu_version) \
            if cpu_version else cpu_version

        cfg.write("variants:\n")
        cfg.write("    - @Host:\n")
        cfg.write("        variants:\n")
        cfg.write("            - @%s:\n" % name)
        cfg.write("                variants:\n")
        cfg.write("                    - @%s:\n" % version)
        cfg.write("                        variants:\n")
        cfg.write("                            - @%s:\n" % release)
        cfg.write("                                variants:\n")
        cfg.write("                                    - @%s:\n" % arch)
        if vendor:
            cfg.write("variants:\n")
            cfg.write("    - @HostCpuVendor:\n")
            cfg.write("        variants:\n")
            cfg.write("            - @%s:\n" % vendor)
        if family:
            cfg.write("variants:\n")
            cfg.write("    - @HostCpuFamily:\n")
            cfg.write("        variants:\n")
            cfg.write("            - @%s:\n" % family)
            if cpu_version:
                cfg.write("        variants:\n")
                cfg.write("            - @HostCpuVersion:\n")
                cfg.write("                variants:\n")
                cfg.write("                    - @%s:\n" % cpu_version)

    count = [get_opt(options, 'vt_host_distro_name'),
             get_opt(options, 'vt_host_distro_version'),
             get_opt(options, 'vt_host_distro_release'),
             get_opt(options, 'vt_host_distro_arch')].count(None)
    if count == 4:
        source = "distro detection"
    elif count == 0:
        source = "command line parameters"
    else:
        source = "distro detection and command line parameters"
    LOG.debug("Config file %s generated from %s", host_os_cfg_path, source)
def run(test, params, env):
    """
    Execute the libguestfs-test-tool unittest inside L1 guest.

    1) Launch a guest and check if libguestfs-tools is installed.
    2) Execute the libguestfs-test-tool directly launching qemu.
    3) Analyze the result of libguestfs-test-tool.
    4) Check the nested file exists.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    kvm_module = arch.get_kvm_module_list()[-1].replace('-', '_')
    is_kvm_mode = params["nested_flag"] == "nested_flag_on"
    nested_file = os.path.join("/sys/module/", kvm_module,
                               "parameters/nested")
    unittest_timeout = params.get_numeric("unittest_timeout")

    cpu_vendor = cpu.get_vendor()
    cpu_arch = cpu.get_arch()
    if cpu_arch == "powerpc" and int(cpu.get_family().strip("power")) < 9:
        test.cancel("Nested feature requires a POWER9 CPU")
    elif cpu_arch == "x86_64":
        flag = "vmx" if cpu_vendor == "intel" else "svm"
        params["cpu_model_flags"] = params["cpu_model_flags"].format(flag)

    params["start_vm"] = "yes"
    vm = env.get_vm(params["main_vm"])
    vm.create(params=params)
    vm.verify_alive()
    session = vm.wait_for_login()

    error_context.context("Check if libguestfs-tools is installed.",
                          logging.info)
    sm = utils_package.RemotePackageMgr(session, "libguestfs-tools")
    if not (sm.is_installed("libguestfs-tools") or sm.install()):
        test.cancel("Unable to install libguestfs-tools inside guest.")

    try:
        error_context.context("Execute the libguestfs-test-tool unittest "
                              "directly launching qemu.", logging.info)
        stderr_file = "/tmp/lgf_stderr"
        lgf_cmd = ("LIBGUESTFS_BACKEND=direct libguestfs-test-tool "
                   "--timeout {} 2> {}".format(unittest_timeout,
                                               stderr_file))
        lgf_s, lgf_o = session.cmd_status_output(lgf_cmd,
                                                 timeout=unittest_timeout)
        logging.debug("libguestfs-test-tool stdout:\n%s", lgf_o)
        lgf_stderr = session.cmd_output("cat " + stderr_file)
        lgf_tcg = re.search("Back to tcg accelerator", lgf_stderr)

        error_context.context("Analyze the libguestfs-test-tool test "
                              "result.", logging.info)
        fail_msg = ("the exit status is non-zero" if lgf_s else
                    "back to tcg accelerator" if lgf_tcg and is_kvm_mode
                    else "")
        if fail_msg:
            logging.debug("libguestfs-test-tool stderr:\n%s", lgf_stderr)
            test.fail("libguestfs-test-tool execution failed due to: %s. "
                      % fail_msg)

        error_context.context("Check the nested file status.", logging.info)
        file_s, file_o = session.cmd_status_output("cat " + nested_file)
        if re.match(r"[1Y]", file_o) and is_kvm_mode:
            logging.info("Guest runs with nested flag, the nested feature "
                         "has been enabled.")
        elif file_s == 1 and not is_kvm_mode:
            logging.info("Guest runs without nested flag, so the nested "
                         "file does not exist.")
        else:
            logging.error("Nested file status: %s, output: %s",
                          file_s, file_o)
            test.fail("Getting the status of nested file has unexpected "
                      "result.")
    finally:
        session.cmd("rm -f " + stderr_file, ignore_all_errors=True)
        session.close()
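The `strip("power")` comparison above relies on `cpu.get_family()` returning strings such as 'power9'. A sketch of the same parsing (the helper name is ours) that slices the prefix instead, since `str.strip` removes by character set rather than by prefix:

from avocado.utils import cpu

def power_generation():
    family = cpu.get_family()          # e.g. 'power8', 'power9', 'power10'
    if not family.startswith('power'):
        raise ValueError('not a POWER CPU: %s' % family)
    # Slicing off the literal prefix avoids strip()'s set semantics.
    return int(family[len('power'):])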
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2019 IBM
# Author: Shirisha <*****@*****.**>

import os
import platform

from avocado import Test
from avocado import skipUnless
from avocado.utils import archive
from avocado.utils import cpu, build, distro, process, genio
from avocado.utils.software_manager.manager import SoftwareManager

IS_POWER8 = 'power8' in cpu.get_family()


class PerfWatchPoint(Test):

    @skipUnless(IS_POWER8, 'Supported only on Power8')
    def setUp(self):
        '''
        Install the basic packages to support perf
        '''
        # Check for basic utilities
        smm = SoftwareManager()
        detected_distro = distro.detect()
        self.distro_name = detected_distro.name
        if detected_distro.arch != 'ppc64le':
            self.cancel('This test is not supported on %s architecture'
                        % detected_distro.arch)
def run(test, params, env):
    """
    Test memory device hotplug and unplug.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare memory device xml.
    3.Edit domain xml and start the domain.
    4.Perform attach/detach operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Global variables to store max/current memory;
    # they may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it;
        # that is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size
        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepage, string type
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.

        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            if libvirt_version.version_compare(7, 3, 0):
                cmd = cmd + " | grep " + '\\"discard-data\\":true'
            else:
                cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = 'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (" | grep 'memory-backend-file,id=%s' | grep "
                            "'size=%s" % (cmd_str, size))
                else:
                    cmd_str = 'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (" | grep 'memory-backend-ram,id=%s' | grep "
                            "'size=%s" % (cmd_str, size))
                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node and
                                    node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"
        # Run the command
        result = process.run(cmd, shell=True, verbose=True,
                             ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be brought online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30, first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't be changed "
                          "after attaching memory device")
        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't be changed "
                          "after detaching memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variables to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if set memory align to 256
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)
        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])
        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            if dom_mem[key] % 262144:
                logging.error('%s not align to 256', key)
                if key == 'currentMemory':
                    continue
                all_align = False
        if not all_align:
            test.fail('Memory not align to 256')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info('Check Pass: Memory is equal to (all numa memory '
                         '+ memory device)')
        else:
            test.fail('Memory is not equal to (all numa memory + memory '
                      'device)')
        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option, debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if max_mem:
            vmxml.max_mem = int(max_mem)
        if cur_mem:
            vmxml.current_mem = int(cur_mem)
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Does not exist
                pass
        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(utils_numeric.align_value(
                        cells[cell]["memory"], align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tags;
            # libvirt will fill them in automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family') \
            else cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        [x.update({'size': pg_size}) for x in huge_pages]
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt "
                    "version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so the host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain any way if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If any error is expected, the command error status
                # should be checked on the last iteration
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit,
                    tg_node, node_mask, mem_model, mem_discard,
                    device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = ("memory hotplug isn't supported by this "
                                  "QEMU binary")
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(
                tg_size, pg_size, mem_addr, tg_sizeunit,
                pg_unit, tg_node, node_mask, mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after starting the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail('Memory after attach not equal to original mem '
                          '+ attached mem')

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consume memory on guest to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory, which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait for the vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait for the vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(
                tg_size, pg_size, mem_addr, tg_sizeunit,
                pg_unit, tg_node, node_mask, mem_model, mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name, dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name, device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                                known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occurred in Host, "
                                          "while hot unplug: %s",
                                          known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug, unable to connect "
                                  "to VM or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed',
                                f.read())
                        if not flag:
                            # The attached memory is used by the vm and
                            # could not be unplugged; the result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug, unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                            known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occurred, while hot"
                                          " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
            # Remove dmesg temp file
            if os.path.exists(dmesg_file):
                os.remove(dmesg_file)
    except xcepts.LibvirtXMLError:
        if define_error:
            pass
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2019 IBM
# Author: Nageswara R Sastry <*****@*****.**>

import os
import platform
import shutil

from avocado import Test
from avocado import skipUnless
from avocado.utils import cpu, distro, process, genio
from avocado.utils.software_manager import SoftwareManager

CPU_FAMILY = cpu.get_family()
IS_POWER9 = 'power9' in CPU_FAMILY
IS_POWER8 = 'power8' in CPU_FAMILY


class PerfRawevents(Test):

    """
    Tests raw events on Power8 and Power9 along with named events

    :avocado: tags=perf,rawevents,events
    """

    # Initializing fail command list
    fail_cmd = list()

    def copy_files(self, filename):
        shutil.copyfile(self.get_data(filename),
                        os.path.join(self.teststmpdir, filename))