def test_boot_hpet_mmap_enabled(self):
    '''
    bz: 1660796, 1764790
    polarion_id:
    description:
        Boot the system with "hpet_mmap=1" appended to the kernel command
        line and verify HPET shows up enabled (x86 only).
    '''
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0, msg='clean /var/crash firstly')
    # HPET is an x86 feature; cancel the case on any other architecture.
    utils_lib.is_arch(self, arch='x86', action='cancel')
    cmd = 'sudo grubby --update-kernel=ALL --args="hpet_mmap=1"'
    utils_lib.run_cmd(self, cmd, msg='Append hpet_mmap=1 to command line!', timeout=600)
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    # Give the reboot a head start before polling for reconnection.
    time.sleep(10)
    utils_lib.init_connection(self, timeout=800)
    utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw='hpet_mmap=1')
    # '6HPET' would indicate a garbled HPET line in dmesg (bz 1764790).
    utils_lib.run_cmd(self, 'dmesg | grep -i hpet', expect_kw='enabled', expect_not_kw='6HPET')
    cmd = 'sudo cat /sys/devices/system/clocksource/clocksource0/available_clocksource'
    out = utils_lib.run_cmd(self, cmd)
    # Only check the iomem mapping when hpet is an available clocksource.
    if 'hpet' in out:
        utils_lib.run_cmd(self, 'sudo cat /proc/iomem|grep -i hpet', expect_kw='HPET 0')
    utils_lib.check_log(self, "error,warn,fail,trace,Trace", rmt_redirect_stdout=True)
def _recover_memory(self, mem_gb_current):
    '''
    Restore the VM memory size to mem_gb_current and re-verify it.

    Reconnects to the guest after the resize and checks the size seen
    inside the guest via self._verify_memory_size().
    '''
    self.log.info("Recover VM memory")
    self.vm.update_memory_size(mem_gb_current)
    self.assertEqual(self.vm.get_memory_size(), mem_gb_current,
                     "Test failed as recover VM memory failed")
    utils_lib.init_connection(self)
    self._verify_memory_size()
def test_boot_usbcore_quirks(self):
    '''
    bz: 1809429
    polarion_id:
    description:
        Boot with a usbcore.quirks= option on the kernel command line and
        verify the system boots cleanly without generating a vmcore.
    '''
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0, msg='clean /var/crash firstly')
    option = 'usbcore.quirks=quirks=0781:5580:bk,0a5c:5834:gij'
    cmd = 'sudo grubby --update-kernel=ALL --args="{}"'.format(option)
    utils_lib.run_cmd(self, cmd, msg='Append {} to command line!'.format(option), timeout=600)
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    time.sleep(10)
    utils_lib.init_connection(self, timeout=800)
    utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw=option)
    # "No such file or directory" is the PASS condition here: /var/crash was
    # cleaned above, so any vmcore-dmesg.txt would mean the kernel crashed.
    cmd = r'sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50'
    utils_lib.run_cmd(self, cmd, expect_kw='No such file or directory',
                      msg='make sure there is no core generated')
    utils_lib.check_log(self, "error,warn,fail,trace,Trace", skip_words='ftrace', rmt_redirect_stdout=True)
def tearDown(self):
    '''
    Undo the per-case boot parameter changes made by the boot tests,
    then reboot once so the cleaned command line takes effect.
    '''
    if 'test_boot_debugkernel' in self.id():
        cmd = "sudo grubby --set-default-index=%s" % self.old_grub_index
        utils_lib.run_cmd(self, cmd, expect_ret=0,
                          msg="restore default boot index to {}".format(self.old_grub_index))
    if 'test_boot_hpet_mmap_enabled' in self.id():
        cmd = 'sudo grubby --update-kernel=ALL --remove-args="hpet_mmap=1"'
        utils_lib.run_cmd(self, cmd, msg='Remove "hpet_mmap=1"')
    if 'test_boot_mitigations' in self.id():
        cmd = 'sudo grubby --update-kernel=ALL --remove-args="mitigations=auto,nosmt"'
        utils_lib.run_cmd(self, cmd, msg='Remove "mitigations=auto,nosmt"')
    if 'test_boot_usbcore_quirks' in self.id():
        cmd = 'sudo grubby --update-kernel=ALL --remove-args="usbcore.quirks=quirks=0781:5580:bk,0a5c:5834:gij"'
        utils_lib.run_cmd(
            self, cmd,
            msg='Remove "usbcore.quirks=quirks=0781:5580:bk,0a5c:5834:gij"'
        )
    # The kdump case already went through a crash/reboot cycle, so it does
    # not need (and should not get) an extra reboot here.
    if 'test_kdump_no_specify_cpu' not in self.id():
        utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
        time.sleep(10)
        utils_lib.init_connection(self, timeout=800)
def test_kdump_no_specify_cpu(self):
    '''
    bz: 1654962
    polarion_id: RHEL7-58669
    description:
        Trigger a sysrq crash with kdump active and verify a vmcore with
        the expected panic backtrace is captured under /var/crash.
    '''
    # Pre-flight: kdump must be configured and active.
    for cmd in ['sudo kdumpctl showmem','cat /proc/cmdline','systemctl is-active kdump']:
        utils_lib.run_cmd(self, cmd, expect_ret=0)
    # lscpu output is collected for the debug log only; it is not parsed here.
    output = utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
    product_id = utils_lib.get_product_id(self)
    if utils_lib.is_arch(self, 'aarch64') and not utils_lib.is_metal(self) and float(product_id) < 8.6:
        self.skipTest("Cancel as bug 1654962 in arm guest earlier than 8.6 2082405" )
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0, msg='clean /var/crash firstly')
    utils_lib.run_cmd(self, r'sudo sync', expect_ret=0)
    self.log.info("Before system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0, msg='list /var/crash')
    # bash -c is required so the > redirection also runs as root.
    utils_lib.run_cmd(self, "sudo bash -c \"echo c > /proc/sysrq-trigger\"", msg='trigger crash')
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    self.log.info("After system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0, msg='list /var/crash after crash')
    # The captured vmcore dmesg must show the sysrq-triggered panic path.
    cmd = r'sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50'
    utils_lib.run_cmd(self, cmd, expect_ret=0, expect_kw='write_sysrq_trigger')
def test_cpu_passthrough(self):
    '''
    case_tag: CPU
    case_name: test_cpu_passthrough
    case_file: os_tests.tests.test_nutanix_vm.test_cpu_passthrough
    component: CPU
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Verify CPU passthrough features
    key_steps: |
        1. Enable CPU passthrough, and check CPU info on RHEL guest OS
        2. Disable CPU passthrough, and check CPU info on RHEL guest OS
    expect_result:
        CPU passthrough can be enable or disable and take effect on RHEL guest OS
    debug_want: N/A
    '''
    pt_is_disabled = self.vm.get_cpu_passthrough(enabled=False)
    if pt_is_disabled:
        self.vm.set_cpu_passthrough(enabled=True)
        utils_lib.init_connection(self)
        self.assertTrue(self.vm.get_cpu_passthrough(enabled=True),
                        "Test failed as setup CPU passthrough failed")
    else:
        # Fixed typo in the failure message ("Expecte" -> "Expected").
        self.fail(
            "Expected CPU passthrough set as disabled by default, need more investigation here"
        )
    # With passthrough on, the host's vmx flag must be visible in the guest.
    cmd = "grep -i vmx /proc/cpuinfo"
    utils_lib.run_cmd(
        self,
        cmd,
        expect_ret=0,
        expect_kw="vmx",
        msg="Verify if cpu vmx has take effected on RHEL guest OS")
    self.log.info("Recover VM cpu passthrough")
    self.vm.set_cpu_passthrough(enabled=False)
    self.assertTrue(self.vm.get_cpu_passthrough(enabled=False),
                    "Test failed as recover CPU passthrough failed")
    utils_lib.init_connection(self)
    # After disabling passthrough the vmx flag must disappear again.
    utils_lib.run_cmd(
        self,
        cmd,
        expect_not_ret=0,
        expect_not_kw="vmx",
        msg="Verify if cpu vmx has disabled on RHEL guest OS")
def _update_kernel_args(self, boot_param_required):
    '''
    Append boot_param_required to the kernel command line via grubby,
    reboot the guest, reconnect, and return the new /proc/cmdline content.
    '''
    grubby_cmd = 'sudo grubby --update-kernel=ALL --args="{}"'.format(boot_param_required)
    utils_lib.run_cmd(self, grubby_cmd,
                      msg="append {} to boot params".format(boot_param_required))
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    # Give the reboot a head start before polling for the connection.
    time.sleep(10)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    return utils_lib.run_cmd(self, 'cat /proc/cmdline')
def _start_vm_and_check(self):
    '''
    Power on the VM, reconnect, and confirm we are logged in as the
    expected user.
    '''
    self.vm.start(wait=True)
    # Allow the guest time to finish booting before attempting SSH.
    time.sleep(30)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    whoami_out = utils_lib.run_cmd(self, 'whoami').strip()
    self.assertEqual(
        self.vm.vm_username, whoami_out,
        "Start VM error: output of cmd `who` unexpected -> %s" % whoami_out)
def test_kdump_unknown_nmi_panic_disabled(self):
    '''
    description:
        Test Diagnostic Interrupt doesn't trigger the kdump when unknown_nmi_panic is disabled with RHEL on AWS.
        https://aws.amazon.com/blogs/aws/new-trigger-a-kernel-panic-to-diagnose-unresponsive-ec2-instances/
    testplan: N/A
    bugzilla_id: n/a
    is_customer_case: False
    maintainer: xiliang
    case_priority: 0
    case_component: Kdump
    key_steps:
        1. Launch an instance on AWS EC2.
        2. Check the kdump status by command "systemctl status kdump.service".
        3. Disable kernel to trigger a kernel panic upon receiving the interrupt by set /etc/sysctl.conf and add a line : kernel.unknown_nmi_panic=0 and reboot. Or by command "sudo sysctl kernel.unknown_nmi_panic=0".
        4. Send Diagnostic Interrupt to the instance.
    pass_criteria:
        Unknown NMI received and kernel panic isn't triggered, system is still running with no error message.
    '''
    utils_lib.run_cmd(self, 'lscpu', cancel_not_kw='aarch64', msg='Not support in arm instance')
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0, msg='clean /var/crash firstly')
    utils_lib.run_cmd(self, r'sudo sysctl kernel.unknown_nmi_panic=0', expect_ret=0,
                      msg='disable unknown_nmi_panic')
    utils_lib.run_cmd(self, r'sudo sysctl -a|grep -i nmi', expect_ret=0,
                      expect_kw='kernel.unknown_nmi_panic = 0')
    try:
        is_success = self.vm.send_nmi()
    # Removed the unused "as err" binding — the exception detail was never used.
    except UnSupportedAction:
        self.skipTest("current {} not support nmi operation".format(
            self.vm.provider))
    if not is_success:
        self.fail("Cannot trigger panic via nmi!")
    time.sleep(10)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    # With unknown_nmi_panic=0 the NMI must NOT have produced a vmcore.
    utils_lib.run_cmd(self, r'sudo cat /var/crash/*/vmcore-dmesg.txt',
                      expect_not_ret=0, msg='list /var/crash after crash')
    # The NMI itself should still be logged by the running kernel.
    cmd = r'sudo dmesg|tail -10'
    utils_lib.run_cmd(self, cmd, expect_ret=0, expect_kw='NMI received')
def test_add_vcpu(self):
    '''
    case_tag: CPU
    case_name: test_add_vcpu
    case_file: os_tests.tests.test_nutanix_vm.test_add_vcpu
    component: CPU
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Add vCPU to a powered off VM
    key_steps: |
        1. Add vCPU to a powered off VM
        2. Verify CPU numbers on RHEL guest OS
    expect_result:
        CPU numbers is consistent between Nutanix AHV and RHEL guest OS
    debug_want: N/A
    '''
    # vCPU count can only be changed while the VM is powered off.
    if self.vm.is_started():
        self.vm.stop(wait=True)
    vcpu_num_current = self.vm.get_vcpu_num()
    vcpu_num_target = vcpu_num_current * 2
    self.log.info("Add VM vCPUs")
    self.vm.update_vcpu_num(vcpu_num_target)
    self.assertEqual(self.vm.get_vcpu_num(), vcpu_num_target,
                     "Test failed as add VM vCPUs failed")
    utils_lib.init_connection(self)
    self._verify_cpu_cores()
    # Roll back to the original vCPU count and verify again.
    self.log.info("Recover VM vCPUs")
    self.vm.update_vcpu_num(vcpu_num_current)
    self.assertEqual(self.vm.get_vcpu_num(), vcpu_num_current,
                     "Test failed as recover VM vCPUs failed")
    utils_lib.init_connection(self)
    self._verify_cpu_cores()
def test_memory_vnuma(self):
    '''
    case_tag: Memory
    case_name: test_memory_vnuma
    case_file: os_tests.tests.test_nutanix_vm.test_memory_vnuma
    component: Memory
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Verify memory vnuma features
    key_steps: |
        1. Setup memory vnuma nodes number, and verify on RHEL guest OS
    expect_result:
        vnuma numbers is consistent between Nutanix AHV and RHEL guest OS
    debug_want:
    '''
    vnuma_num_current = self.vm.get_memory_vnuma()
    # Target one vNUMA node per physical host CPU.
    vnuma_num_target = self.vm.host_cpu_num()
    if vnuma_num_target < 2:
        self.skipTest("Skip as AHV host only has 1 physical CPU")
    if vnuma_num_target != vnuma_num_current:
        self.vm.set_memory_vnuma(vnuma_num_target)
        utils_lib.init_connection(self)
    self._verify_memory_vnuma()
    self.assertEqual(self.vm.get_memory_vnuma(), vnuma_num_target,
                     "Test failed as setup VM vnuma failed")
    # Roll back only if we actually changed the setting above.
    if vnuma_num_target != vnuma_num_current:
        self.log.info("Recover VM memory vnuma")
        self.vm.set_memory_vnuma(vnuma_num_current)
        utils_lib.init_connection(self)
        self._verify_memory_vnuma()
        self.assertEqual(self.vm.get_memory_vnuma(), vnuma_num_current,
                         "Test failed as recover VM vnuma failed")
def test_reboot_resolve_content(self):
    """
    case_tag: cloudinit
    case_name: test_reboot_resolve_content
    case_file: https://github.com/liangxiao1/os-tests/blob/master/os_tests/tests/test_lifecycle.py
    component: NetworkManager
    bugzilla_id: 1748015
    is_customer_case: True
    testplan: N/A
    maintainer: [email protected]
    description:
        Check /etc/resolv.conf content is regenerated and consistent before and after reboot
    key_steps: |
        # sudo cp -f /etc/resolv.conf /etc/resolv.conf.orig
        # sudo truncate -s0 /etc/resolv.conf (skip in openstack platform)
        # sudo reboot
        # sudo diff -u /etc/resolv.conf /etc/resolv.conf.orig
    expect_result:
        diff returns 0
    debug_want: |
        # rpm -q NetworkManager
    """
    utils_lib.run_cmd(self, r'sudo cat /etc/resolv.conf', expect_ret=0,
                      expect_kw='nameserver', msg='check resolv.conf content')
    utils_lib.run_cmd(self, r'sudo cp -f /etc/resolv.conf /etc/resolv.conf.orig',
                      expect_ret=0, msg='backup /etc/resolv.conf')
    # On openstack the file is not regenerated, so do not empty it there.
    if not utils_lib.is_openstack(self):
        utils_lib.run_cmd(self, r'sudo truncate -s0 /etc/resolv.conf',
                          expect_ret=0, msg='cleanup /etc/resolv.conf')
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    time.sleep(10)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self, r'sudo cat /etc/resolv.conf', expect_ret=0,
                      expect_kw='nameserver', msg='check content after reboot')
    # The regenerated file must be identical to the pre-reboot backup.
    utils_lib.run_cmd(self, r'sudo diff -u /etc/resolv.conf /etc/resolv.conf.orig',
                      expect_ret=0, msg='check if content identical after reboot')
def test_kdump_no_specify_cpu(self):
    '''
    bz: 1654962
    polarion_id: RHEL7-58669
    description:
        Trigger a sysrq crash with kdump active and verify a vmcore with
        the expected panic backtrace is captured under /var/crash.
    '''
    cmd = 'systemctl is-active kdump'
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg='check kdump service')
    # lscpu output is collected for the debug log only; it is not parsed here.
    output = utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
    self.is_metal = utils_lib.is_metal(self)
    if utils_lib.is_arch(self, 'aarch64') and not utils_lib.is_metal(self):
        # skipTest for consistency with the other cases in this file.
        self.skipTest("Cancel as bug 1654962 in arm guest which \
no plan to fix it in the near future!")
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0,
                      msg='clean /var/crash firstly')
    utils_lib.run_cmd(self, r'sudo sync', expect_ret=0)
    self.log.info("Before system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0, msg='list /var/crash')
    # BUGFIX: 'sudo echo c > /proc/sysrq-trigger' performs the redirection
    # as the unprivileged user (sudo only applies to echo), so the crash
    # was never triggered. Run the whole pipeline under a root shell.
    utils_lib.run_cmd(self, "sudo bash -c \"echo c > /proc/sysrq-trigger\"",
                      msg='trigger crash')
    # Bare metal takes much longer to come back than a VM.
    if self.is_metal:
        self.log.info("Wait 180s")
        time.sleep(180)
    else:
        self.log.info("Wait 30s")
        time.sleep(30)
    utils_lib.init_connection(self)
    self.log.info("After system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0,
                      msg='list /var/crash after crash')
    # The captured vmcore dmesg must show the sysrq-triggered panic path.
    cmd = r'sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50'
    utils_lib.run_cmd(self, cmd, expect_ret=0, expect_kw='write_sysrq_trigger')
def test_kdump_fastboot_kexec_e(self):
    '''
    description:
        Test loading kernel via kexec with RHEL on AWS.
    testplan: N/A
    bugzilla_id: 1758323, 1841578
    is_customer_case: True
    maintainer: xiliang
    case_priority: 0
    case_component: Kdump
    key_steps:
        1. Launch an instance with multi kernels installed.
        2. Load each kernel with command "sudo kexec -l /boot/vmlinuz-$version --initrd=/boot/initramfs-$version.img --reuse-cmdline"
        3. When the kernel is loaded, run command "sudo kexec -e".
    pass_criteria:
        Kernel can be loaded via kexec, and system will reboot into the loaded kernel via kexec -e without calling shutdown(8).
    '''
    utils_lib.run_cmd(self,'uname -r', cancel_not_kw='el7,el6', msg='Not full support earlier than el8, skip!')
    cmd = 'sudo rpm -qa|grep -e "kernel-[0-9]"'
    output = utils_lib.run_cmd(self, cmd, msg='Get kernel version')
    kernels_list = output.split('\n')
    for kernel in kernels_list:
        # Skip empty/garbage lines from the rpm output.
        if kernel is None or kernel == '' or len(kernel) < 6:
            continue
        self.log.info('try to swith {}'.format(kernel))
        # kernel-X.Y.Z -> /boot/vmlinuz-X.Y.Z and /boot/initramfs-X.Y.Z.img
        kernel_vmlinuz = "/boot/" + kernel.replace('kernel','vmlinuz')
        kernel_initramfs = "/boot/" + kernel.replace('kernel','initramfs') + ".img"
        if self.vm.provider == 'nutanix' and self.vm.prism.if_secure_boot:
            cmd = "sudo kexec -l %s --initrd=%s --reuse-cmdline -s" % (kernel_vmlinuz, kernel_initramfs)
            # kexec systems using UEFI + SecureBoot using the kexec option "-s"
        else:
            cmd = "sudo kexec -l %s --initrd=%s --reuse-cmdline" % (kernel_vmlinuz, kernel_initramfs)
        utils_lib.run_cmd(self, cmd, msg='Switch kernel', expect_ret=0)
        cmd = "sudo kexec -e"
        utils_lib.run_cmd(self, cmd, msg='fast reboot system')
        time.sleep(10)
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
        # kernel[7:] strips the "kernel-" prefix, leaving the version string
        # that `uname -r` should report after the kexec.
        utils_lib.run_cmd(self, 'uname -r', msg='check kernel', expect_ret=0, expect_kw=kernel[7:])
def test_boot_mitigations(self):
    '''
    polarion_id:
    bz: 1896786
    description:
        Boot the system with "mitigations=auto,nosmt" on the kernel command
        line and verify the system comes up with no new errors in the logs.
    '''
    cmd = 'sudo grubby --update-kernel=ALL --args="mitigations=auto,nosmt"'
    utils_lib.run_cmd(self, cmd, msg='Append mitigations=auto,nosmt to command line!', timeout=600)
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    # Give the reboot a head start before polling for reconnection.
    time.sleep(10)
    utils_lib.init_connection(self, timeout=800)
    utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw='mitigations=auto,nosmt')
    utils_lib.check_log(self, "error,warn,fail,trace,Trace", rmt_redirect_stdout=True)
def test_reboot_vm(self):
    """
    case_tag: Lifecycle
    case_name: test_reboot_vm
    case_file: os_tests.tests.test_lifecycle.TestLifeCycle.test_reboot_vm
    component: lifecycle
    bugzilla_id: N/A
    is_customer_case: False
    testplan: N/A
    maintainer: [email protected]
    description:
        Check time in last reboot before and after VM reboot.
    key_steps: |
        1. Check time in last reboot before and after VM reboot.
    expect_result: |
        1. Check time is different in last reboot before and after VM reboot.
    debug_want: N/A
    """
    before = utils_lib.run_cmd(self, 'last reboot --time-format full')
    if not self.vm:
        self.skipTest('no vm provider found')
    self.vm.reboot(wait=True)
    # Allow the guest time to come back before reconnecting.
    time.sleep(30)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    output = utils_lib.run_cmd(self, 'whoami').strip()
    self.assertEqual(
        self.vm.vm_username, output,
        "Reboot VM error: output of cmd `who` unexpected -> %s" % output)
    # A new `last reboot` record proves the reboot actually happened.
    after = utils_lib.run_cmd(self, 'last reboot --time-format full')
    self.assertNotEqual(
        before, after,
        "Reboot VM error: before -> %s; after -> %s" % (before, after))
    time.sleep(30)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
def _reboot_os_cycles(self, reboot_cycles, time_wait=10):
    '''
    Reboot the guest OS reboot_cycles times, waiting time_wait seconds
    after each reboot command, and verify via `last reboot` that every
    cycle actually rebooted the VM.
    '''
    for cycle_no in range(1, reboot_cycles + 1):
        self.log.info("Reboot cycle: %d" % cycle_no)
        log_before = utils_lib.run_cmd(
            self,
            "last reboot",
            expect_ret=0,
            msg="Check reboot log before reboot")
        utils_lib.run_cmd(self, "sudo reboot", msg="Reboot OS")
        time.sleep(time_wait)
        utils_lib.init_connection(self)
        log_after = utils_lib.run_cmd(self,
                                      "last reboot",
                                      expect_ret=0,
                                      msg="Check reboot log after reboot")
        # An unchanged reboot log means the reboot never happened.
        self.assertNotEqual(
            log_after, log_before,
            "Test failed as VM is still alive after reboot")
def test_check_reboot_time(self):
    """
    case_tag: GeneralVerification
    case_name: test_check_reboot_time
    case_file: os_tests.tests.test_nutanix_vm.test_check_reboot_time
    component: GeneralVerification
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Verify VM reboot time
    key_steps: |
        1. Reboot VM gracefully
        2. Get system boot time via "systemd-analyze"
        3. Compare with acpi_reboot in nutanix.yaml
    expect_result:
        Boot time less than acpi_reboot in nutanix.yaml
    debug_want:
        systemd-analyze
    """
    self.vm.reboot(wait=True)
    utils_lib.init_connection(self)
    # Threshold comes from the provider config (nutanix.yaml BootTime).
    reboot_time = self.vm.params['BootTime']['acpi_reboot']
    boot_time_sec = utils_lib.getboottime(self)
    utils_lib.compare_nums(self, num1=boot_time_sec, num2=reboot_time, ratio=0,
                           msg="Compare with cfg specified reboot_time")
def test_boot_mitigations(self):
    '''
    bz: 1896786
    polarion_id:
    description:
        Boot the system with "mitigations=auto,nosmt" on the kernel command
        line and verify the system comes up with no new errors in the logs.
    '''
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0, msg='clean /var/crash firstly')
    cmd = 'cat /proc/cpuinfo |grep processor|wc -l'
    cpucount = utils_lib.run_cmd(self, cmd, msg='get cpu count')
    # NOTE(review): skip threshold of 36 CPUs with nosmt — presumably avoids
    # a known issue on large instances; origin of the limit not visible here.
    if int(cpucount) > 36:
        self.skipTest("skip when cpu count over 36 when nosmt passing")
    cmd = 'sudo grubby --update-kernel=ALL --args="mitigations=auto,nosmt"'
    utils_lib.run_cmd(self, cmd, msg='Append mitigations=auto,nosmt to command line!', timeout=600)
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    time.sleep(10)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw='mitigations=auto,nosmt')
    utils_lib.check_log(self, "error,warn,fail,CallTrace",
                        skip_words='ftrace,Failed to write ATTR',
                        rmt_redirect_stdout=True)
def test_add_memory(self):
    '''
    case_tag: Memory
    case_name: test_add_memory
    case_file: os_tests.tests.test_nutanix_vm.test_add_memory
    component: Memory
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Add memory to a powered off VM
    key_steps: |
        1. Add memory to a powered off VM
        2. Verify memory size on RHEL guest OS
    expect_result:
        Memory size is consistent between Nutanix AHV and RHEL guest OS
    '''
    # Memory size can only be changed while the VM is powered off.
    if self.vm.is_started():
        self.vm.stop(wait=True)
    mem_gb_current = self.vm.get_memory_size()
    mem_gb_target = mem_gb_current * 2
    self.log.info("Add VM memory")
    self.vm.update_memory_size(mem_gb_target)
    self.assertEqual(self.vm.get_memory_size(), mem_gb_target,
                     "Test failed as add VM memory failed")
    utils_lib.init_connection(self)
    self._verify_memory_size()
    # Restore the original memory size (also re-verifies inside the guest).
    self._recover_memory(mem_gb_current)
def test_boot_fipsenabled(self): ''' polarion_id: bz: 1787270 ''' #self.skipTest("skip it for now because paramiko know issue when enabled fips https://github.com/paramiko/paramiko/pull/1643") self.log.info("Check system can boot with fips=1") output = utils_lib.run_cmd(self, 'uname -r', expect_ret=0) if 'el7' in output: utils_lib.run_cmd(self, 'sudo dracut -v -f', msg='regenerate the initramfs!', timeout=600) cmd = 'sudo grubby --update-kernel=ALL --args="fips=1"' utils_lib.run_cmd(self, cmd, msg='Enable fips!', timeout=600) utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test') time.sleep(10) utils_lib.init_connection(self, timeout=800) utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw='fips=1') utils_lib.run_cmd(self, 'dmesg', msg='save dmesg') cmd = 'sudo grubby --update-kernel=ALL --remove-args="fips=1"' utils_lib.run_cmd(self, cmd, msg='Disable fips!') else: cmd = 'sudo fips-mode-setup --enable' utils_lib.run_cmd(self, cmd, msg='Enable fips!', timeout=600) utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test') time.sleep(10) utils_lib.init_connection(self, timeout=800) utils_lib.run_cmd(self, 'sudo fips-mode-setup --check', expect_kw='enabled') utils_lib.run_cmd(self, 'cat /proc/cmdline', expect_kw='fips=1') utils_lib.run_cmd(self, 'dmesg', msg='save dmesg') cmd = 'sudo fips-mode-setup --disable' utils_lib.run_cmd(self, cmd, msg='Disable fips!')
def test_hibernate_resume(self):
    """
    case_tag: lifecycle
    case_name: test_hibernate_resume
    case_file: https://github.com/virt-s1/os-tests/blob/master/os_tests/tests/test_vm_operation.py
    component: kernel
    bugzilla_id: 1898677
    is_customer_case: True
    testplan: N/A
    maintainer: [email protected]
    description:
        Test system hibernation and process is still running after resumed
    key_steps: |
        1. enable hibernation on system
        2. start a test process, eg. sleep 1800
        3. hibernate system
        4. start system
        5. the test process still running
    expect_result:
        test process resume successfully
    debug_want:
        dmesg or console output
    """
    if not self.vm:
        self.skipTest('vm not init')
    utils_lib.run_cmd(self, 'lscpu', expect_ret=0, cancel_not_kw="Xen",
                      msg="Not support in xen instance")
    utils_lib.is_cmd_exist(self, "acpid")
    if self.vm.provider == 'aws':
        # AWS needs the ec2-hibinit-agent; pick the package matching the OS.
        product_id = utils_lib.get_os_release_info(self, field='VERSION_ID')
        if float(product_id) >= 8.0 and float(product_id) < 9.0:
            pkg_url = 'https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/e/ec2-hibinit-agent-1.0.4-1.el8.noarch.rpm'
        elif float(product_id) < 8.0:
            self.skipTest('not supported earlier than rhel8')
        else:
            pkg_url = "https://dl.fedoraproject.org/pub/fedora/linux/releases/34/Everything/x86_64/os/Packages/e/ec2-hibinit-agent-1.0.3-5.fc34.noarch.rpm"
        utils_lib.pkg_install(self, pkg_name='ec2-hibinit-agent', pkg_url=pkg_url, force=True)
        cmd = 'sudo systemctl is-enabled hibinit-agent.service'
        output = utils_lib.run_cmd(self, cmd)
        if 'enabled' not in output:
            cmd = 'sudo systemctl enable --now hibinit-agent.service'
            utils_lib.run_cmd(self, cmd)
        utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
        # Poll until hibinit-agent has finished its one-shot setup
        # (goes 'inactive'), or give up after `timeout` seconds.
        timeout = 180
        interval = 5
        time_start = int(time.time())
        while True:
            cmd = 'sudo systemctl is-active hibinit-agent.service'
            out = utils_lib.run_cmd(self, cmd)
            if 'inactive' in out:
                break
            time_end = int(time.time())
            if time_end - time_start > timeout:
                self.log.info('timeout ended: {}'.format(timeout))
                break
            self.log.info('retry after {}s'.format(interval))
            time.sleep(interval)
        cmd = 'sudo systemctl status hibinit-agent.service'
        utils_lib.run_cmd(self, cmd)
    else:
        # Non-AWS: make sure a swap device big enough for hibernation exists.
        cmd = 'cat /proc/swaps'
        output = utils_lib.run_cmd(self, cmd, msg='check whether system has swap on')
        # NOTE(review): '-2' presumably matches the default swap Priority
        # column in /proc/swaps — confirm intent.
        if '-2' not in output:
            self.log.info("No swap found, creating new one")
            # NOTE(review): the final `sudo echo ... >> /etc/fstab` performs
            # the redirection as the remote (non-root) user — likely needs
            # `sudo bash -c` or tee to actually work; confirm on a real run.
            cmd = """
sudo dd if=/dev/zero of=/swap bs=1024 count=2000000;
sudo chmod 0600 /swap;
sudo mkswap /swap;
sudo swapon /swap;
offset=$(filefrag -v /swap| awk '{if($1==\"0:\"){print $4}}');
uuid=$(findmnt -no UUID -T /swap);
sudo grubby --update-kernel=ALL --args=\"resume_offset=${offset//.} resume=UUID=$uuid\";
sudo echo '/swap swap swap defaults 0 0' >> /etc/fstab
"""
            utils_lib.run_cmd(self, cmd, timeout=240)
    # Start a background process that must survive hibernate/resume.
    cmd = "sleep 360 > /dev/null 2>&1 &"
    utils_lib.run_cmd(self, cmd)
    vm_hibernate_success = False
    try:
        if not self.vm.send_hibernation():
            self.skipTest('send hibernate not succeed')
        vm_hibernate_success = True
    except NotImplementedError:
        self.log.info(
            'send_hibernation func is not implemented in {}'.format(
                self.vm.provider))
    except UnSupportedAction:
        self.log.info(
            'send_hibernation func is not supported in {}'.format(
                self.vm.provider))
    if not vm_hibernate_success:
        # Fall back to hibernating from inside the guest, then power it
        # back on from the provider side.
        cmd = "sudo systemctl hibernate"
        utils_lib.run_cmd(self, cmd, msg="Try to hibernate inside system!")
        time.sleep(20)
        self.vm.start()
    time.sleep(10)
    self.params['remote_node'] = self.vm.floating_ip
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self, 'dmesg',
                      expect_kw="Restarting tasks",
                      expect_not_kw="Call",
                      msg="check the system is resumed")
    # The background sleep started before hibernation must still be alive.
    cmd = 'pgrep -a sleep'
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg='check sleep process still exists')
def test_cloud_init_lineoverwrite(self):
    '''
    case_tag: cloudinit
    description:
        This is a specific case of openstack, because the cloud guest images need to have "NOZEROCONF=yes" in /etc/sysconfig/network so that it works well as an openstack guest.
        (Bug 983611 - Cloud guest images needs to have "NOZEROCONF=yes" in /etc/sysconfig/network)
        cloud-init removed user configuration in /etc/sysconfig/network and rewrite the default configuration in every prevision before cloud-init-18.2-4.el7,
        after this version, certain lines in network configuration isn't removed after re-provision. linked case RHEL-152730
    testplan: N/A
    bugzilla_id: 1653131
    is_customer_case: True
    maintainer: xiliang
    case_priority: 0
    case_component: cloud-init
    key_steps:
        1. Launch an instance on AWS EC2.
        2. Add "NOZEROCONF=yes" to top of network config /etc/sysconfig/network.
        3. Add "NETWORKING_IPV6=no" to top of network config /etc/sysconfig/network.
        4. Clean cloud-init with command: "rm /run/cloud-init/ /var/lib/cloud/* -rf" and reboot instance.
        5. Check the new network configuration /etc/sysconfig/network after boot.
    pass_criteria:
        "NETWORKING_IPV6=no" and "NOZEROCONF=yes" should be in the network configuration.
    '''
    utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')
    out = utils_lib.run_cmd(self, 'rpm -q cloud-init', expect_ret=0)
    # BUGFIX: use a raw string and escape the dot — '\d+.\d' let '.' match
    # any character and raised an invalid-escape warning on modern Python.
    cloudinit_ver = re.findall(r'\d+\.\d', out)[0]
    if float(cloudinit_ver) >= 22.1:
        self.skipTest(
            'not supported from cloudinit 22.1, render profile changed to networkmanager'
        )
    cmd = 'ifconfig eth0'
    utils_lib.run_cmd(self, cmd, msg="Previous ifconfig status")
    cmd = 'cat /etc/sysconfig/network'
    output = utils_lib.run_cmd(self, cmd, msg="Previous network configuration.")
    if "NOZEROCONF=yes" not in output:
        cmd = r"sudo sed -i '1s/^/NOZEROCONF=yes\n/' \
/etc/sysconfig/network"
        utils_lib.run_cmd(
            self, cmd,
            msg='add NOZEROCONF=yes to top of network config')
    if "NETWORKING_IPV6=no" not in output:
        cmd = r"sudo sed -i '1s/^/NETWORKING_IPV6=no\n/' \
/etc/sysconfig/network"
        utils_lib.run_cmd(
            self, cmd,
            msg='add NETWORKING_IPV6=no top of network config')
    cmd = 'cat /etc/sysconfig/network'
    output = utils_lib.run_cmd(self, cmd, msg="Updated network configuration.")
    # Wipe cloud-init state so the next boot re-provisions from scratch.
    cmd = 'sudo rm /run/cloud-init/ /var/lib/cloud/* -rf'
    utils_lib.run_cmd(self, cmd, msg='clean cloud-init and redo it')
    self.vm.reboot()
    time.sleep(20)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    cmd = 'cat /etc/sysconfig/network'
    output = utils_lib.run_cmd(self, cmd, msg="New network configuration.")
    # After re-provision, cloud-init should drop the NETWORKING_IPV6 line
    # but preserve NOZEROCONF (bz 1653131).
    if "NETWORKING_IPV6=no" in output:
        self.fail("NETWORKING_IPV6=no is not expected")
    if "NOZEROCONF=yes" not in output:
        self.fail("NOZEROCONF=yes is expected")
def test_reboot_vm_debugkernel(self):
    """
    case_tag: GeneralVerification
    case_name: test_reboot_vm_debugkernel
    case_file: os_tests.tests.test_nutanix_vm.test_reboot_vm_debugkernel
    component: GeneralVerification
    bugzilla_id: N/A
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description:
        Reboot VM for 10 times with debug kernel and check VM status
    key_steps: |
        1. Boot VM with debug kernel
        2. Reboot VM for 10 times
        3. Check VM status
        4. Check OS status
    expect_result: |
        1. VM working normally after reboot
        2. No unexpected error
    debug_want: N/A
    """
    if_secure_boot = self.vm.params['VM']['if_secure_boot']
    if if_secure_boot:
        self.skipTest('''Red Hat Insights error \
"sed: can't read /sys/kernel/debug/sched_features: Operation not permitted" When using secure boot''')
    mem_gb_current = self.vm.get_memory_size()
    if mem_gb_current < 2:
        self.skipTest(
            "Skip test as minimal 2G memory is required for debug kernel")
    default_kernel = utils_lib.run_cmd(self, "sudo grubby --default-kernel",
                                       expect_ret=0)
    kernel_version = utils_lib.run_cmd(self, "uname -r", expect_ret=0)
    if "debug" in kernel_version:
        self.log.info("Already in debug kernel")
    else:
        # Derive the matching kernel-debug boot entry from the running kernel.
        debug_kernel = "/boot/vmlinuz-" + kernel_version.strip(
            '\n') + "+debug"
        debug_kernel_pkg = "kernel-debug-" + kernel_version
        utils_lib.is_pkg_installed(self, pkg_name=debug_kernel_pkg, timeout=300)
        utils_lib.run_cmd(self, "sudo grubby --info=%s" % debug_kernel,
                          expect_ret=0,
                          msg="check kernel-debug installed")
        cmd = "sudo grubby --info=%s|grep index|cut -d'=' -f2" % debug_kernel
        debug_kernel_index = utils_lib.run_cmd(
            self, cmd, expect_ret=0, cancel_ret='0',
            msg="check kernel-debug index")
        cmd = "sudo grubby --set-default-index=%s" % debug_kernel_index
        utils_lib.run_cmd(self, cmd, expect_ret=0,
                          msg="change default boot index")
        utils_lib.run_cmd(self, "sudo reboot",
                          msg='Reboot OS to boot to debug kernel')
        time.sleep(60)
        utils_lib.init_connection(self)
        utils_lib.run_cmd(self,
                          "uname -r",
                          expect_ret=0,
                          expect_kw="debug",
                          msg="checking debug kernel booted")
    # Wait (up to 120s) for systemd to report a finished bootup before the
    # reboot cycles start.
    cmd = "sudo systemd-analyze"
    time_start = int(time.time())
    while True:
        output = utils_lib.run_cmd(self, cmd)
        if 'Bootup is not yet finished' not in output:
            break
        time_end = int(time.time())
        utils_lib.run_cmd(self, 'sudo systemctl list-jobs')
        if time_end - time_start > 120:
            self.fail("Bootup is not yet finished after 120s")
        self.log.info("Wait for bootup finish......")
        time.sleep(1)
    reboot_cycles = 10
    self._reboot_os_cycles(reboot_cycles, time_wait=30)
    # Restore the original default kernel and verify after a final reboot.
    # NOTE(review): default_kernel comes straight from run_cmd and may carry
    # a trailing newline — confirm grubby tolerates it.
    cmd = "sudo grubby --set-default %s" % default_kernel
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      msg="Recover kernel to origin: %s" % default_kernel)
    utils_lib.run_cmd(self, "sudo reboot",
                      msg='Reboot OS to boot to default kernel')
    time.sleep(30)
    utils_lib.init_connection(self)
    utils_lib.run_cmd(self,
                      "uname -r",
                      expect_ret=0,
                      expect_not_kw="debug",
                      msg="Verifying default kernel has recovered")
def test_boot_debugkernel(self):
    '''
    polarion_id:
    bz: 1703366
    '''
    # Debug-kernel features need /sys/kernel/debug access, which secure
    # boot forbids -- skip on nutanix VMs with secure boot enabled.
    if self.vm.provider == 'nutanix' and self.vm.prism.if_secure_boot:
        self.skipTest('''Red Hat Insights error "sed: can't read /sys/kernel/debug/sched_features: Operation not permitted" When using secure boot''')
    # Remember the current default boot entry for later recovery.
    self.old_grub_index = utils_lib.run_cmd(self, 'sudo grubby --default-index', expect_ret=0)
    self.log.info("Check kernel-debug can boot up!")
    mini_mem = utils_lib.get_memsize(self)
    if int(mini_mem) < 2:
        self.skipTest('minimal 2G memory required for debug kernel')
    if utils_lib.is_arch(self, 'aarch64') and int(mini_mem) < 4:
        self.skipTest('minimal 4G memory required in aarch64')
    need_reboot = False
    kernel_ver = utils_lib.run_cmd(self, 'uname -r', expect_ret=0)
    if 'debug' in kernel_ver:
        self.log.info('already in debug kernel')
    else:
        need_reboot = True
        # el7 debug kernels use a ".debug" suffix, later releases "+debug".
        if 'el7' in kernel_ver:
            debug_kernel = "/boot/vmlinuz-" + kernel_ver.strip('\n') + ".debug"
        else:
            debug_kernel = "/boot/vmlinuz-" + kernel_ver.strip('\n') + "+debug"
        # BUGFIX: strip the trailing newline from "uname -r" output so the
        # package name is well formed (the kernel path above already did).
        kernel_pkg = 'kernel-debug-' + kernel_ver.strip('\n')
        # Increased timeout for one-off install failures against Nutanix VM.
        utils_lib.is_pkg_installed(self, pkg_name=kernel_pkg, timeout=600)
        utils_lib.run_cmd(self,
                          "sudo grubby --info=%s" % debug_kernel,
                          expect_ret=0,
                          msg="check kernel-debug installed")
        cmd = "sudo grubby --info=%s|grep index|cut -d'=' -f2" % debug_kernel
        debug_kernel_index = utils_lib.run_cmd(self,
                                               cmd,
                                               expect_ret=0,
                                               cancel_ret='0',
                                               msg="check kernel-debug index")
        cmd = "sudo grubby --set-default-index=%s" % debug_kernel_index
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg="change default boot index")
    # kmemleak=on must be on the command line for the leak scan below;
    # reboot if it is not already there.
    cmd = 'cat /proc/cmdline'
    cmd_options = utils_lib.run_cmd(self, cmd)
    if 'kmemleak=on' not in cmd_options:
        need_reboot = True
    if need_reboot:
        cmd = 'sudo grubby --update-kernel=ALL --args="kmemleak=on"'
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg="enable kmemleak")
        utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
        time.sleep(10)
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self,
                      'uname -r',
                      expect_ret=0,
                      expect_kw='debug',
                      msg="checking debug kernel booted")
    utils_lib.run_cmd(self, 'dmesg', expect_ret=0, msg="saving dmesg output")
    cmd = 'journalctl > /tmp/journalctl.log'
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg="saving journalctl output")
    utils_lib.run_cmd(self, 'cat /tmp/journalctl.log', expect_ret=0)
    utils_lib.run_cmd(self, "sudo systemd-analyze blame > /tmp/blame.log")
    utils_lib.run_cmd(self, "cat /tmp/blame.log")
    # Wait (up to 120s) until systemd reports boot finished.
    cmd = "sudo systemd-analyze "
    time_start = int(time.time())
    while True:
        output = utils_lib.run_cmd(self, cmd)
        if 'Bootup is not yet finished' not in output:
            break
        time_end = int(time.time())
        utils_lib.run_cmd(self, 'sudo systemctl list-jobs')
        if time_end - time_start > 120:
            self.fail("Bootup is not yet finished after 120s")
        self.log.info("Wait for bootup finish......")
        time.sleep(1)
    utils_lib.run_cmd(self, "dmesg", expect_not_kw="Call trace,Call Trace")
    # The kmemleak scan can take very long on large-memory systems; only
    # run it below 17G.
    if int(mini_mem) < 17:
        cmd = 'sudo bash -c "echo scan > /sys/kernel/debug/kmemleak"'
        utils_lib.run_cmd(self, cmd, expect_ret=0, timeout=1800)
        cmd = 'sudo cat /sys/kernel/debug/kmemleak'
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        # Any output from the kmemleak report means a leak was detected.
        if len(output) > 0:
            self.fail('Memory leak found!')
def test_kdump_each_cpu(self):
    """
    case_tag: kdump
    case_name: test_kdump_each_cpu
    case_file: os_tests.tests.test_lifecycle.test_kdump_each_cpu
    component: kdump
    bugzilla_id: 1396554
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description: Test kdump on each cpu core
    key_steps: |
        1. Triger crash on each cpu core
        2. Check if kdump is working and dump file will be generated
    expect_result: kdump is working and dump file will be generated
    debug_want: N/A
    """
    utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
    product_id = utils_lib.get_product_id(self)
    # Known issue on aarch64 guests before RHEL 8.6 -- skip there.
    if utils_lib.is_arch(self, 'aarch64') and not utils_lib.is_metal(self) and float(product_id) < 8.6:
        self.skipTest("Cancel as bug 1654962 in arm guest earlier than 8.6 2082405")
    total_cpus = int(
        utils_lib.run_cmd(self,
                          "grep processor /proc/cpuinfo | wc -l",
                          expect_ret=0,
                          msg="Get cpu counts"))
    # Crash the kernel once pinned to every cpu and verify a vmcore is
    # produced each time.
    for cpu_id in range(total_cpus):
        self.log.info(f"Trigger kdump on core {cpu_id}")
        utils_lib.run_cmd(self,
                          "systemctl is-active kdump || sudo systemctl start kdump",
                          expect_ret=0,
                          msg="check kdump service status")
        utils_lib.run_cmd(self,
                          "sudo rm -rf /var/crash/*",
                          expect_ret=0,
                          msg="clean /var/crash")
        utils_lib.run_cmd(self, "sudo sync", expect_ret=0)
        self.log.info("Before system crash")
        crash_dir_before = utils_lib.run_cmd(self,
                                             "find /var/crash",
                                             expect_ret=0,
                                             msg="list /var/crash before crash")
        trigger_cmd = f"sudo bash -c 'taskset -c {cpu_id} echo c > /proc/sysrq-trigger'"
        utils_lib.run_cmd(self, trigger_cmd, msg='trigger crash')
        # Give the system time to dump and reboot, then reconnect.
        time.sleep(30)
        self.params['remote_node'] = self.vm.floating_ip
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
        self.log.info("After system crash")
        crash_dir_after = utils_lib.run_cmd(self,
                                            "find /var/crash",
                                            expect_ret=0,
                                            msg="list /var/crash after crash")
        # A new entry under /var/crash proves the dump was written.
        self.assertNotEqual(crash_dir_after, crash_dir_before,
                            "Test failed as no crash dump file found")
        utils_lib.run_cmd(self,
                          "sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50",
                          expect_ret=0,
                          expect_kw="write_sysrq_trigger",
                          msg="Check if crash happened")
def test_kdump_unknown_nmi_panic_enabled(self):
    '''
    description:
        Test Diagnostic Interrupt triggers the kdump when unknown_nmi_panic is enabled with RHEL on AWS.
        https://aws.amazon.com/blogs/aws/new-trigger-a-kernel-panic-to-diagnose-unresponsive-ec2-instances/
    testplan: N/A
    bugzilla_id: n/a
    customer_case_id: n/a
    maintainer: xiliang
    case_priority: 0
    case_component: Kdump
    key_steps:
        1. Launch an instance on AWS EC2.
        2. Check the kdump status by command "systemctl status kdump.service".
        3. Disable kernel to trigger a kernel panic upon receiving the interrupt by set /etc/sysctl.conf and add a line : kernel.unknown_nmi_panic=1 and reboot. Or by command "sudo sysctl kernel.unknown_nmi_panic=1".
        4. Send Diagnostic Interrupt to the instance.
    pass_criteria:
        Kernel panic is triggered, system reboot after panic, and vm core is gernerated in /var/crash after crash.
    '''
    # Poll until the kdump service is active (or give up after 2 min --
    # the test proceeds either way, later steps will surface failures).
    poll_timeout = 120
    poll_interval = 5
    poll_started = int(time.time())
    while True:
        active = utils_lib.run_cmd(self,
                                   'sudo systemctl is-active kdump',
                                   ret_status=True,
                                   msg='check kdump is active')
        if active == 0:
            break
        if int(time.time()) - poll_started > poll_timeout:
            self.log.info(f'timeout ended: {poll_timeout}')
            break
        self.log.info(f'retry after {poll_interval}s')
        time.sleep(poll_interval)
    # NMI-triggered panic is not supported on arm instances.
    utils_lib.run_cmd(self,
                      'lscpu',
                      cancel_not_kw='aarch64',
                      msg='Not support in arm instance')
    utils_lib.run_cmd(self,
                      r'sudo rm -rf /var/crash/*',
                      expect_ret=0,
                      msg='clean /var/crash firstly')
    utils_lib.run_cmd(self,
                      r'sudo sysctl kernel.unknown_nmi_panic=1',
                      expect_ret=0,
                      msg='enable unknown_nmi_panic')
    utils_lib.run_cmd(self,
                      r'sudo sysctl -a|grep -i nmi',
                      expect_ret=0,
                      expect_kw='kernel.unknown_nmi_panic = 1')
    # Ask the provider to deliver the diagnostic interrupt; skip on
    # providers without NMI support.
    try:
        nmi_sent = self.vm.send_nmi()
    except UnSupportedAction:
        self.skipTest("current {} not support nmi operation".format(self.vm.provider))
    if not nmi_sent:
        self.fail("Cannot trigger panic via nmi!")
    # The instance panics and reboots; reconnect and check the vmcore.
    time.sleep(10)
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self,
                      r'sudo ls /var/crash/',
                      expect_ret=0,
                      msg='list /var/crash after crash')
    utils_lib.run_cmd(self,
                      r'sudo cat /var/crash/1*/vmcore-dmesg.txt|tail -50',
                      expect_ret=0,
                      expect_kw='nmi_panic')
def test_virsh_pci_reattach(self):
    '''
    case_name: test_virsh_pci_reattach
    case_priority: 1
    component: kernel
    bugzilla_id: 1700254
    polarion_id: n/a
    maintainer: [email protected]
    description: |
        Test no exception when system does pci detach and attach operation
        in virsh. virsh can detach host pci device and attach it to guest.
    key_steps: |
        1. #virsh nodedev-detach $pci
        2. #virsh nodedev-reattach $pci
    expected_result: |
        No panic/crash happen.
        eg. # virsh nodedev-detach pci_0000_2b_00_0
        Device pci_0000_2b_00_0 detached
        # virsh nodedev-reattach pci_0000_2b_00_0
        Device pci_0000_2b_00_0 re-attached
    '''
    utils_lib.is_metal(self, action="cancel")
    cmd = "sudo yum install -y libvirt"
    utils_lib.run_cmd(self, cmd, msg="install libvirt pkg")
    cmd = "sudo systemctl restart libvirtd"
    utils_lib.run_cmd(self, cmd, cancel_ret='0', msg="restart libvirtd")
    utils_lib.is_cmd_exist(self, cmd='virsh')
    # Detach/reattach can be slow; give ssh a generous timeout.
    self.ssh_timeout = 1200
    self.log.info('set ssh connection timeout to {}'.format(
        self.ssh_timeout))
    # On x86_64 the IOMMU must be enabled for device assignment; append
    # intel_iommu=on and reboot when it is missing.
    if utils_lib.is_arch(self, arch='x86_64'):
        boot_param_required = 'intel_iommu=on'
        out = utils_lib.run_cmd(self, 'cat /proc/cmdline',
                                msg='Check boot line')
        if boot_param_required not in out:
            cmd = 'sudo grubby --update-kernel=ALL --args="{}"'.format(
                boot_param_required)
            utils_lib.run_cmd(
                self,
                cmd,
                msg="append {} to boot params".format(boot_param_required))
            utils_lib.run_cmd(self, 'sudo reboot',
                              msg='reboot system under test')
            time.sleep(10)
            utils_lib.init_connection(self, timeout=self.ssh_timeout)
    utils_lib.run_cmd(self, 'sudo lspci', msg="get pci list")
    # Pick a pci device safe to detach: a non-boot nvme disk first, then
    # the serial port, then the vga controller.
    tmp_pci = None
    cmd = "lspci|grep 'Non-Volatile memory'|wc -l"
    out = utils_lib.run_cmd(self, cmd)
    if int(out) > 0:
        cmd = 'sudo find /sys/devices -name *nvme*n1p1*'
        tmp_root = utils_lib.run_cmd(self, cmd, msg="get boot nvme pci")
        boot_pci = tmp_root.split('/')[-2]
        # Exclude the boot disk so we never detach the root device.
        cmd = 'sudo find /sys/devices -name *nvme*|grep -v %s|grep -i pci|grep n1' % boot_pci
        ret = utils_lib.run_cmd(self,
                                cmd,
                                msg="get test pci",
                                ret_status=True)
        if int(ret) == 0:
            tmp_pci = utils_lib.run_cmd(self, cmd, msg="get test pci")
            if len(tmp_pci) > 0:
                tmp_pci = tmp_pci.split('/')[-4]
            else:
                tmp_pci = None
        else:
            tmp_pci = None
    if tmp_pci is None:
        cmd = 'sudo find /sys/devices -name *ttyS0*|grep [0-9]:[0-9]'
        tmp_root = utils_lib.run_cmd(self,
                                     cmd,
                                     msg="try to get ttyS0 pci device")
        if len(tmp_root) == 0:
            tmp_pci = None
        else:
            serial_pci = tmp_root.split('/')[-3]
            tmp_pci = serial_pci
    if tmp_pci is None:
        cmd = 'sudo find /sys/devices -name *vga*|grep [0-9]:[0-9]'
        tmp_root = utils_lib.run_cmd(self,
                                     cmd,
                                     msg="try to get vga pci device")
        if len(tmp_root) == 0:
            tmp_pci = None
        else:
            vga_pci = tmp_root.split('/')[-2]
            tmp_pci = vga_pci
    if tmp_pci is not None:
        self.log.info("Get pci device: {}".format(tmp_pci))
    else:
        self.skipTest("No pci device found to detach")
    # virsh node device names use '_' instead of '.' and ':'.
    tmp_pci = tmp_pci.replace('.', '_')
    tmp_pci = tmp_pci.replace(':', '_')
    pci_dev_1 = utils_lib.run_cmd(
        self,
        'sudo virsh nodedev-list|grep %s |tail -1' % tmp_pci,
        msg='pick up device to detach')
    pci_dev_1 = re.findall('pci_.*', pci_dev_1)
    if len(pci_dev_1) > 0:
        pci_dev_1 = pci_dev_1[0]
    else:
        self.fail("no {} found in output".format(tmp_pci))
    if pci_dev_1.endswith('1'):
        # BUGFIX: rstrip('1') strips ALL trailing '1' characters
        # (e.g. "pci_0000_2b_00_11" -> "pci_0000_2b_00_0"); replace only
        # the last character to derive the sibling function 0 device.
        pci_dev_0 = pci_dev_1[:-1] + '0'
        utils_lib.run_cmd(self,
                          'sudo virsh nodedev-detach %s' % pci_dev_0,
                          msg='detach pci device',
                          expect_ret=0)
    utils_lib.run_cmd(self,
                      'sudo virsh nodedev-detach %s' % pci_dev_1,
                      msg='detach pci device',
                      expect_ret=0)
    utils_lib.run_cmd(self,
                      'sudo virsh nodedev-reattach %s' % pci_dev_1,
                      msg='reattach pci device',
                      expect_ret=0)
    if pci_dev_1.endswith('1'):
        utils_lib.run_cmd(self,
                          'sudo virsh nodedev-reattach %s' % pci_dev_0,
                          msg='reattach pci device',
                          expect_ret=0)
    utils_lib.check_log(self, "error,warn,fail,trace,Trace",
                        log_cmd='dmesg -T', cursor=self.dmesg_cursor)
def test_boot_debugkernel(self):
    '''
    polarion_id:
    bz: 1703366
    '''
    # Remember the current default boot entry for later recovery.
    self.old_grub_index = utils_lib.run_cmd(self, 'sudo grubby --default-index', expect_ret=0)
    self.log.info("Check kernel-debug can boot up!")
    mini_mem = utils_lib.get_memsize(self)
    if int(mini_mem) < 2:
        self.skipTest('minimal 2G memory required for debug kernel')
    if utils_lib.is_arch(self, 'aarch64') and int(mini_mem) < 4:
        self.skipTest('minimal 4G memory required in aarch64')
    kernel_ver = utils_lib.run_cmd(self, 'uname -r', expect_ret=0)
    # el7 debug kernels use a ".debug" suffix, later releases "+debug".
    if 'el7' in kernel_ver:
        debug_kernel = "/boot/vmlinuz-" + kernel_ver.strip('\n') + ".debug"
    else:
        debug_kernel = "/boot/vmlinuz-" + kernel_ver.strip('\n') + "+debug"
    utils_lib.run_cmd(self,
                      "sudo grubby --info=%s" % debug_kernel,
                      expect_ret=0,
                      msg="check kernel-debug installed")
    cmd = "sudo grubby --info=%s|grep index|cut -d'=' -f2" % debug_kernel
    debug_kernel_index = utils_lib.run_cmd(self,
                                           cmd,
                                           expect_ret=0,
                                           cancel_ret='0',
                                           msg="check kernel-debug index")
    cmd = "sudo grubby --set-default-index=%s" % debug_kernel_index
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg="change default boot index")
    # kmemleak=on is required for the leak scan after reboot.
    cmd = 'sudo grubby --update-kernel=ALL --args="kmemleak=on"'
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg="enable kmemleak")
    utils_lib.run_cmd(self, 'sudo reboot', msg='reboot system under test')
    time.sleep(10)
    utils_lib.init_connection(self, timeout=800)
    utils_lib.run_cmd(self,
                      'uname -r',
                      expect_ret=0,
                      expect_kw='debug',
                      msg="checking debug kernel booted")
    utils_lib.run_cmd(self, 'dmesg', expect_ret=0, msg="saving dmesg output")
    cmd = 'journalctl > /tmp/journalctl.log'
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg="saving journalctl output")
    utils_lib.run_cmd(self, 'cat /tmp/journalctl.log', expect_ret=0)
    utils_lib.run_cmd(self, "sudo systemd-analyze blame > /tmp/blame.log")
    utils_lib.run_cmd(self, "cat /tmp/blame.log")
    # Wait (up to 120s) until systemd reports boot finished.
    cmd = "sudo systemd-analyze "
    time_start = int(time.time())
    while True:
        output = utils_lib.run_cmd(self, cmd)
        if 'Bootup is not yet finished' not in output:
            break
        time_end = int(time.time())
        utils_lib.run_cmd(self, 'sudo systemctl list-jobs')
        if time_end - time_start > 120:
            self.fail("Bootup is not yet finished after 120s")
        self.log.info("Wait for bootup finish......")
        time.sleep(1)
    utils_lib.run_cmd(self, "dmesg", expect_not_kw="Call trace,Call Trace")
    # The kmemleak scan can take very long on large-memory systems; only
    # run it below 17G.
    if int(mini_mem) < 17:
        # BUGFIX: 'sudo echo scan > file' performs the redirection in the
        # unprivileged shell and fails with permission denied; run the
        # whole redirection under a root shell instead (matches the other
        # test_boot_debugkernel variant in this file).
        cmd = 'sudo bash -c "echo scan > /sys/kernel/debug/kmemleak"'
        utils_lib.run_cmd(self, cmd, expect_ret=0, timeout=1800)
        cmd = 'sudo cat /sys/kernel/debug/kmemleak'
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        # Any output from the kmemleak report means a leak was detected.
        if len(output) > 0:
            self.fail('Memory leak found!')