Example 1
 def test_fsadm_resize(self):
     """
     case_name:
         test_fsadm_resize
     component:
         lvm2
     bugzilla_id:
         1905705
     is_customer_case:
         True
     maintainer:
         [email protected]
     description:
         test if "fsdadm resize" can run normally
     key_steps:
         1.check cmd fsadm
         2.sudo fsadm resize $(findmnt -n -o source /)
     expect_result:
         fsadm does nothing since the filesystem is already at maximum size
     """
     utils_lib.is_cmd_exist(self, 'fsadm')
     utils_lib.run_cmd(self,
                       'sudo fsadm resize $(findmnt -n -o source /)',
                       expect_ret=0,
                       expect_not_kw="unbound variable",
                       msg="fsadm should not crash")
Example 2
 def test_check_dmidecode_dump_segfault(self):
     '''
     case_name:
         test_check_dmidecode_dump_segfault
     case_priority:
         2
     component:
         dmidecode
     bugzilla_id:
         1885823
     customer_case_id:
         02939365
     polarion_id:
         n/a
     maintainer:
         [email protected]
     description:
         check there is no segmentation fault while run 'dmidecode --dump'
     key_steps:
         # dmidecode --dump |grep -i Segmentation 
     expected_result:
         No segmentation fault found.
     '''
     utils_lib.is_cmd_exist(self, cmd='dmidecode')
     cmd = "sudo dmidecode --dump"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       expect_not_kw='Segmentation')
Example 3
    def test_iostat_x(self):
        '''
        case_name:
            test_iostat_x
        case_priority:
            1
        component:
            kernel
        bugzilla_id:
            1661977
        polarion_id:
            n/a
        maintainer:
            [email protected]
        description:
            Check "iostat -x" report and make sure there is no high utils when there is no obviously read/write operations.
        key_steps:
            1. # iostat -x
        expected_result:
            No high %util reported when there are no obvious read/write operations.
            eg. # iostat -x
                Linux 4.18.0-236.el8.aarch64 (ip-xx-xxx-x-xxx.us-west-2.compute.internal) 	09/28/2020 	_aarch64_	(2 CPU)

                avg-cpu:  %user   %nice %system %iowait  %steal   %idle
                           7.77    0.00    1.48    0.69    0.00   90.06

                Device            r/s     w/s     rkB/s     wkB/s   rrqm/s   wrqm/s  %rrqm  %wrqm r_await w_await aqu-sz rareq-sz wareq-sz  svctm  %util
                nvme0n1         46.06    2.82   1587.81    274.62     0.00     0.23   0.00   7.52    0.50    1.32   0.00    34.47    97.31   0.86   4.19
                nvme1n1          0.15    0.00     10.43      0.00     0.00     0.00   0.00   0.00    1.00    0.00   0.00    70.40     0.00   1.50   0.02
        '''
        expect_utils = self.params.get('disk_utils')
        self.log.info("Check no disk utils lager than %s" % expect_utils)
        utils_lib.is_cmd_exist(self, cmd='iostat')
        cmd = 'sudo  iostat -x -o JSON'
        output = utils_lib.run_cmd(self, cmd)
        try:
            res_dict = json.loads(output)
            for x in res_dict["sysstat"]["hosts"][0]["statistics"][0]["disk"]:
                self.assertLessEqual(
                    x["util"],
                    expect_utils,
                    msg="Utils more than %s without any large io! act: %s" %
                    (expect_utils, x["util"]))
        except ValueError as err:
            self.log.info("cmd has no json support")
            cmd = "sudo iostat -x"
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo iostat -x|awk -F' ' '{print $NF}'"
            output = utils_lib.run_cmd(self, cmd, expect_ret=0)
            compare = False
            for util in output.split('\n'):
                if 'util' in util:
                    compare = True
                    continue
                if compare and not util == '':
                    if float(util) > expect_utils:
                        self.fail("Some disk's utils %s is larger than %s" %
                                  (util, expect_utils))
Example 4
 def test_podman_leaks_exit(self):
     '''
     case_name:
         test_podman_leaks_exit
     case_priority:
         2
     component:
         podman
     bugzilla_id:
         1730281
     customer_case_id:
         02390622
     polarion_id:
         n/a
     maintainer:
         [email protected]
     description:
         podman leaks kernel memory due to return code stored in tmpfs
     key_steps:
         1. $ podman run --name test -d ubi
         2. $ sudo ls /run/libpod/exits/
     expected_result:
         Step2 return nothing.
     '''
     utils_lib.is_cmd_exist(self, 'podman')
     self.log.info(
         "Test podman does not leak exit files under /run/libpod/exits/")
     cmd = "podman ps -a"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to list all containers before testing')
     cmd = "podman rm -a -f"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to clean all containers before testing')
     cmd = "podman run --name test -d ubi"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='try to start a test container')
     time.sleep(2)
     cmd = "podman ps -a"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to list all containers after testing')
     product_name = utils_lib.get_os_release_info(self, field='NAME')
     cmd = "sudo ls /run/libpod/exits/"
     out = utils_lib.run_cmd(self,
                             cmd,
                             msg='check if saved exit code in tmpfs')
     if 'No such file or directory' in out:
         self.log.info('no leak found')
     else:
         if out != '':
             self.fail('leaks found')
         else:
             self.log.info('no leak found')
Example 5
 def test_cpupower_exception(self):
     '''
     No exception when run cpupower command
     polarion_id: N/A
     bz: 1626505, 1659883
     '''
     utils_lib.is_cmd_exist(self, 'cpupower')
     cmd = "sudo cpupower info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
     cmd = "sudo cpupower idle-info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
     cmd = "sudo cpupower frequency-info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
Example 6
    def test_fork_pte(self):
        '''
        case_name:
            test_fork_pte

        case_priority:
            2

        component:
            kernel

        bugzilla_id:
            1908439

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Ensure dirty bit is preserved across pte_wrprotect

        key_steps:
            1. # wget https://github.com/redis/redis/files/5717040/redis_8124.c.txt
            2. # mv redis_8124.c.txt redis_8124.c
            3. # gcc -o reproduce redis_8124.c
            4. # systemd-run --scope -p MemoryLimit=550M ./reproduce

        expected_result:
            Your kernel looks fine.
        '''
        utils_lib.run_cmd(self,
                          'uname -r',
                          cancel_not_kw='el7,el6',
                          msg='not support in el7 and el6')
        if utils_lib.get_memsize(self) < 4:
            self.skipTest('skip when mem lower than 4GiB')
        utils_lib.is_cmd_exist(self, cmd='gcc', cancel_case=True)
        utils_lib.is_cmd_exist(self, cmd='wget', cancel_case=True)
        cmd_list = [
            'wget https://github.com/redis/redis/files/5717040/redis_8124.c.txt',
            'mv redis_8124.c.txt redis_8124.c',
            'gcc -o reproduce redis_8124.c',
            'sudo systemd-run --scope -p MemoryLimit=550M ./reproduce'
        ]
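        # run the reproducer under a 550M memory limit; it prints "Your kernel looks fine" when the dirty bit survives pte_wrprotect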
        for cmd in cmd_list:
            out = utils_lib.run_cmd(self, cmd, expect_ret=0, timeout=120)
        if 'Your kernel looks fine' not in out:
            self.fail("'Your kernel looks fine' not found in {}".format(out))
Example 7
 def test_podman_dev_null_permission(self):
     '''
     case_name:
         test_podman_dev_null_permission
     case_priority:
         2
     component:
         podman
     bugzilla_id:
         1952698
     customer_case_id:
         02920986
     polarion_id:
         n/a
     maintainer:
         [email protected]
     description:
         Make sure permissions on /dev/null do not change from 666 to 777 after running podman as root
     key_steps:
         1. # sudo podman run -d -p 80:80 httpd
         2. # ls -l /dev/null
     expected_result:
         /dev/null permission keeps 666
     '''
     utils_lib.is_cmd_exist(self, 'podman')
     cmd = "ls -l /dev/null"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='check /dev/null permission before test')
     cmd = "sudo chmod 666 /dev/null"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='change /dev/null permission to 666')
     cmd = "podman rm -a -f"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to clean all containers before testing')
     cmd = "podman run --name test -d ubi"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='try to start a test container')
     cmd = "ls -l /dev/null"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       expect_kw='crw-rw-rw-.',
                       msg='check /dev/null permission after test')
Example 8
    def test_podman_build_image(self):
        '''
        case_name:
            test_podman_build_image

        case_priority:
            2

        component:
            podman

        bugzilla_id:
            1903412

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            podman can build an image using '--network container' in rootless or root mode

        key_steps:
            1. $ cat Dockerfile
               FROM registry.access.redhat.com/ubi8/ubi
               RUN touch /tmp/test.txt
            2. # podman build --network container -t build_test .

        expected_result:
            Build successfully.
        '''
        self.log.info(
            "Test podman can build an image using '--network container'")
        utils_lib.is_cmd_exist(self, 'podman')
        dockerfile = '''
FROM registry.access.redhat.com/ubi8/ubi
RUN touch /tmp/test.txt
        '''
        cmd = "echo '{}' > Dockerfile".format(dockerfile)
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='generate Dockerfile')
        cmd = "podman build --network container -t build_test ."
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='build image')
        cmd = "podman run --rm -it build_test uname -r"
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='check kernel')
        cmd = "podman run --rm -it build_test whoami"
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='check user')
        cmd = "podman run --rm -it build_test ls -l /tmp/test.txt"
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='check test file')
Example 9
 def test_podman_rm_stopped(self):
     '''
     bz: 1913295
     des: podman can remove a stopped container
     '''
     self.log.info("Test podman can remove a stopped container")
     utils_lib.is_cmd_exist(self, 'podman')
     cmd = "podman ps -a"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to list all containers before testing')
     cmd = "podman rm -a -f"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to clean all containers before testing')
     cmd = "podman run --name myctr1 -td quay.io/libpod/alpine"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='run myctr1',
                       timeout=180)
     cmd = "podman run --name myctr2 -td quay.io/libpod/alpine"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='run myctr2',
                       timeout=180)
     cmd = "timeout 5 podman exec myctr1 sleep 10"
     utils_lib.run_cmd(self, cmd)
     cmd = "podman kill myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     time.sleep(1)
     cmd = "podman inspect myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman rm myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0, msg='try to remove myctr1')
     cmd = "timeout 5 podman exec myctr2 sleep 10"
     utils_lib.run_cmd(self, cmd)
     cmd = "podman stop myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman inspect myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman rm myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0, msg='try to remove myctr2')
     cmd = "podman ps"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_not_kw='myctr1,myctr2',
                       msg='try to list all containers again after testing')
Example 10
    def test_check_tuned_adm_active(self):
        '''
        case_name:
            test_check_tuned_adm_active

        case_priority:
            1

        component:
            kernel

        bugzilla_id:
            1893063

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Check tuned-adm loads the default "virtual-guest" profile in a vm and does not load it on a bare metal instance

        key_steps:
            1. # tuned-adm active

        expected_result:
            Should not load virtual-guest in bare metal.
            Should load virtual-guest in vm by default.

        '''
        utils_lib.is_cmd_exist(self, cmd='tuned-adm', cancel_case=True)
        if 'inactive' in utils_lib.run_cmd(self,
                                           'sudo systemctl is-active tuned'):
            utils_lib.run_cmd(self,
                              'sudo systemctl enable --now tuned',
                              msg='enable tuned service')
        if utils_lib.is_metal(self):
            utils_lib.run_cmd(
                self,
                'tuned-adm active',
                expect_not_kw='virtual-guest',
                msg='Should not load virtual-guest in bare metal')
        else:
            utils_lib.run_cmd(self,
                              'tuned-adm active',
                              expect_kw='virtual-guest',
                              msg='Should load virtual-guest in vm by default')
Example 11
 def test_fsadm_resize(self):
     '''
     bz: 1905705
     polarion_id: N/A
     fsadm resize should not crash as below without NEW_SIZE specified
     # fsadm resize $(findmnt -n -o source /)
     /sbin/fsadm: line 818: $3: unbound variable
     expected result:
     fsadm does nothing since the filesystem is already at maximum size
     '''
     utils_lib.is_cmd_exist(self, 'fsadm')
     utils_lib.run_cmd(self,
                       'sudo fsadm resize $(findmnt -n -o source /)',
                       expect_ret=0,
                       expect_not_kw="unbound variable",
                       msg="fsadm should not crash")
Example 12
 def test_fio_cpuclock(self):
     '''
     bz: 1943474
     polarion_id: N/A
     Perform test and validation of internal CPU clock.
     '''
     utils_lib.run_cmd(self, 'sudo lscpu', cancel_not_kw="aarch64")
     utils_lib.is_cmd_exist(self, 'fio')
     cmd = "sudo fio --cpuclock-test"
     utils_lib.run_cmd(
         self,
         cmd,
         expect_ret=0,
         expect_kw="Pass",
         msg='Perform test and validation of internal CPU clock.',
         timeout=1200)
Example 13
    def test_check_lshw_mem(self):
        '''
        case_name:
            test_check_lshw_mem

        case_priority:
            1

        component:
            lshw

        bugzilla_id:
            1882157

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Check "lshw -C memory -json" reported memory size is correct.

        key_steps:
            1. # lshw -C memory -json

        expected_result:
            No big gap found.
            eg. #  lshw -C memory -json|grep -i size
                    "size" : 98304,
                    "size" : 4286578688, <-- 4GiB is correct
                        "size" : 4286578688,

        '''
        utils_lib.is_cmd_exist(self, cmd='lshw')
        base_memory = utils_lib.get_memsize(self)
        cmd = 'lshw -json'
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
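        # lshw reports the system as the top-level node; one of its children is the "memory" node whose "size" is in bytes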
        out = json.loads(output)['children'][0]["children"]
        for i in out:
            if i['id'] == 'memory':
                mem_in_byte = i['size']
                break
        mem_in_gib = mem_in_byte / 1024 / 1024 / 1024
        self.log.info("lshw showed mem: {}".format(mem_in_gib))

        utils_lib.compare_nums(self, mem_in_gib, base_memory, ratio=15)
Example 14
 def test_subscription_manager_config(self):
     '''
     bz: 1862431
     des: "subscription-manager config" output should equal "subscription-manager config --list"
     '''
     utils_lib.is_cmd_exist(self, 'subscription-manager')
     cmd1 = "sudo subscription-manager config"
     out1 = utils_lib.run_cmd(self,
                              cmd1,
                              expect_ret=0,
                              msg='get {} output'.format(cmd1))
     cmd2 = "sudo subscription-manager config --list"
     out2 = utils_lib.run_cmd(self,
                              cmd2,
                              expect_ret=0,
                              msg='get {} output'.format(cmd2))
     if out1 != out2:
         self.fail('"{}" output not same with "{}"'.format(cmd1, cmd2))
Example 15
 def test_subscription_manager_config(self):
     """
     case_name:
         test_subscription_manager_config
     component:
         subscription-manager
     bugzilla_id:
         1862431
     is_customer_case:
         True
     maintainer:
         [email protected]
     description:
         check if "subscription-manager config" output equals "subscription-manager config --list" output
     key_steps:
         1.sudo subscription-manager config > /tmp/sm_config.log
         2.sudo subscription-manager config --list > /tmp/sm_config_list.log
         3.sudo diff -u /tmp/sm_config.log /tmp/sm_config_list.log
     expect_result:
         These two configs are same
     debug_want:
         sm_config and sm_config_list
     """
     utils_lib.is_cmd_exist(self, 'subscription-manager')
     cmd1 = "sudo subscription-manager config > /tmp/sm_config.log"
     out1 = utils_lib.run_cmd(self,
                              cmd1,
                              expect_ret=0,
                              msg='get {} output'.format(cmd1))
     cmd2 = "sudo subscription-manager config --list > /tmp/sm_config_list.log"
     out2 = utils_lib.run_cmd(self,
                              cmd2,
                              expect_ret=0,
                              msg='get {} output'.format(cmd2))
     utils_lib.run_cmd(self, 'sudo cat /tmp/sm_config.log', expect_ret=0)
     cmd = "sudo diff -u /tmp/sm_config.log /tmp/sm_config_list.log"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='check if both are identical')
Example 16
 def test_cpupower_exception(self):
     '''
     case_name:
         test_cpupower_exception
     case_file:
         https://github.com/liangxiao1/os-tests/blob/master/os_tests/tests/test_general_test.py
     case_priority:
         2
     component:
         kernel-Platform Enablement
     bugzilla_id:
         1626505, 1659883, 1999926
     customer_case_id:
         02172487
     polarion_id:
         n/a
     maintainer:
         [email protected]
     description:
         Run cpupower to query processor power related values. It may not support all fields, but crash is not expected.
     key_steps:
         1. # cpupower info
         2. # cpupower idle-info
         3. # cpupower frequency-info
     expected_result:
         No application crash when run cpupower command, all return 0.
     debug_want:
         1. # lscpu
         2. # rpm -q kernel-tools
         3. # uname -r
     '''
     utils_lib.is_cmd_exist(self, 'cpupower')
     debug_cmds = ['lscpu', 'rpm -q kernel-tools', 'uname -r']
     for cmd in debug_cmds:
         utils_lib.run_cmd(
             self,
             cmd,
             msg='please attach {} output if file bug'.format(cmd))
     cmd = "sudo cpupower info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
     cmd = "sudo cpupower idle-info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
     cmd = "sudo cpupower frequency-info"
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='core dumped')
Example 17
    def test_check_lspci_nvme(self):
        '''
        case_name:
            test_check_lspci_nvme

        case_priority:
            1

        component:
            kernel

        bugzilla_id:
            1656862

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Check all nvme pci devices are found by "lsblk"

        key_steps:
            1. # lspci|grep "Non-Volatile memory"|wc -l
            2. # lsblk -d|grep nvme|wc -l

        expected_result:
            The nums are equal.

        '''
        utils_lib.is_cmd_exist(self, cmd='lspci')
        lspci_cmd = "lspci|grep 'Non-Volatile memory'|wc -l"
        lsblk_cmd = "lsblk -d|grep nvme|wc -l"
        lspci_out = utils_lib.run_cmd(self,
                                      lspci_cmd,
                                      cancel_not_kw='0',
                                      msg="Check nvme pci device")
        lsblk_out = utils_lib.run_cmd(self,
                                      lsblk_cmd,
                                      msg="Check nvme block device")
        self.assertEqual(lspci_out,
                         lsblk_out,
                         msg="No all nvme pci device nvme driver are loaded")
Example 18
 def test_check_dmidecode_outofspec(self):
     '''
     bz: 1858350
     des: make sure there is no "OUT OF SPEC" in dmidecode output
     '''
     utils_lib.is_cmd_exist(self, cmd='dmidecode')
     cmd = "sudo dmidecode --dump-bin {}/debug/dmidecode_debug.bin".format(
         self.log_dir)
     utils_lib.run_cmd(
         self,
         cmd,
         msg=
         'save dmidecode_debug.bin for debug purpose, please attach it if file bug'
     )
     utils_lib.run_cmd(
         self,
         'sudo dmidecode',
         expect_ret=0,
         expect_not_kw='OUT OF SPEC',
         msg='Check there is no "OUT OF SPEC" in dmidecode output')
Example 19
    def _fio_test(self, test_log):
        '''
        Initial fio test and put into background for processing.
        '''
        self.log.info(
            "Initial fio test and put into background for processing")
        cmd = "[[ -d /tmp/fio_test ]] || mkdir /tmp/fio_test"
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg="Create test dir")
        utils_lib.is_cmd_exist(self, cmd='fio')
        cmd = "setsid fio --group_reporting=1 --name=nutanix-fio-test \
--numjobs=4 --iodepth=4 --size=500m --bs=4k --rw=randrw -rwmixread=70 \
--ioengine=psync --time_based=1 --runtime=300 \
--directory=/tmp/fio_test --filename=test01:test02:test03:test04:test05 > %s" % test_log
        utils_lib.run_cmd(self, cmd, msg="Start fio test", timeout=2)

        cmd = "ps -ef | grep -v grep | grep fio-test"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_kw="nutanix",
                          msg="Check if all fio test jobs have started")
Example 20
 def test_check_virtwhat(self):
     '''
     bz: 1782435
     polarion_id: RHEL7-103857
     test virt-what, do not use systemd-detect-virt
     '''
     utils_lib.is_cmd_exist(self, cmd='virt-what')
     utils_lib.run_cmd(self,
                       "rpm -q virt-what",
                       expect_ret=0,
                       msg='get virt-what version')
     virt_what_output = utils_lib.run_cmd(self,
                                          r"sudo virt-what",
                                          expect_ret=0)
     lscpu_output = utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
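     # lscpu shows the hypervisor vendor (Xen/KVM/VMware/Microsoft); compare it with the virt-what output below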
     if 'Xen' in lscpu_output:
         self.log.info("Found it is a xen system!")
         if 'full' in lscpu_output:
             self.assertIn('xen-hvm', virt_what_output)
         else:
             self.assertIn('xen-domU', virt_what_output)
     elif 'KVM' in lscpu_output and not utils_lib.is_metal(self):
         self.log.info("Found it is a kvm system!")
         self.assertIn('kvm', virt_what_output)
     elif 'VMware' in lscpu_output:
         self.log.info("Found it is a vmware system!")
         self.assertIn('vmware', virt_what_output)
     elif 'Microsoft' in lscpu_output:
         self.log.info("Found it is a Hyper-V system!")
         self.assertIn('hyperv', virt_what_output)
     elif utils_lib.is_metal(self) and utils_lib.is_aws(self):
         self.log.info("Found it is a bare metal system!")
         self.assertEqual('aws', virt_what_output.strip('\n'))
     elif utils_lib.is_metal(self):
         self.log.info("Found it is a bare metal system!")
     elif utils_lib.is_aws(self) and utils_lib.is_arch(self,
                                                       arch='aarch64'):
         self.assertEqual('aws', virt_what_output.strip('\n'))
     else:
         self.skipTest("Unknow hypervisor")
Example 21
    def _verify_memory_vnuma(self):
        '''
        Verify memory vnuma nodes number between Nutanix AHV and RHEL guest.
        '''
        self.log.info(
            "Verify memory vnuma nodes number between Nutanix AHV and RHEL guest"
        )
        vnuma_in_ahv = self.vm.get_memory_vnuma()
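        # AHV reports 0 when vNUMA is not configured, but the guest still sees one NUMA node, so normalize to 1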
        if vnuma_in_ahv == 0:
            vnuma_in_ahv += 1
        utils_lib.is_cmd_exist(self, cmd='numactl')
        cmd = "numactl --hardware | grep available:"
        res = utils_lib.run_cmd(self,
                                cmd,
                                expect_ret=0,
                                msg="Check numa nodes number on RHEL guest")
        numa_in_guest = int(re.findall(r'available: (\d)', res)[0])

        self.assertEqual(
            numa_in_guest, vnuma_in_ahv,
            "Test failed, memory vnuma nodes number on RHEL guest: %s does not match with Nutanix AHV: %s"
            % (numa_in_guest, vnuma_in_ahv))
Example 22
 def test_fio_cpuclock(self):
     """
     case_name:
         test_fio_cpuclock
     component:
         fio
     bugzilla_id:
         1943474
     is_customer_case:
         False
     maintainer:
         [email protected]
     description:
         test if fio can run normally
     key_steps:
         1.check cpu is not aarch64
         2.sudo fio --cpuclock-test
     expect_result:
         pass the test without return
     debug_want:
         N/A
     """
     utils_lib.run_cmd(self, 'sudo lscpu', cancel_not_kw="aarch64")
     utils_lib.is_cmd_exist(self, 'fio')
     cmd = "sudo fio --cpuclock-test"
     utils_lib.run_cmd(
         self,
         cmd,
         expect_ret=0,
         expect_kw="Pass",
         msg='Perform test and validation of internal CPU clock.',
         timeout=1200)
Example 23
    def test_mtu_min_max_set(self):
        '''
        polarion_id: RHEL-111097
        bz: 1502554, 1497228
        ena mtu range: 128~9216
        ixgbevf mtu range: 68~9710
        vif mtu range: 68~65535
        vmxnet3 mtu range: 60~9000
        '''

        utils_lib.is_cmd_exist(self, cmd='ethtool')
        utils_lib.msg_to_syslog(self)
        cmd = 'ip link show {}'.format(self.nic)
        out = utils_lib.run_cmd(self,
                                cmd,
                                expect_ret=0,
                                msg='save the mtu before change')
        self.mtu_old = re.findall('mtu [0-9]+', out)[0].split(' ')[1]
        self.log.info("Get old mtu: {}".format(self.mtu_old))
        cmd = "sudo ethtool -i {}".format(self.nic)
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        if 'ena' in output:
            self.log.info('ena found!')
            mtu_range = [0, 127, 128, 4500, 9216, 9217]
            mtu_min = 128
            mtu_max = 9216
        elif 'ixgbe' in output:
            self.log.info('ixgbevf found!')
            mtu_range = [0, 67, 68, 4500, 9710, 9711]
            mtu_min = 68
            mtu_max = 9710
        elif 'vif' in output:
            self.log.info('vif found!')
            mtu_range = [0, 67, 68, 4500, 65535, 65536]
            mtu_min = 68
            mtu_max = 65535
        elif 'vmxnet3' in output:
            self.log.info('vmxnet3 found!')
            if self.params['remote_node'] != 'None' or len(
                    self.params['remote_node']) >= 5:
                self.skipTest(
                    "Skip mtu test while running remotely with vmxnet3")
            self.log.info(
                "vmxnet3 min mtu is 60, because of bz1503193, skip test lower value than 68"
            )
            mtu_range = [68, 4500, 9000, 9001]
            mtu_min = 60
            mtu_max = 9000
        elif 'igb' in output:
            self.log.info('igb found!')
            mtu_range = [0, 67, 68, 4500, 9216, 9217]
            mtu_min = 68
            mtu_max = 9216
        elif 'tg3' in output:
            self.log.info('tg3 found!')
            mtu_range = [0, 59, 60, 4500, 9000, 9001]
            mtu_min = 60
            mtu_max = 9000
        else:
            self.log.info(
                'Did not detect network type, use default min~max mtu. %s' %
                output)
            mtu_range = [0, 67, 68, 4500, 65535, 65536]
            mtu_min = 68
            mtu_max = 65535

        self.log.info("Trying to change mtu to %s" % mtu_range)
        for mtu_size in mtu_range:
            mtu_cmd = "sudo ip link set dev %s mtu %s" % (self.nic, mtu_size)
            mtu_check = "sudo ip link show dev {}".format(self.nic)
            if mtu_size <= mtu_max and mtu_size >= mtu_min:
                utils_lib.run_cmd(self, mtu_cmd, expect_ret=0)
                utils_lib.run_cmd(self,
                                  mtu_check,
                                  expect_ret=0,
                                  expect_kw="mtu {}".format(mtu_size))
            elif mtu_size < mtu_min or mtu_size > mtu_max:
                utils_lib.run_cmd(self, mtu_cmd, expect_not_ret=0)
                utils_lib.run_cmd(self,
                                  mtu_check,
                                  expect_ret=0,
                                  expect_not_kw="mtu {}".format(mtu_size))
        cmd = "ping {} -c 2 -I {}".format(self.params.get('ping_server'),
                                          self.nic)
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        utils_lib.check_log(self,
                            "error,warn,fail,trace",
                            log_cmd='dmesg -T',
                            cursor=self.dmesg_cursor,
                            skip_words='ftrace')
Example 24
    def test_ethtool_S_xdp(self):
        '''
        case_name:
            test_ethtool_S_xdp

        case_priority:
            2

        component:
            kernel

        bugzilla_id:
            1908542

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Use ethtool to query the specified network device xdp statistics.

        key_steps:
            1. # ethtool -S $nic |grep xdp

        expected_result:
            xdp status found
            eg. # ethtool -S eth0 |grep xdp
                  queue_0_rx_xdp_aborted: 0
                  queue_0_rx_xdp_drop: 0
                  queue_0_rx_xdp_pass: 0
                  queue_0_rx_xdp_tx: 0
                  queue_0_rx_xdp_invalid: 0
                  queue_0_rx_xdp_redirect: 0

        '''
        product_id = utils_lib.get_product_id(self)
        cmd = "sudo ethtool -i {}".format(self.nic)
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
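        # the ena driver exposes xdp statistics only on releases newer than 8.4; older releases are skipped below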
        if 'ena' in output:
            self.log.info('ena driver found!')
            if float(product_id) > 8.4:
                cmd = "ethtool -S {}|grep xdp".format(self.nic)
                utils_lib.run_cmd(self,
                                  cmd,
                                  expect_ret=0,
                                  msg='Check if have xdp information')
            else:
                self.skipTest('ena driver does not support xdp prior to 8.4')
        else:
            cmd = "ethtool -S {}|grep xdp".format(self.nic)
            utils_lib.run_cmd(self,
                              cmd,
                              cancel_ret='0',
                              msg='Check if have xdp support')
        if float(product_id) > 8.3 and utils_lib.is_arch(self, arch='x86_64'):
            utils_lib.is_cmd_exist(self, 'xdp-loader')
            cmd = 'sudo xdp-loader status'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              msg='Check xdp-loader status')
            cmd = 'sudo xdp-loader unload -a {}'.format(self.nic)
            utils_lib.run_cmd(self, cmd, msg='unload xdp-filter if have')
            cmd = 'sudo xdp-filter load --mode skb {}'.format(self.nic)
            utils_lib.run_cmd(self, cmd, expect_ret=0, msg='load xdp-filter')
            cmd = 'sudo xdp-loader status'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_kw='XDP_PASS',
                              msg='Check xdp-loader status again')
            cmd = 'sudo xdp-loader unload -a {}'.format(self.nic)
            utils_lib.run_cmd(self, cmd, expect_ret=0, msg='unload xdp-filter')
            cmd = 'sudo xdp-loader status'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='XDP_PASS',
                              msg='Check xdp-loader status again')
Example 25
    def test_hibernate_resume(self):
        """
        case_tag:
            lifecycle
        case_name:
            test_hibernate_resume
        case_file:
            https://github.com/virt-s1/os-tests/blob/master/os_tests/tests/test_vm_operation.py
        component:
            kernel
        bugzilla_id:
            1898677
        is_customer_case:
            True
        testplan:
            N/A
        maintainer:
            [email protected]
        description:
            Test system hibernation and check that the test process is still running after resume
        key_steps: |
            1. enable hibernation on system
            2. start a test process, eg. sleep 1800
            3. hibernate system
            4. start system
            5. the test process still running
        expect_result:
            test process resume successfully
        debug_want:
            dmesg or console output
        """
        if not self.vm:
            self.skipTest('vm not init')
        utils_lib.run_cmd(self,
                          'lscpu',
                          expect_ret=0,
                          cancel_not_kw="Xen",
                          msg="Not support in xen instance")
        utils_lib.is_cmd_exist(self, "acpid")
        if self.vm.provider == 'aws':
            product_id = utils_lib.get_os_release_info(self,
                                                       field='VERSION_ID')
            if float(product_id) >= 8.0 and float(product_id) < 9.0:
                pkg_url = 'https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/e/ec2-hibinit-agent-1.0.4-1.el8.noarch.rpm'
            elif float(product_id) < 8.0:
                self.skipTest('not supported earlier than rhel8')
            else:
                pkg_url = "https://dl.fedoraproject.org/pub/fedora/linux/releases/34/Everything/x86_64/os/Packages/e/ec2-hibinit-agent-1.0.3-5.fc34.noarch.rpm"
            utils_lib.pkg_install(self,
                                  pkg_name='ec2-hibinit-agent',
                                  pkg_url=pkg_url,
                                  force=True)
            cmd = 'sudo systemctl is-enabled hibinit-agent.service'
            output = utils_lib.run_cmd(self, cmd)
            if 'enabled' not in output:
                cmd = 'sudo systemctl enable --now hibinit-agent.service'
                utils_lib.run_cmd(self, cmd)
                utils_lib.run_cmd(self,
                                  'sudo reboot',
                                  msg='reboot system under test')
                utils_lib.init_connection(self, timeout=self.ssh_timeout)
                timeout = 180
                interval = 5
                time_start = int(time.time())
                while True:
                    cmd = 'sudo systemctl is-active hibinit-agent.service'
                    out = utils_lib.run_cmd(self, cmd)
                    if 'inactive' in out:
                        break
                    time_end = int(time.time())
                    if time_end - time_start > timeout:
                        self.log.info('timeout ended: {}'.format(timeout))
                        break
                    self.log.info('retry after {}s'.format(interval))
                    time.sleep(interval)
                cmd = 'sudo systemctl status hibinit-agent.service'
                utils_lib.run_cmd(self, cmd)
        else:
            cmd = 'cat /proc/swaps'
            output = utils_lib.run_cmd(self,
                                       cmd,
                                       msg='check whether system has swap on')
            if '-2' not in output:
                self.log.info("No swap found, creating new one")
                cmd = """
                    sudo dd if=/dev/zero of=/swap bs=1024 count=2000000;
                    sudo chmod 0600 /swap;
                    sudo mkswap /swap;
                    sudo swapon /swap;
                    offset=$(filefrag -v /swap| awk '{if($1==\"0:\"){print $4}}');
                    uuid=$(findmnt -no UUID -T /swap);
                    sudo grubby --update-kernel=ALL  --args=\"resume_offset=${offset//.} resume=UUID=$uuid\";
                    sudo echo '/swap    swap    swap   defaults 0 0' >> /etc/fstab
                    """
                utils_lib.run_cmd(self, cmd, timeout=240)

        cmd = "sleep 360 > /dev/null 2>&1 &"
        utils_lib.run_cmd(self, cmd)
        vm_hibernate_success = False
        try:
            if not self.vm.send_hibernation():
                self.skipTest('send hibernate not succeed')
            vm_hibernate_success = True
        except NotImplementedError:
            self.log.info(
                'send_hibernation func is not implemented in {}'.format(
                    self.vm.provider))
        except UnSupportedAction:
            self.log.info(
                'send_hibernation func is not supported in {}'.format(
                    self.vm.provider))
        if not vm_hibernate_success:
            cmd = "sudo systemctl hibernate"
            utils_lib.run_cmd(self, cmd, msg="Try to hibernate inside system!")
            time.sleep(20)

        self.vm.start()
        time.sleep(10)
        self.params['remote_node'] = self.vm.floating_ip
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
        utils_lib.run_cmd(self,
                          'dmesg',
                          expect_kw="Restarting tasks",
                          expect_not_kw="Call",
                          msg="check the system is resumed")
        cmd = 'pgrep -a sleep'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          msg='check sleep process still exists')
Example 26
    def test_virsh_pci_reattach(self):
        '''
        case_name:
            test_virsh_pci_reattach
        case_priority:
            1
        component:
            kernel
        bugzilla_id:
            1700254
        polarion_id:
            n/a
        maintainer:
            [email protected]
        description:
            Test no exception when system does pci detach and attach operation in virsh.
            virsh can detach host pci device and attach it to guest.
        key_steps:
            1. #virsh  nodedev-detach $pci
            2. #virsh  nodedev-reattach $pci
        expected_result:
            No panic/crash happen.
            eg. # virsh nodedev-detach pci_0000_2b_00_0
                Device pci_0000_2b_00_0 detached
                # virsh nodedev-reattach pci_0000_2b_00_0
                Device pci_0000_2b_00_0 re-attached
        '''
        utils_lib.is_metal(self, action="cancel")
        cmd = "sudo yum install -y libvirt"
        utils_lib.run_cmd(self, cmd, msg="install libvirt pkg")
        cmd = "sudo systemctl restart libvirtd"
        utils_lib.run_cmd(self, cmd, cancel_ret='0', msg="restart libvirtd")
        utils_lib.is_cmd_exist(self, cmd='virsh')
        if utils_lib.is_arch(self, arch='x86_64'):
            utils_lib.run_cmd(self,
                              'cat /proc/cmdline',
                              cancel_kw='intel_iommu=on',
                              msg='Check boot line')

        utils_lib.run_cmd(self, 'sudo lspci', msg="get pci list")
        tmp_pci = None
        cmd = "lspci|grep 'Non-Volatile memory'|wc -l"
        out = utils_lib.run_cmd(self, cmd)
        if int(out) > 0:
            cmd = 'sudo find /sys/devices -name *nvme*n1p1*'
            tmp_root = utils_lib.run_cmd(self, cmd, msg="get boot nvme pci")
            boot_pci = tmp_root.split('/')[-2]
            cmd = 'sudo find /sys/devices -name *nvme*|grep -v %s|\
grep -i pci|grep n1' % boot_pci
            ret = utils_lib.run_cmd(self,
                                    cmd,
                                    msg="get test pci",
                                    ret_status=True)
            if int(ret) == 0:
                tmp_pci = utils_lib.run_cmd(self, cmd, msg="get test pci")
                if len(tmp_pci) > 0:
                    tmp_pci = tmp_pci.split('/')[-4]
                else:
                    tmp_pci = None
            else:
                tmp_pci = None
        if tmp_pci is None:
            cmd = 'sudo find /sys/devices -name *ttyS0*|grep [0-9]:[0-9]'
            tmp_root = utils_lib.run_cmd(self,
                                         cmd,
                                         msg="try to get ttyS0 pci device")
            if len(tmp_root) == 0:
                tmp_pci = None
            else:
                serial_pci = tmp_root.split('/')[-3]
                tmp_pci = serial_pci
        if tmp_pci is None:
            cmd = 'sudo find /sys/devices -name *vga*|grep [0-9]:[0-9]'
            tmp_root = utils_lib.run_cmd(self,
                                         cmd,
                                         msg="try to get vga pci device")
            if len(tmp_root) == 0:
                tmp_pci = None
            else:
                vga_pci = tmp_root.split('/')[-2]
                tmp_pci = vga_pci
        if tmp_pci is not None:
            self.log.info("Get pci device: {}".format(tmp_pci))
        else:
            self.skipTest("No pci device found to detach")

        tmp_pci = tmp_pci.replace('.', '_')
        tmp_pci = tmp_pci.replace(':', '_')
        pci_dev_1 = utils_lib.run_cmd(
            self,
            'sudo virsh nodedev-list|grep %s |tail -1' % tmp_pci,
            msg='pick up device to detach')
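        # if the picked device is function 1 of a multi-function device, detach function 0 as well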
        if pci_dev_1.endswith('1'):
            pci_dev_0 = pci_dev_1.rstrip('1') + '0'
            utils_lib.run_cmd(self,
                              'sudo virsh nodedev-detach %s' % pci_dev_0,
                              msg='detach pci device',
                              expect_ret=0)
        utils_lib.run_cmd(self,
                          'sudo virsh nodedev-detach %s' % pci_dev_1,
                          msg='detach pci device',
                          expect_ret=0)
        utils_lib.run_cmd(self,
                          'sudo virsh nodedev-reattach %s' % pci_dev_1,
                          msg='reattach pci device',
                          expect_ret=0)
        if pci_dev_1.endswith('1'):
            utils_lib.run_cmd(self,
                              'sudo virsh nodedev-reattach %s' % pci_dev_0,
                              msg='reattach pci device',
                              expect_ret=0)
        utils_lib.check_log(self,
                            "error,warn,fail,trace,Trace",
                            log_cmd='dmesg -T',
                            cursor=self.dmesg_cursor)
Example 27
    def test_fips_selftest(self):
        '''
        case_name:
            test_fips_selftest
        case_priority:
            2
        component:
            openssl
        bugzilla_id:
            1940085
        customer_case_id:
            02874840
        polarion_id:
            n/a
        maintainer:
            [email protected]
        description:
            FIPS_selftest() pass
        key_steps:
            1. # gcc fipstest.c -o fipstest -lcrypto
            2. # ./fipstest
        expected_result:
            No fips selftest failed.
        '''
        fipstest = """
//required pkg: openssl-devel
//compile: gcc fipstest.c -o fipstest -lcrypto
//run ./fipstest
//https://www.openssl.org/docs/fips/UserGuide-2.0.pdf
#include <stdio.h>
#include <openssl/ssl.h>
#include <openssl/fips.h>
#include <openssl/err.h>

int fips_test(int fipsset){
    FIPS_mode_set(fipsset);
    if (FIPS_mode()){
        fprintf(stderr, "fips mode set.\\n");
    }
    else{
        fprintf(stderr, "fips mode not set.\\n");
    }
    if (FIPS_selftest()){
        fprintf(stderr, "fips selftest pass.\\n");
    }
    else{
        fprintf(stderr, "fips selftest failed.\\n");
        ERR_print_errors_fp(stderr);
    }
}
int main(int argc, char *argv[])
{
	fips_test(0);
	fips_test(1);
}
        """
        product_id = utils_lib.get_os_release_info(self, field='VERSION_ID')
        if float(product_id) >= 9.0:
            self.skipTest(
                'openssl-3.0.0 does not provide FIPS_selftest() API bz:1969692'
            )
        utils_lib.is_pkg_installed(self, pkg_name="openssl-devel")
        utils_lib.is_cmd_exist(self, 'gcc')
        cmd = "echo '{}' > /tmp/fipstest.c".format(fipstest)
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='generate fipstest.c')
        cmd = "gcc /tmp/fipstest.c -o /tmp/fipstest -lcrypto"
        utils_lib.run_cmd(self, cmd, expect_ret=0, msg='compile fipstest.c')
        cmd = "/tmp/fipstest"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw="fips selftest failed",
                          msg='run fipstest')
Example 28
 def test_systemd_coredump(self):
     """
     case_name:
         test_systemd_coredump
     case_file:
         os_tests.tests.test_general_test.TestGeneralTest.test_systemd_coredump
     component:
         systemd
     bugzilla_id:
         2025479, 1905582
     is_customer_case:
         False
     testplan:
         N/A
     maintainer:
         [email protected]
     description:
         The DefaultLimitCORESoft is set to 0 by default.
         Test systemd-coredump can save the process core successfully when a process crashes
     key_steps:
         1. # systemctl show | grep CORE
            DefaultLimitCORE=infinity
            DefaultLimitCORESoft=0 (rhel default set)
         2. create test.c
            #include <stdio.h>
            #include <stdlib.h>
            void main(){
            int x;
            free(&x);
            }
         3. # gcc -g -o pp test.c
         4. # ./pp
     expect_result:
         pp crashed and new core file is generated under /var/lib/systemd/coredump
     debug_want:
         - journal log
     """
     test_str = '''
     #include <stdio.h>
     #include <stdlib.h>
     void main(){
         int x;
         free(&x);
     }
     '''
     product_name = utils_lib.get_os_release_info(self, field='NAME')
     if 'Red Hat Enterprise Linux' in product_name:
         cmd = 'systemctl show | grep CORE'
         utils_lib.run_cmd(
             self,
             cmd,
             expect_kw='DefaultLimitCORESoft=0,DefaultLimitCORE=infinity',
             msg='check default limit core setting')
     utils_lib.run_cmd(self,
                       'ulimit -c 0;ulimit -c',
                       expect_ret=0,
                       expect_kw='0',
                       msg='test user can change limit core setting')
     utils_lib.run_cmd(self,
                       'ulimit -c unlimited;ulimit -c',
                       expect_ret=0,
                       expect_kw='unlimited',
                       msg='test user can change limit core setting')
     utils_lib.run_cmd(self,
                       'sudo rm -rf /var/lib/systemd/coredump/core.pp*',
                       msg='clean up core files before testing')
     self.cursor = utils_lib.get_cmd_cursor(self,
                                            cmd='journalctl -b0',
                                            rmt_redirect_stdout=True)
     test_file = '/tmp/test.c'
     utils_lib.is_cmd_exist(self, 'gcc')
     cmd = "echo '{}' > {}".format(test_str, test_file)
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='generate {}'.format(test_file))
     cmd = "gcc -g -o /tmp/pp {}".format(test_file)
     utils_lib.run_cmd(self, cmd, expect_ret=0)
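     # run the crashing program with an unlimited soft core limit so systemd-coredump can capture the core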
     utils_lib.run_cmd(self,
                       'ulimit -c unlimited;/tmp/pp',
                       msg='run it to trigger core dump')
     utils_lib.run_cmd(self,
                       'sudo ls /var/lib/systemd/coredump/core.pp*',
                       expect_ret=0,
                       msg='check core file generated')
     utils_lib.check_log(self,
                         "error,warn,fail",
                         log_cmd='journalctl -b0',
                         cursor=self.cursor,
                         rmt_redirect_stdout=True)
Example 29
    def test_collect_insights_result(self):
        '''
        case_name:
            test_collect_insights_result

        case_priority:
            1

        component:
            kernel

        bugzilla_id:
            1889702

        polarion_id:
            n/a

        maintainer:
            [email protected]

        description:
            Check if insights-client hits some rules.

        key_steps:
            1. #insights-client --register
            2. #insights-client --check-result
            3. #insights-client --show-results

        expected_result:
            If run on a dev compose, we assume no insights rule should be hit because no package update is available in the latest build.
            If a hit is expected on a dev compose, we can skip it in this case.
            If run on a GAed compose, please follow the rule suggestions and check manually.
        '''
        cmd = "cat /etc/redhat-release"
        utils_lib.run_cmd(self,
                          cmd,
                          cancel_not_kw='CentOS',
                          msg='Not run in centos')
        utils_lib.is_cmd_exist(self, cmd="insights-client")
        utils_lib.run_cmd(self, 'sudo lscpu', msg="get cpu information")
        utils_lib.run_cmd(self,
                          'sudo rpm -q insights-client',
                          msg="get insights-client version")
        utils_lib.run_cmd(self,
                          'sudo insights-client --register',
                          msg="try to register system",
                          timeout=120)
        utils_lib.run_cmd(
            self,
            'sudo insights-client --status',
            cancel_kw="System is registered",
            msg=
            "Please register system or add user to '/etc/insights-client/insights-client.conf'"
        )
        utils_lib.run_cmd(self,
                          'sudo insights-client --check-result',
                          expect_ret=0,
                          msg="checking system")
        out = utils_lib.run_cmd(self,
                                'sudo insights-client --show-result',
                                expect_ret=0,
                                msg="show insights result")
        #hit_list = json.loads(out)
        tmp_dict = json.loads(out)
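        # the --show-result output is JSON; a non-empty result means at least one insights rule was hit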
        if len(tmp_dict) > 0:
            out = utils_lib.run_cmd(
                self,
                'sudo insights-client --no-upload --keep-archive',
                expect_ret=0,
                msg="generate archive")
            gz_file = re.findall('/var/.*tar.gz', out)[0]
            file_name = gz_file.split('/')[-1]
            utils_lib.run_cmd(self,
                              'sudo cp {} {}'.format(gz_file, self.log_dir))
            self.fail("{} insights rule hit".format(len(tmp_dict)))
Example 30
 def test_podman_rm_stopped(self):
     """
     case_name:
         test_podman_rm_stopped
     component:
         podman
     bugzilla_id:
         1913295
     is_customer_case:
         True
     maintainer:
         [email protected]
     description:
         Test podman can remove a stopped container
     key_steps:
         1.podman ps -a
         2.podman rm -a -f
         3.podman run --name myctr1 -td quay.io/libpod/alpine
         4.podman run --name myctr2 -td quay.io/libpod/alpine
         5.timeout 5 podman exec myctr1 sleep 10
         6.podman kill myctr1
         7.podman inspect myctr1
         8.podman rm myctr1
         9.timeout 5 podman exec myctr2 sleep 10
         10.podman stop myctr2
         11.podman inspect myctr2
         12.podman rm myctr2
         13.podman ps
     expect_result:
         Neither myctr1 nor myctr2 is left after removing them
     """
     self.log.info("Test podman can remove a stopped container")
     utils_lib.is_cmd_exist(self, 'podman')
     cmd = "podman ps -a"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to list all containers before testing')
     cmd = "podman rm -a -f"
     utils_lib.run_cmd(self,
                       cmd,
                       msg='try to clean all containers before testing')
     cmd = "podman run --name myctr1 -td quay.io/libpod/alpine"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='run myctr1',
                       timeout=180)
     cmd = "podman run --name myctr2 -td quay.io/libpod/alpine"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       msg='run myctr2',
                       timeout=180)
     cmd = "timeout 5 podman exec myctr1 sleep 10"
     utils_lib.run_cmd(self, cmd)
     cmd = "podman kill myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     time.sleep(1)
     cmd = "podman inspect myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman rm myctr1"
     utils_lib.run_cmd(self, cmd, expect_ret=0, msg='try to remove myctr1')
     cmd = "timeout 5 podman exec myctr2 sleep 10"
     utils_lib.run_cmd(self, cmd)
     cmd = "podman stop myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman inspect myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0)
     cmd = "podman rm myctr2"
     utils_lib.run_cmd(self, cmd, expect_ret=0, msg='try to remove myctr2')
     cmd = "podman ps"
     utils_lib.run_cmd(self,
                       cmd,
                       expect_not_kw='myctr1,myctr2',
                       msg='try to list all containers again after testing')