def test_ethtool_P(self):
        '''
        :avocado: tags=test_ethtool_P,fast_check
        polarion_id:
        bug_id: 1704435
        Assertion: Can read mac address successfully
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = "ethtool -P eth0"
        output = utils_lib.run_cmd(self,
                                   cmd,
                                   expect_not_kw='00:00:00:00:00:00',
                                   msg='ethtool can read mac successfully')
        mac = ''.join(
            re.findall(
                '[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}:'
                '[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}', output))
        self.log.info("Get mac: %s" % mac)
        cmd = "ip addr show eth0"
        output = utils_lib.run_cmd(self,
                                   cmd,
                                   expect_kw=mac,
                                   msg='compare with ip showed mac')
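As a hedged cross-check (not part of the original suite), the permanent address reported by "ethtool -P" can be compared with the sysfs address of the same NIC. The helper below is a sketch that assumes it runs on the guest itself and that eth0 exists; permanent_mac_matches_sysfs is a hypothetical name.

# Hypothetical local helper: compare `ethtool -P` output against sysfs.
import re
import subprocess

def permanent_mac_matches_sysfs(nic='eth0'):
    # ethtool -P prints e.g. "Permanent address: 0a:1b:2c:3d:4e:5f"
    out = subprocess.run(['ethtool', '-P', nic],
                         capture_output=True, text=True).stdout
    match = re.search(r'([0-9a-f]{2}:){5}[0-9a-f]{2}', out)
    with open('/sys/class/net/%s/address' % nic) as f:
        sysfs_mac = f.read().strip()
    return match is not None and match.group(0) == sysfs_mac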
Example #2
    def test_reboot_vm_from_control(self):
        '''
        :avocado: tags=test_reboot_vm_from_control,kernel_tier1
        polarion_id: RHEL7-103636
        '''

        self.session.connect(timeout=self.ssh_wait_timeout)
        time.sleep(10)
        aws.check_session(self)

        utils_lib.run_cmd(self,
                          'last|grep reboot',
                          expect_ret=0,
                          msg="Before rebooting %s" % self.vm.instance_id)
        self.log.info("Rebooting %s" % self.vm.instance_id)
        if self.vm.reboot():
            if 'metal' in self.vm.instance_type:
                self.log.info("Wait %s" % self.ssh_wait_timeout)
                time.sleep(self.ssh_wait_timeout)
            else:
                self.log.info("Wait 30s")
                time.sleep(30)
            if self.session.session.is_responsive():
                self.fail("SSH connection keeps live!")
            self.session.connect(timeout=self.ssh_wait_timeout)
            utils_lib.run_cmd(self,
                              'last|grep reboot',
                              expect_ret=0,
                              msg="After reboot %s" % self.vm.instance_id)
            self.log.info("Reboot %s successfully" % self.vm.instance_id)

        else:
            self.fail("Reboot %s operation failed!" % self.vm.instance_id)
Example #3
    def test_change_instance_type(self):
        '''
        :avocado: tags=test_change_instance_type
        description:
        Test changing the instance type of an RHEL instance on the AWS platform. Linked case RHEL7-103853.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_change_instance_type"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Stop the instance.
            3. When the instance is in stopped status, from AWS console, AWS cli or API, change the instance type.
            4. Start the instance.
        pass_criteria: 
            Instance is started successfully with the new instance type.
            Note: Only change instance types within the same architecture.
        '''
        cmd = 'lscpu'
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        if 'aarch64' in output:
            self.log.info("arm instance")
            instance_list = [
                "t4g.small", "c6g.medium", "a1.xlarge", "c6gd.medium",
                "m6gd.medium", "r6g.medium","r6g.medium","c6gn.medium"
            ]
        else:
            self.log.info("x86 instance")
            instance_list = [
                "t2.small", "t3.medium", "m5.2xlarge", "m4.2xlarge",
                "c4.xlarge", "c5.xlarge", "c5d.xlarge", "g3.4xlarge",
                "i3.xlarge", "r5d.xlarge"
            ]

        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")
        old_type = self.vm.instance_type
        new_type = random.choice(instance_list)
        self.log.info("Try to change %s to %s" % (old_type, new_type))
        ret = self.vm.modify_instance_type(new_type)
        self.assertTrue(ret, msg="Failed to change instance type!")
        self.vm.start()

        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        self.log.info(
            "Changed instance type done! Restore it back to previous.")
        self.vm.stop(wait=True)
        ret = self.vm.modify_instance_type(old_type)
        self.assertTrue(ret, msg="Failed to restore instance type!")
Example #4
    def test_reboot_vm_inside_guest(self):
        '''
        :avocado: tags=test_reboot_vm_inside_guest
        polarion_id: RHEL7-103635
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        time.sleep(10)
        aws.check_session(self)

        self.log.info("Before rebooting %s" % self.vm.instance_id)
        output1 = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output1)
        # session.cmd_output fails to get ret status as reboot closes the connection.
        # Consider adding a function to guest.py to handle this. For now, use
        # sendline directly.
        self.session.session.sendline('sudo reboot')
        self.log.info("Rebooting %s via ssh" % self.vm.instance_id)
        if 'metal' in self.vm.instance_type:
            self.log.info("Wait %s" % self.ssh_wait_timeout)
            time.sleep(self.ssh_wait_timeout)
        else:
            self.log.info("Wait 60s")
            time.sleep(60)
        if self.session.session.is_responsive():
            self.fail("SSH connection keeps live!")

        self.session.connect(timeout=self.ssh_wait_timeout)
        output2 = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output2)
        # self.assertEqual(output1, output2, "Reboot %s operation failed!" % \
        #     self.vm.instance_id)
        self.log.info("Reboot %s successfully" % self.vm.instance_id)
    def test_ethtool_s_msglvl(self):
        '''
        :avocado: tags=test_ethtool_s_msglvl,fast_check
        polarion_id:
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = "ethtool eth0"
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        if "Current message level" not in output:
            self.cancel("Operation not supported!")
        self.log.info("Trying to change msglvl")
        msglvl_list = [
            'drv', 'probe', 'link', 'timer', 'ifdown', 'ifup', 'rx_err',
            'tx_err', 'tx_queued', 'intr', 'tx_done', 'rx_status', 'pktdata',
            'hw', 'wol'
        ]
        cmd = 'sudo  ethtool -s eth0 msglvl 0'
        utils_lib.run_cmd(self, cmd, msg='Disable all msglvl for now!')
        for msglvl in msglvl_list:
            cmd = 'sudo ethtool -s eth0 msglvl %s on' % msglvl
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo ethtool eth0"
            utils_lib.run_cmd(self, cmd, expect_kw=msglvl)

        for msglvl in msglvl_list:
            cmd = 'sudo ethtool -s eth0 msglvl %s off' % msglvl
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo ethtool eth0|grep -v 'link modes'"
            utils_lib.run_cmd(self, cmd, expect_not_kw=msglvl)
        cmd = 'dmesg|tail -20'
        utils_lib.run_cmd(self, cmd)
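The names printed after "Current message level" correspond to bits in the driver's message-level mask. A hedged parsing sketch, assuming the usual two-line ethtool layout (hex value on one line, flag names on the next), is shown below; parse_msglvl is not part of the suite.

# Sketch: extract the msglvl bitmask and flag names from `ethtool ethX` output.
import re

def parse_msglvl(ethtool_output):
    value, names = None, []
    lines = ethtool_output.splitlines()
    for i, line in enumerate(lines):
        m = re.search(r'Current message level:\s+(0x[0-9a-fA-F]+)', line)
        if m:
            value = int(m.group(1), 16)
            if i + 1 < len(lines):
                names = lines[i + 1].split()
            break
    return value, names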
Example #6
    def test_stop_vm_inside_guest(self):
        '''
        :avocado: tags=test_stop_vm_inside_guest,fast_check
        polarion_id: RHEL7-103846
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')

        self.log.info("Before shuting down %s" % self.vm.instance_id)
        output = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output)
        self.log.info("Stopping vm from inside itself %s" %
                      self.vm.instance_id)
        self.session.session.sendline('sudo init 0')

        time.sleep(30)
        if self.session.session.is_responsive():
            self.fail("SSH connection keeps live!")
        start_time = int(time.time())
        while True:
            time.sleep(20)
            if self.vm.is_stopped():
                self.log.info("VM is stopped!")
                break
            else:
                self.log.info(
                    "VM is not in stopped state, check again after 20s!")
            end_time = int(time.time())
            if end_time - start_time > 3 * self.ssh_wait_timeout:
                self.fail("VM is not in stopped state after %s seconds!" %
                          self.ssh_wait_timeout)
                break
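The stop and reboot cases above all poll with ad-hoc while loops. As an illustration only, a small generic helper keeps the timeout arithmetic in one place (wait_for is not part of the suite):

# Illustrative polling helper: return True once predicate() is truthy,
# False once the timeout expires.
import time

def wait_for(predicate, timeout, interval=20):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_for(self.vm.is_stopped, 3 * self.ssh_wait_timeout)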
    def test_ethtool_C_coalesce(self):
        '''
        :avocado: tags=test_ethtool_C_coalesce,fast_check
        polarion_id:
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = ' sudo  ethtool -c eth0'
        utils_lib.run_cmd(self, cmd, msg='Show current settings.')
        cmd = "ethtool -C eth0  rx-usecs 3"
        output = utils_lib.run_cmd(self, cmd)
        if "Operation not supported" in output:
            self.cancel("Operation not supported!")
        if "Operation not permitted" in output:
            self.cancel("Operation not permitted")
        self.log.info("Trying to change coalesce")
        coalesce_list = [
            'stats-block-usecs', 'sample-interval', 'pkt-rate-low',
            'pkt-rate-high', 'rx-usecs', 'rx-frames', 'rx-usecs-irq',
            'rx-frames-irq', 'tx-usecs', 'tx-frames', 'tx-usecs-irq',
            'tx-frames-irq', 'rx-usecs-low', 'rx-frame-low', 'tx-usecs-low',
            'tx-frame-low', 'rx-usecs-high', 'rx-frame-high', 'tx-usecs-high',
            'tx-frame-high'
        ]

        for coalesce in coalesce_list:
            cmd = 'sudo ethtool -C eth0 %s 2' % coalesce
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = 'sudo  ethtool -c eth0'
            utils_lib.run_cmd(self, cmd, expect_kw="%s: 2" % coalesce)
        cmd = 'dmesg|tail -20'
        utils_lib.run_cmd(self, cmd)
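A sketch of turning `ethtool -c` output into a dict so individual settings can be compared directly; it assumes the usual "name: value" layout of the coalesce report, and parse_coalesce is not part of the suite.

# Illustrative parser for `ethtool -c ethX` output (assumed "name: value" lines).
def parse_coalesce(output):
    settings = {}
    for line in output.splitlines():
        if ':' not in line:
            continue
        key, _, value = line.partition(':')
        key, value = key.strip(), value.strip()
        if value.isdigit():
            settings[key] = int(value)
    return settings

# e.g. parse_coalesce(out).get('rx-usecs') == 2 after "ethtool -C eth0 rx-usecs 2"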
    def test_ethtool_C_coalesce(self):
        '''
        :avocado: tags=test_ethtool_C_coalesce,fast_check
        description:
        Use ethtool to change the coalescing settings of the specified network device.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_ethtool_C_coalesce"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            network
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Use ethtool to query the specified network device for coalescing information via command "$ sudo ethtool -c  eth0".
            3. Use ethtool to change the coalescing settings via command "$ sudo ethtool -C eth0 rx-usecs 3".
            4. Change the coalescing settings to different values like 
            'stats-block-usecs', 'sample-interval', 'pkt-rate-low',
            'pkt-rate-high', 'rx-usecs', 'rx-frames', 'rx-usecs-irq',
            'rx-frames-irq', 'tx-usecs', 'tx-frames', 'tx-usecs-irq',
            'tx-frames-irq', 'rx-usecs-low', 'rx-frame-low', 'tx-usecs-low',
            'tx-frame-low', 'rx-usecs-high', 'rx-frame-high', 'tx-usecs-high',
            'tx-frame-high'.
        pass_criteria: 
            The coalescing settings can be changed successfully, or if they cannot be changed, a message like "Operation not permitted" should be reported.
            And no error, warning, call trace or other exception in dmesg.
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = ' sudo  ethtool -c eth0'
        utils_lib.run_cmd(self, cmd, msg='Show current settings.')
        cmd = "ethtool -C eth0  rx-usecs 3"
        output = utils_lib.run_cmd(self, cmd)
        if "Operation not supported" in output:
            self.cancel("Operation not supported!")
        if "Operation not permitted" in output:
            self.cancel("Operation not permitted")
        self.log.info("Trying to change coalesce")
        coalesce_list = [
            'stats-block-usecs', 'sample-interval', 'pkt-rate-low',
            'pkt-rate-high', 'rx-usecs', 'rx-frames', 'rx-usecs-irq',
            'rx-frames-irq', 'tx-usecs', 'tx-frames', 'tx-usecs-irq',
            'tx-frames-irq', 'rx-usecs-low', 'rx-frame-low', 'tx-usecs-low',
            'tx-frame-low', 'rx-usecs-high', 'rx-frame-high', 'tx-usecs-high',
            'tx-frame-high'
        ]

        for coalesce in coalesce_list:
            cmd = 'sudo ethtool -C eth0 %s 2' % coalesce
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = 'sudo  ethtool -c eth0'
            utils_lib.run_cmd(self, cmd, expect_kw="%s: 2" % coalesce)
        cmd = 'dmesg|tail -20'
        utils_lib.run_cmd(self, cmd)
 def test_ethtool_G(self):
     '''
     :avocado: tags=test_ethtool_G,fast_check
     description:
          os-tests: use ethtool to change the rx/tx ring parameters of the specified network device.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_ethtool_G"
     bugzilla_id: 
         1722628
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority: 
         0
     case_component: 
         network
     key_steps:
         1. Launch an instance on AWS EC2.
         2. Use ethtool to check maximums and current rx/tx ring parameters via command "$ sudo ethtool -g  eth0".
          3. Use ethtool to change the rx/tx ring parameters via command "$ sudo ethtool -G eth0 rx 512 tx 512".
          4. Change the rx/tx ring parameters to maximums, minimal and invalid parameters e.g., -1.
          5. Check the rx/tx ring is changed via command "$ sudo ethtool -g eth0".
     pass_criteria: 
         The supported rx/tx ring parameters can be changed.
         Cannot set the ring parameters to -1.
          And no error, warning, call trace or other exception in dmesg.
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_ethtool_G"
     utils_lib.run_os_tests(self, case_name=case_name)
Example #10
 def test_persistent_route(self):
     '''
     :avocado: tags=test_persistent_route,fast_check
     description:
          os-tests: check if a persistent static route can be added.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_persistent_route"
     bugzilla_id: 
         1971527
     customer_case_id: 
         BZ1971527
     maintainer: 
         xiliang
     case_priority: 
         0
     case_component: 
         network
     key_steps:
         1. # nmcli connection modify 'System eth0' +ipv4.routes "10.8.8.0/24 10.7.9.5"
         2. # nmcli connection down 'System eth0';nmcli connection up 'System eth0'
         3. # ip r
     expected_result:
         New static route added.
         eg. 10.8.8.0/24 via 10.7.9.5 dev eth0 proto static metric 100
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_persistent_route"
     utils_lib.run_os_tests(self, case_name=case_name)
 def test_sriov_ena_dmesg(self):
     '''
     :avocado: tags=test_sriov_ena_dmesg,fast_check
     description:
         Check dmesg related to ENA (Elastic Network Adapter) driver in RHEL on AWS. Linked case RHEL7-87117.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_sriov_ena_dmesg"
     bugzilla_id: 
         n/a
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority:
         0
     case_component: 
         network
     key_steps:
         1. Launch an instance with ENA drivers (instance list refer to description in case test_sriov_ena) on AWS.
         2. Connect the instance via ssh, and verify the ena module is being used on a particular interface via command "$ sudo ethtool -i eth0".
         3. Check the dmesg info related to ena "$ sudo dmesg|grep -w ena".
     pass_criteria: 
         The ena driver is used by the specified network interface.
          There are no error/warning/failure/unsupported feature messages about ena; it is better to compare the output with the previous version to make sure there is no regression that does not show up as an error message.
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     cmd = "ethtool -i eth0"
     output = utils_lib.run_cmd(self, cmd, expect_ret=0)
     if "driver: ena" not in output:
         self.cancel("No ena driver found!")
     self.log.info("Trying to check sriov ena boot messages!")
     aws.check_dmesg(self, 'ena', match_word_exact=True)
 def test_fio_crctest(self):
     '''
     :avocado: tags=test_fio_crctest,acceptance,fast_check,outposts
     polarion_id:
     description:
         Use fio to test the speed of the built-in checksumming functions in RHEL on AWS.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]StorageTest.test_fio_crctest"
     bugzilla_id: 
         n/a
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority:
         0
     case_component: 
         Storage
     key_steps:
         1. Launch an instance on AWS.
          2. Connect to the instance, then use "$ sudo fio --crctest" to test the speed of the built-in checksumming functions.
     pass_criteria: 
         crc test pass.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     cmd = 'sudo fio --crctest'
     utils_lib.run_cmd(
         self,
         cmd,
         expect_ret=0,
         msg='Test  the  speed  of  the built-in checksumming functions.',
         timeout=1200)
 def test_disk_info(self):
     '''
     :avocado: tags=test_disk_info,acceptance,fast_check,outposts
     description:
         Check disk information via fdisk and lsblk in RHEL on AWS. Linked case RHEL7-103855.
          For now, there is no exact result check as the output format may differ on RHEL6/7/8.
          Only the disk count from fdisk and lsblk is compared with what is assigned to the vm.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]StorageTest.test_disk_info"
     bugzilla_id: 
         n/a
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority:
         0
     case_component: 
         Storage
     key_steps:
         1. Launch an instance on AWS.
         2. Check disk information via command "$ sudo fdisk -l".
     pass_criteria: 
          Disk information matches the instance specs, and there is no error, hang or crash in the system.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     fdisk_cmd = 'sudo fdisk -l'
     utils_lib.run_cmd(self, fdisk_cmd, expect_ret=0)
 def test_ethtool_P(self):
     '''
     :avocado: tags=test_ethtool_P,fast_check
     description:
          os-tests: use ethtool to query the permanent address.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_ethtool_P"
     bugzilla_id: 
         1704435
     customer_case_id: 
         BZ1704435
     maintainer: 
         xiliang
     case_priority: 
         0
     case_component: 
         network
     key_steps:
         1. Launch an instance on AWS EC2.
         2. Use ethtool to query permanent address via command "$ sudo ethtool -P ethX".
         3. Check the output of this command.
     pass_criteria: 
         Actual permanent address should be shown, but not 00:00:00:00:00:00.
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_ethtool_P"
     utils_lib.run_os_tests(self, case_name=case_name)
Example #15
    def test_start_vm_iommu(self):
        '''
        :avocado: tags=test_start_vm_iommu
        description:
            Test booting an RHEL bare metal instance on AWS with iommu. Note this case is only for bare metal instances since iommu is used when configuring a host for PCI passthrough.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_start_vm_iommu"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch a bare metal instance on AWS EC2.
            2. Connect the instance via ssh with user:ec2-user, add iommu parameter to kernel command line with command "sudo grubby --update-kernel=ALL --args="intel_iommu=on"".
            3. Reboot instance, check if instance can boot up, and check the kernel command line with command "cat /proc/cmdline".
        pass_criteria: 
            Instance boots up normally and "intel_iommu=on" is present in the kernel command line.
        '''
        if 'metal' in self.vm.instance_type:
            self.log.info("Instance is bare metal")
        else:
            self.log.info("Instance is xen/kvm")
            self.cancel("Only run in bare metal instances!")
        self.session.connect(timeout=self.ssh_wait_timeout)
        utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')

        utils_lib.run_cmd(self,
                    'lscpu',
                    expect_ret=0,
                    cancel_not_kw="Xen,aarch64,AuthenticAMD")

        cmd = 'sudo grubby --update-kernel=ALL --args="intel_iommu=on"'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.session.connect(timeout=self.ssh_wait_timeout)
            aws.check_session(self)
            utils_lib.run_cmd(self,
                        'cat /proc/cmdline',
                        msg='Get instance boot cmdline')
            cmd = 'sudo grubby --update-kernel=ALL \
--remove-args="intel_iommu=on"'

            utils_lib.run_cmd(self, cmd, expect_ret=0)

        else:
            self.fail("Failed to start instance!")
    def test_nvme_basic(self):
        '''
        :avocado: tags=test_nvme_basic,acceptance,fast_check,outposts
        polarion_id: RHEL7-87122
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        cmd = 'lsblk'
        out = utils_lib.run_cmd(self, cmd)
        if 'nvme' not in out:
            self.cancel('No nvme disk found!')
        aws.install_pkgs(self.session, 'pciutils')
        find_nvme_pci = 'sudo lspci|grep Non-Volatile'
        find_nvme_module = 'sudo lsmod|grep nvme'
        utils_lib.run_cmd(self,
                    find_nvme_pci,
                    expect_ret=0,
                    msg='Try to find nvme pci!')
        utils_lib.run_cmd(self,
                    find_nvme_module,
                    expect_ret=0,
                    msg='Try to find nvme module in loading drivers!')

        aws.install_pkgs(self.session, 'nvme-cli')
        nvme_list = 'sudo nvme list'
        self.log.info("CMD: %s" % nvme_list)
        output = utils_lib.run_cmd(self, nvme_list, expect_ret=0)
        search_for = re.compile(r'/dev/nvme\dn\d')
        nvme_blks = search_for.findall(output)
        if len(nvme_blks) > 0:
            self.log.info("Found nvme devices %s" % nvme_blks)
        else:
            self.fail("No nvme blks found %s" % output)
        output = utils_lib.run_cmd(self, 'lsblk', expect_ret=0)
        if 'xvda' in output:
            bootdisk = 'xvda'
        else:
            cmd = " sudo lsblk -o NAME,MOUNTPOINT|grep -w '/'"
            out = utils_lib.run_cmd(self, cmd)
            bootdisk = re.findall('nvme[0-9]+', out)[0]
        self.log.info("Boot disk is %s" % bootdisk)
        for nvme_blk in nvme_blks:
            nvme_read = 'sudo nvme read %s --data-size=10000' % nvme_blk
            utils_lib.run_cmd(self,
                        nvme_read,
                        expect_ret=0,
                        expect_kw=r'read: Success',
                        msg="%s read test" % nvme_blk)
            if bootdisk not in nvme_blk:
                nvme_write = 'echo "write test"|sudo nvme write %s \
--data-size=10000' % nvme_blk
                utils_lib.run_cmd(self,
                            nvme_write,
                            expect_ret=0,
                            expect_kw=r'write: Success',
                            msg="%s write test" % nvme_blk)
Example #17
    def test_multi_disk_hotplug(self):
        '''
        :avocado: tags=test_multi_disk_hotplug,acceptance
        check disk hotplug when instance running
        will add disk read&write test later
        polarion_id: RHEL7-93570
        '''
        disk_dict = {
            self.disk1: 'sds',
            self.disk2: 'sdt',
            self.disk3: 'sdu',
            self.disk4: 'sdv'
        }
        if self.vm.is_stopped():
            self.vm.start()
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        count1 = self._get_disk_online()
        dmesg1 = self.session.cmd_output('dmesg')
        for i in disk_dict.keys():
            if i.is_attached():
                i.detach_from_instance(force=True)
            if not i.attach_to_instance(self.vm.instance_id, disk_dict.get(i)):
                aws.get_debug_log(self)
                self.fail("Attached failed!")
        aws.run_cmd(self, 'dmesg|tail -20', msg='save dmesg after attached!')
        time.sleep(30)
        count2 = self._get_disk_online()
        if count2 - count1 != 4:
            self.fail("count2(%s) - count1(%s) not equal new addded 4!" %
                      (count2, count1))
        for i in disk_dict.keys():
            if not i.detach_from_instance():
                aws.get_debug_log(self)
                self.fail("Dettached failed!")

        dmesg2 = self.session.cmd_output('dmesg')
        if not aws.compare_dmesg(dmesg1, dmesg2):
            self.fail("dmesg log check fail!")
        # test system can reboot with multidisks
        for i in disk_dict.keys():
            if i.attach_to_instance(self.vm.instance_id, disk_dict.get(i)):
                self.log.info('Attached successfully!')
            else:
                aws.get_debug_log(self)
                self.fail("Attached failed!")

        self.vm.reboot()
        self.session.connect(timeout=self.ssh_wait_timeout)
        self._check_disk_count()
        for i in disk_dict.keys():
            if i.detach_from_instance():
                self.log.info('Detached successfully!')
            else:
                aws.get_debug_log(self)
                self.fail("Dettached failed!")
    def test_ethtool_s_msglvl(self):
        '''
        :avocado: tags=test_ethtool_s_msglvl,fast_check
        description:
            Test using ethtool to change the driver message type flags in RHEL on AWS.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_ethtool_s_msglvl"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            network
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Use ethtool to disable all msglvl via command "$ sudo ethtool -s ethX msglvl 0".
            3. Change the driver message type to different values like 'drv', 'probe', 'link', 'timer', 'ifdown', 'ifup', 'rx_err', 'tx_err', 'tx_queued', 'intr', 'tx_done', 'rx_status', 'pktdata', 'hw', 'wol'.
               "$ sudo ethtool -s eth0 msglvl $type on"
               "$ sudo ethtool -s eth0 msglvl $type off"
            4. Check the message level after each change via command "$ sudo ethtool eth0".
        pass_criteria: 
            When all msglvl disabled, no message level name displays.
            The corresponding message level name displays when it's on.
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = "ethtool eth0"
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        if "Current message level" not in output:
            self.cancel("Operation not supported!")
        self.log.info("Trying to change msglvl")
        msglvl_list = [
            'drv', 'probe', 'link', 'timer', 'ifdown', 'ifup', 'rx_err',
            'tx_err', 'tx_queued', 'intr', 'tx_done', 'rx_status', 'pktdata',
            'hw', 'wol'
        ]
        cmd = 'sudo  ethtool -s eth0 msglvl 0'
        utils_lib.run_cmd(self, cmd, msg='Disable all msglvl for now!')
        for msglvl in msglvl_list:
            cmd = 'sudo ethtool -s eth0 msglvl %s on' % msglvl
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo ethtool eth0"
            utils_lib.run_cmd(self, cmd, expect_kw=msglvl)

        for msglvl in msglvl_list:
            cmd = 'sudo ethtool -s eth0 msglvl %s off' % msglvl
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo ethtool eth0|grep -v 'link modes'"
            utils_lib.run_cmd(self, cmd, expect_not_kw=msglvl)
        cmd = 'dmesg|tail -20'
        utils_lib.run_cmd(self, cmd)
Example #19
 def test_ethtool_G(self):
     '''
     :avocado: tags=test_ethtool_G,fast_check
     polarion_id:
     bz#: 1722628
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_ethtool_G"
     utils_lib.run_os_tests(self, case_name=case_name)
Example #20
 def test_ethtool_P(self):
     '''
     :avocado: tags=test_ethtool_P,fast_check
     polarion_id:
     bug_id: 1704435
     Assertion: Can read mac address successfully
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_ethtool_P"
     utils_lib.run_os_tests(self, case_name=case_name)
Example #21
    def test_reboot_vm_from_control(self):
        '''
        :avocado: tags=test_reboot_vm_from_control,kernel_tier1
        description:
        Test rebooting an RHEL instance from the AWS platform. Linked case RHEL7-103636.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_reboot_vm_from_control"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch an instance on AWS EC2.
            2. From AWS console, AWS cli or API, reboot the instance.
        pass_criteria: 
            Instance reboots normally, and there is a new record for the latest reboot in the output of the "last" command.
            Note: It will take a longer time for a bare metal instance to reboot.
        '''

        self.session.connect(timeout=self.ssh_wait_timeout)
        time.sleep(10)
        aws.check_session(self)

        utils_lib.run_cmd(self,
                    'last|grep reboot',
                    expect_ret=0,
                    msg="Before rebooting %s" % self.vm.instance_id)
        self.log.info("Rebooting %s" % self.vm.instance_id)
        if self.vm.reboot():
            if 'metal' in self.vm.instance_type:
                self.log.info("Wait %s" % self.ssh_wait_timeout)
                time.sleep(self.ssh_wait_timeout)
            else:
                self.log.info("Wait 30s")
                time.sleep(30)
            if self.session.session.is_responsive():
                self.fail("SSH connection keeps live!")
            self.session.connect(timeout=self.ssh_wait_timeout)
            utils_lib.run_cmd(self,
                        'last|grep reboot',
                        expect_ret=0,
                        msg="After reboot %s" % self.vm.instance_id)
            self.log.info("Reboot %s successfully" % self.vm.instance_id)

        else:
            self.fail("Reboot %s operation failed!" % self.vm.instance_id)
    def test_create_vm(self):
        '''
        :avocado: tags=test_create_vm
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)

        aws.run_cmd(self,
                    'whoami',
                    expect_ret=0,
                    expect_output=self.vm.vm_username,
                    msg="New VM is created: %s" % self.vm.instance_id)
        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example #23
 def test_mtu_min_set(self):
     '''
     :avocado: tags=test_mtu_min_set,fast_check
     polarion_id: RHEL-111097
     ena mtu range: 128~9216
     ixgbevf mtu range: 68~9710
     vif mtu range: 68~65535
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     case_name = "os_tests.tests.test_network_test.TestNetworkTest.test_mtu_min_max_set"
     utils_lib.run_os_tests(self, case_name=case_name)
Example #24
    def test_stop_vm_inside_guest(self):
        '''
        :avocado: tags=test_stop_vm_inside_guest
        description:
        Test stopping an RHEL instance on AWS from inside the instance. Linked case RHEL7-103846.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_stop_vm_inside_guest"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Connect to the instance via ssh, run command "sudo init 0" inside the instance to stop the instance.
        pass_criteria: 
            Instance status is stopped.
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')

        self.log.info("Before shuting down %s" % self.vm.instance_id)
        output = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output)
        self.log.info("Stopping vm from inside itself %s" %
                      self.vm.instance_id)
        self.session.session.sendline('sudo init 0')

        time.sleep(30)
        if self.session.session.is_responsive():
            self.fail("SSH connection keeps live!")
        start_time = int(time.time())
        while True:
            time.sleep(20)
            if self.vm.is_stopped():
                self.log.info("VM is stopped!")
                break
            else:
                self.log.info(
                    "VM is not in stopped state, check again after 20s!")
            end_time = int(time.time())
            if end_time - start_time > 3 * self.ssh_wait_timeout:
                self.fail("VM is not in stopped state after %s seconds!" %
                          self.ssh_wait_timeout)
                break
Example #25
    def test_reboot_vm_inside_guest(self):
        '''
        :avocado: tags=test_reboot_vm_inside_guest
        description:
        Test rebooting an RHEL instance on AWS from inside the instance. Linked case RHEL7-103635.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_reboot_vm_inside_guest"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Connect to the instance via ssh, run command "sudo reboot" inside the instance to reboot the instance.
        pass_criteria: 
            Instance reboots normally, and there is a new record for the latest reboot in the output of the "last" command.
            Note: It will take a longer time for a bare metal instance to reboot.
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        time.sleep(10)
        aws.check_session(self)

        self.log.info("Before rebooting %s" % self.vm.instance_id)
        output1 = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output1)
        # session.cmd_output fails to get ret status as reboot closes the connection.
        # Consider adding a function to guest.py to handle this. For now, use
        # sendline directly.
        self.session.session.sendline('sudo reboot')
        self.log.info("Rebooting %s via ssh" % self.vm.instance_id)
        if 'metal' in self.vm.instance_type:
            self.log.info("Wait %s" % self.ssh_wait_timeout)
            time.sleep(self.ssh_wait_timeout)
        else:
            self.log.info("Wait 60s")
            time.sleep(60)
        if self.session.session.is_responsive():
            self.fail("SSH connection keeps live!")

        self.session.connect(timeout=self.ssh_wait_timeout)
        output2 = utils_lib.run_cmd(self, 'last|grep reboot')
        self.log.info("VM last reboot log:\n %s" % output2)
        # self.assertEqual(output1, output2, "Reboot %s operation failed!" % \
        #     self.vm.instance_id)
        self.log.info("Reboot %s successfully" % self.vm.instance_id)
 def test_disk_info(self):
     '''
     :avocado: tags=test_disk_info,acceptance,fast_check,outposts
      Check disk information via fdisk and lsblk.
      For now, there is no exact result check as the output format may differ
      on RHEL6/7/8.
      Only the disk count from fdisk and lsblk is compared with what is assigned to the vm.
     polarion_id: RHEL7-103855
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     fdisk_cmd = 'sudo fdisk -l'
     utils_lib.run_cmd(self, fdisk_cmd, expect_ret=0)
    def test_mtu_min_set(self):
        '''
        :avocado: tags=test_mtu_min_set,fast_check
        polarion_id: RHEL-111097
        ena mtu range: 128~9216
        ixgbevf mtu range: 68~9710
        vif mtu range: 68~65535
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = "sudo ethtool -i eth0"
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
        if 'ena' in output:
            self.log.info('ena found!')
            mtu_range = [0, 127, 128, 4500, 9216, 9217]
            mtu_min = 128
            mtu_max = 9216
        elif 'ixgbe' in output:
            self.log.info('ixgbevf found!')
            mtu_range = [0, 67, 68, 4500, 9710, 9711]
            mtu_min = 68
            mtu_max = 9710
        elif 'vif' in output:
            self.log.info('vif found!')
            mtu_range = [0, 67, 68, 4500, 65535, 65536]
            mtu_min = 68
            mtu_max = 65535
        else:
            self.fail('Did not detect network type! %s' % output)

        self.log.info("Trying to change mtu to %s" % mtu_range)
        for mtu_size in mtu_range:
            mtu_cmd = "sudo ip link set dev eth0 mtu %s" % mtu_size
            mtu_check = "sudo ip link show dev eth0"
            self.log.info("CMD: %s" % mtu_cmd)
            status, output = self.session1.cmd_status_output(mtu_cmd)
            if mtu_size <= mtu_max and mtu_size >= mtu_min:
                self.assertEqual(status,
                                 0,
                                 msg='Change mtu size failed! %s' % output)
            elif mtu_size < mtu_min or mtu_size > mtu_max:
                self.assertGreater(
                    status,
                    0,
                    msg='Changed mtu size successfully, which should not happen! %s' %
                    output)

            status, output = self.session1.cmd_status_output(mtu_check)
            self.log.info("After set mtu size %s \n %s " % (mtu_size, output))
 def test_sriov_ena_dmesg(self):
     '''
     :avocado: tags=test_sriov_ena_dmesg,fast_check
     polarion_id:
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     cmd = "ethtool -i eth0"
     output = utils_lib.run_cmd(self, cmd, expect_ret=0)
     if "driver: ena" not in output:
         self.cancel("No ena driver found!")
     self.log.info("Trying to check sriov ena boot messages!")
     aws.check_dmesg(self, 'ena', match_word_exact=True)
 def test_fio_crctest(self):
     '''
     :avocado: tags=test_fio_crctest,acceptance,fast_check,outposts
     polarion_id:
     Test  the  speed  of  the built-in checksumming functions.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     cmd = 'sudo fio --crctest'
     utils_lib.run_cmd(
         self,
         cmd,
         expect_ret=0,
         msg='Test  the  speed  of  the built-in checksumming functions.', timeout=1200)
    def test_iperf_ipv4(self):
        '''
        :avocado: tags=test_iperf_ipv4
        polarion_id:
        For now, we only run the iperf test and do not compare the result with a
        standard. If there is a big gap, please manually run it inside the same
        placement group.
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        perf_spec = self.params.get('net_perf', '*/instance_types/*')
        if int(perf_spec) > 40:
            self.cancel('Cancel case as iperf3 is not suitable for \
bandwidth higher than 40G')
        self._iperf3_test()
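_iperf3_test is defined elsewhere in the suite. As an illustration only, a minimal client-side run with a hypothetical server address could look like the sketch below; the real helper may differ.

# Hypothetical iperf3 client run; server_ip is a placeholder.
import json
import subprocess

def iperf3_throughput_gbps(server_ip, seconds=30, streams=4):
    cmd = ['iperf3', '-c', server_ip, '-t', str(seconds),
           '-P', str(streams), '--json']
    result = json.loads(subprocess.run(cmd, capture_output=True,
                                       text=True).stdout)
    return result['end']['sum_received']['bits_per_second'] / 1e9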