Example 1
 def test_check_dmesg_calltrace(self):
     '''
     :avocado: tags=test_check_dmesg_calltrace,fast_check,kernel_tier1
     polarion_id: RHEL7-103851
     bz#: 1777179
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.run_cmd(self, 'dmesg', expect_ret=0, expect_not_kw='Call Trace')
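
All of the examples below funnel command execution and checking through aws.run_cmd() (or utils_lib.run_cmd()), whose implementation is not part of these excerpts. The following is only a minimal sketch of what such a helper might look like, inferred from the keyword arguments used in the examples (expect_ret, expect_not_ret, expect_kw, expect_not_kw, expect_output, cancel_kw, cancel_not_kw, msg, timeout); the session API and the exact checking behaviour shown here are assumptions, not the real library code.

def run_cmd(test_instance, cmd, expect_ret=None, expect_not_ret=None,
            expect_kw=None, expect_not_kw=None, expect_output=None,
            cancel_kw=None, cancel_not_kw=None, msg=None, timeout=60):
    '''
    Hypothetical sketch only: the real run_cmd helper used by these tests is
    not shown, so names and behaviour here are assumptions inferred from the
    calls in the examples.
    '''
    if msg is not None:
        test_instance.log.info(msg)
    test_instance.log.info("CMD: %s" % cmd)
    status, output = test_instance.session.cmd_status_output(cmd,
                                                             timeout=timeout)
    if cancel_kw is not None and not any(
            kw in output for kw in cancel_kw.split(',')):
        # none of the required keywords found, skip the case
        test_instance.cancel("None of '%s' found, cancel case." % cancel_kw)
    if cancel_not_kw is not None and any(
            kw in output for kw in cancel_not_kw.split(',')):
        # a keyword that rules out this environment was found, skip the case
        test_instance.cancel("'%s' found, cancel case." % cancel_not_kw)
    if expect_ret is not None:
        test_instance.assertEqual(expect_ret, status,
                                  msg="Unexpected return code: %s" % status)
    if expect_not_ret is not None:
        test_instance.assertNotEqual(expect_not_ret, status,
                                     msg="Unexpected return code: %s" % status)
    if expect_kw is not None:
        for kw in expect_kw.split(','):
            test_instance.assertIn(kw, output,
                                   msg="'%s' not found in output" % kw)
    if expect_not_kw is not None:
        for kw in expect_not_kw.split(','):
            test_instance.assertNotIn(kw, output,
                                      msg="'%s' found in output" % kw)
    if expect_output is not None:
        test_instance.assertEqual(expect_output, output.strip(),
                                  msg="Output does not match expectation")
    return output
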
Example 2
 def test_check_avclog(self):
     '''
     :avocado: tags=test_check_avclog,fast_check,kernel_tier1
     polarion_id: N/A
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     cmd = "sudo ausearch -m AVC -ts today"
     aws.run_cmd(self, cmd, expect_not_ret=0, msg='Checking avc log!')
     aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example 3
    def test_multi_disk_hotplug(self):
        '''
        :avocado: tags=test_multi_disk_hotplug,acceptance
        check disk hotplug when instance running
        will add disk read&write test later
        polarion_id: RHEL7-93570
        '''
        disk_dict = {
            self.disk1: 'sds',
            self.disk2: 'sdt',
            self.disk3: 'sdu',
            self.disk4: 'sdv'
        }
        if self.vm.is_stopped():
            self.vm.start()
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        count1 = self._get_disk_online()
        dmesg1 = self.session.cmd_output('dmesg')
        for i in disk_dict.keys():
            if i.is_attached():
                i.detach_from_instance(force=True)
            if not i.attach_to_instance(self.vm.instance_id, disk_dict.get(i)):
                aws.get_debug_log(self)
                self.fail("Attached failed!")
        aws.run_cmd(self, 'dmesg|tail -20', msg='save dmesg after attached!')
        time.sleep(30)
        count2 = self._get_disk_online()
        if count2 - count1 != 4:
            self.fail("count2(%s) - count1(%s) not equal new addded 4!" %
                      (count2, count1))
        for i in disk_dict.keys():
            if not i.detach_from_instance():
                aws.get_debug_log(self)
                self.fail("Dettached failed!")

        dmesg2 = self.session.cmd_output('dmesg')
        if not aws.compare_dmesg(dmesg1, dmesg2):
            self.fail("dmesg log check fail!")
        # test that the system can reboot with multiple disks attached
        for i in disk_dict.keys():
            if i.attach_to_instance(self.vm.instance_id, disk_dict.get(i)):
                self.log.info('Attached successfully!')
            else:
                aws.get_debug_log(self)
                self.fail("Attached failed!")

        self.vm.reboot()
        self.session.connect(timeout=self.ssh_wait_timeout)
        self._check_disk_count()
        for i in disk_dict.keys():
            if i.detach_from_instance():
                self.log.info('Detached successfully!')
            else:
                aws.get_debug_log(self)
                self.fail("Dettached failed!")
Example 4
    def test_check_modload(self):
        '''
        :avocado: tags=test_check_modload,fast_check
        polarion_id:
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        utils_lib.run_cmd(self, 'lsmod', expect_ret=0)

        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example 5
    def test_ethtool_C_coalesce(self):
        '''
        :avocado: tags=test_ethtool_C_coalesce,fast_check
        polarion_id:
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = 'sudo ethtool -c eth0'
        aws.run_cmd(self, cmd, msg='Show current settings.')
        cmd = "sudo ethtool -C eth0 rx-usecs 3"
        output = aws.run_cmd(self, cmd)
        if "Operation not supported" in output:
            self.cancel("Operation not supported!")
        if "Operation not permitted" in output:
            self.cancel("Operation not permitted")
        self.log.info("Trying to change coalesce")
        coalesce_list = [
            'stats-block-usecs', 'sample-interval', 'pkt-rate-low',
            'pkt-rate-high', 'rx-usecs', 'rx-frames', 'rx-usecs-irq',
            'rx-frames-irq', 'tx-usecs', 'tx-frames', 'tx-usecs-irq',
            'tx-frames-irq', 'rx-usecs-low', 'rx-frame-low', 'tx-usecs-low',
            'tx-frame-low', 'rx-usecs-high', 'rx-frame-high', 'tx-usecs-high',
            'tx-frame-high'
        ]

        for coalesce in coalesce_list:
            cmd = 'sudo ethtool -C eth0 %s 2' % coalesce
            aws.run_cmd(self, cmd, expect_ret=0)
            cmd = 'sudo  ethtool -c eth0'
            aws.run_cmd(self, cmd, expect_kw="%s: 2" % coalesce)
        cmd = 'dmesg|tail -20'
        aws.run_cmd(self, cmd)
Example 6
 def test_check_microcode_load(self):
     '''
     :avocado: tags=test_check_microcode_load,fast_check
     For bug 1607899, RHEL should not update microcode inside VMs.
     This case checks it from dmesg output.
     polarion_id: N/A
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     lscpu_output = aws.run_cmd(self, 'lscpu', expect_ret=0)
     aws.run_cmd(self,
                 'lscpu',
                 expect_ret=0,
                 cancel_not_kw="aarch64,AMD",
                 msg="Only run in intel platform")
     cmd = 'rpm -qa|grep microcode'
     aws.run_cmd(self, cmd)
     cmd = 'dmesg|grep microcode|grep -v "no microcode"'
     if 'Xen' in lscpu_output or 'KVM' in lscpu_output:
         aws.run_cmd(self,
                     cmd,
                     expect_not_ret=0,
                     msg='microcode should not load in VMs')
     else:
         aws.run_cmd(self,
                     cmd,
                     expect_ret=0,
                     msg='microcode should load on bare metal')
Example 7
    def test_check_memleaks(self):
        '''
        :avocado: tags=test_check_memleaks
        polarion_id: RHEL-117648
        '''
        self.log.info("Check memory leaks")
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.run_cmd(self,
                    'uname -a',
                    expect_ret=0,
                    cancel_kw="debug",
                    msg="Only run in debug kernel")
        aws.run_cmd(self,
                    'cat /proc/cmdline',
                    expect_ret=0,
                    cancel_kw="kmemleak=on",
                    msg="Only run with kmemleak=on")

        aws.run_cmd(self, 'sudo su', expect_ret=0)
        cmd = 'echo scan > /sys/kernel/debug/kmemleak'
        aws.run_cmd(self, cmd, expect_ret=0, timeout=1800)

        cmd = 'cat /sys/kernel/debug/kmemleak'
        output = aws.run_cmd(self, cmd, expect_ret=0)
        if len(output) > 0:
            self.fail('Memory leak found!')
Example 8
 def test_check_dmesg_unknownsymbol(self):
     '''
     :avocado: tags=test_check_dmesg_unknownsymbol,fast_check,kernel_tier1
     polarion_id:
     bz#: 1649215
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.run_cmd(self,
                 'dmesg',
                 expect_ret=0,
                 expect_not_kw='Unknown symbol',
                 msg='Check there is no Unknown symbol')
Example 9
    def test_create_vm(self):
        '''
        :avocado: tags=test_create_vm
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)

        aws.run_cmd(self,
                    'whoami',
                    expect_ret=0,
                    expect_output=self.vm.vm_username,
                    msg="New VM is created: %s" % self.vm.instance_id)
        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example 10
 def test_check_journalctl_traceback(self):
     '''
     :avocado: tags=test_check_journalctl_traceback,fast_check
     polarion_id:
     bz#: 1801999, 1736818
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     # redirect journalctl output to a file as it does not return
     # normally in RHEL7
     cmd = 'journalctl > /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0)
     cmd = 'cat /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0, expect_not_kw='Traceback,Backtrace')
Example 11
 def test_check_journalctl_dumpedcore(self):
     '''
     :avocado: tags=test_check_journalctl_dumpedcore,fast_check
     polarion_id:
     bz#: 1797973
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     # redirect journalctl output to a file as it does not return
     # normally in RHEL7
     cmd = 'journalctl > /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0)
     cmd = 'cat /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0, expect_not_kw='dumped core')
Example 12
    def tearDown(self):

        self.session = self.session1
        if self.session.session.is_responsive(
        ) is not None and self.vm1.is_started():
            if self.name.name.endswith("test_pci_reset"):
                cmd = 'sudo dmesg --clear'
                aws.run_cmd(self, cmd, msg='Clear dmesg')
            aws.gcov_get(self)
            aws.get_memleaks(self)
            self.session.close()
        self.session1.close()
        if self.name.name.endswith("test_iperf_ipv4"):
            self.session2.close()
Example 13
 def test_ltp_ipsec_icmp(self):
     '''
     :avocado: tags=test_ltp_ipsec_icmp
     polarion_id: RHEL7-98754
     '''
     utils_lib.ltp_run(self,
                       case_name="icmp4-uni-vti11",
                       file_name='net_stress.ipsec_icmp')
     self.log.info("Try to remove ccm module after test.")
     try:
         aws.run_cmd(self, 'sudo modprobe -r ccm', expect_ret=0)
     except Exception as err:
         aws.handle_exception(self.vm, err)
         self.fail("Got exceptions during test!")
Example 14
    def test_collect_log(self):
        '''
        :avocado: tags=test_collect_log
        polarion_id: N/A
        '''
        self.log.info("This case is only saving log for future check purpose")
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_cmd(self, cmd='wget')
        aws.check_cmd(self, cmd='tar')
        aws.check_cmd(self, cmd='unzip')
        virt_utils_url = "https://github.com/SCHEN2015/virt-utils/archive/\
master.zip"

        self.log.info("Download %s" % virt_utils_url)
        cmd = "wget %s ; unzip master.zip" % virt_utils_url
        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = "virt-utils-master/vm_check/vm_check.sh"
        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = "tar zcf vmlog.tar.gz workspace/log"
        aws.run_cmd(self, cmd, expect_ret=0)
        remote_path = "vmlog.tar.gz"
        local_path = "%s/%s_vmlog.tar.gz" % (self.job.logdir,
                                             self.vm.instance_type)
        self.log.info("Copy %s from guest to %s, please wait" %
                      (remote_path, local_path))
        self.session.copy_files_from(remote_path, local_path, timeout=600)

        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example 15
 def _get_blktest(self):
     '''
     Clone blktests from github
     '''
     test_disk = self._get_test_disk()
     cmd = 'sudo yum install -y blktrace fio nvme-cli git sysstat'
     aws.run_cmd(self, cmd)
     cmd = 'which git'
     aws.run_cmd(self, cmd, expect_ret=0)
     aws.run_cmd(self, 'sudo rm -rf blktests', expect_ret=0)
     cmd = 'git clone https://github.com/osandov/blktests.git'
     aws.run_cmd(self, cmd, expect_ret=0)
     cmd = "echo 'TEST_DEVS=(/dev/%s)' > blktests/config" % test_disk
     aws.run_cmd(self, cmd, expect_ret=0)
Example 16
 def test_disk_info(self):
     '''
     :avocado: tags=test_disk_info,acceptance,fast_check
     check disk information via fdisk and lsblk.
     For now, the result is not checked exactly as the output format may
     differ on RHEL6/7/8.
     Only the disk counts from fdisk and lsblk are compared to the number
     assigned to the vm.
     polarion_id: RHEL7-103855
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     fdisk_cmd = 'sudo fdisk -l'
     aws.run_cmd(self, fdisk_cmd, expect_ret=0)
Example 17
 def test_check_journalctl_invalid(self):
     '''
     :avocado: tags=test_check_journalctl_invalid,fast_check
     polarion_id:
     BZ#:1750417
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     # redirect journalctl output to a file as it does not return
     # normally in RHEL7
     # skip sshd to filter out invalid user message
     cmd = 'journalctl|grep -v sshd|grep -v MTU > /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0)
     cmd = 'cat /tmp/journalctl.log'
     aws.run_cmd(self, cmd, expect_ret=0, expect_not_kw='invalid,Invalid')
Example 18
 def test_fio_crctest(self):
     '''
     :avocado: tags=test_fio_crctest,acceptance,fast_check
     polarion_id:
     Test the speed of the built-in checksumming functions.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     cmd = 'sudo fio --crctest'
     aws.run_cmd(
         self,
         cmd,
         expect_ret=0,
         msg='Test the speed of the built-in checksumming functions.')
Example 19
 def test_fio_cpuclock(self):
     '''
     :avocado: tags=test_fio_cpuclock,acceptance,fast_check
     polarion_id:
     Perform test and validation of internal CPU clock.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     aws.run_cmd(self, 'sudo lscpu', cancel_not_kw="aarch64")
     cmd = 'sudo fio --cpuclock-test'
     aws.run_cmd(self,
                 cmd,
                 expect_ret=0,
                 expect_kw="Pass",
                 msg='Perform test and validation of internal CPU clock.')
Example 20
    def test_check_release_name(self):
        '''
        :avocado: tags=test_check_release_name,fast_check
        check /etc/redhat-release has the correct name
        polarion_id: RHEL7-103850
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        check_cmd = r"sudo cat /etc/redhat-release"
        self.log.info("Check release name cmd: %s" % check_cmd)
        status, output = self.session.cmd_status_output(check_cmd)
        kernel_ver = aws.run_cmd(self, 'uname -r')
        self.log.info("Guest kernel version: %s " % kernel_ver)
        if status == 0:
            if '2.6.32' in kernel_ver:
                self.assertIn(
                    'Red Hat Enterprise Linux Server release 6',
                    output,
                    msg="It should be like: Red Hat Enterprise Linux \
Server release 6.n\n but it is %s" % output)
            elif '3.10.0' in kernel_ver:
                self.assertIn(
                    'Red Hat Enterprise Linux Server release 7',
                    output,
                    msg="It should be like: Red Hat Enterprise Linux \
Server release 7.n\n but it is %s" % output)
            elif 'el8' in kernel_ver:
                self.assertIn(
                    'Red Hat Enterprise Linux release 8',
                    output,
                    msg="It should be like: Red Hat Enterprise Linux \
release 8.n\n but it is %s" % output)
            self.log.info("Check PASS: %s" % output)
        else:
            self.fail("Failed to get /etc/redhat-release information!")
Example 21
    def test_start_vm(self):
        '''
        :avocado: tags=test_start_vm
        polarion_id: RHEL7-103633
        '''

        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.log.info("Instance is started: %s" % self.vm.instance_id)
            self.session.connect(timeout=self.ssh_wait_timeout)
            aws.check_session(self)
            aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
        else:
            self.fail("Failed to start instance!")
Example 22
    def test_xenfs_mount(self):
        '''
        :avocado: tags=test_xenfs_mount,fast_check
        polarion_id:
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)

        aws.run_cmd(self,
                    'sudo lscpu',
                    expect_ret=0,
                    cancel_kw="Xen",
                    msg="Only run in xen instance")

        cmd = r'sudo mount -t xenfs xenfs /proc/xen/'
        aws.run_cmd(self, cmd, expect_ret=0)
        utils_lib.run_cmd(self,
                          'sudo ls /proc/xen',
                          expect_ret=0,
                          expect_kw='capabilities  privcmd  xenbus')
Example 23
    def test_kdump_no_specify_cpu(self):
        '''
        :avocado: tags=test_kdump_no_specify_cpu,acceptance,fast_check
        polarion_id: RHEL7-58669
        bz#: 1654962
        '''
        if not self.kdump_status:
            self.cancel("Cancle test as kdump not running!")
        self.session.connect(timeout=self.ssh_wait_timeout)
        time.sleep(10)
        output = utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
        if 'aarch64' in output and 'metal' not in self.vm.instance_type:
            self.log.info("arm instance")
            self.cancel("Cancel it as bug 1654962 in arm instances which \
no plan to fix it in the near future!")

        aws.run_cmd(self,
                    r'sudo rm -rf /var/crash/*',
                    expect_ret=0,
                    msg='clean /var/crash firstly')
        aws.run_cmd(self, r'sudo sync', expect_ret=0)
        self.log.info("Before system crash %s" % self.vm.instance_id)
        aws.run_cmd(self,
                    r'find /var/crash',
                    expect_ret=0,
                    msg='list /var/crash')
        self.log.info("Crashing %s via ssh" % self.vm.instance_id)
        self._trigger_kdump_on_cpu()

        if 'metal' in self.vm.instance_type:
            self.log.info("Wait %s" % self.ssh_wait_timeout)
            time.sleep(self.ssh_wait_timeout)
        else:
            self.log.info("Wait 30s")
            time.sleep(30)
        self.session.connect(timeout=640)
        self.log.info("After system crash %s" % self.vm.instance_id)
        aws.run_cmd(self,
                    r'find /var/crash',
                    expect_ret=0,
                    msg='list /var/crash after crash')
        cmd = r'sudo cat /var/crash/1*/vmcore-dmesg.txt|tail -50'
        aws.run_cmd(self, cmd, expect_ret=0, expect_kw='write_sysrq_trigger')
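
The kdump case above triggers the crash through the private helper _trigger_kdump_on_cpu(), which is not shown in these excerpts. A minimal sketch follows, assuming the crash is forced via sysrq over the existing ssh session; apart from the helper's name, everything here is an assumption.

    def _trigger_kdump_on_cpu(self, cpu=None):
        '''
        Hypothetical sketch: enable sysrq and force a kernel crash so that
        kdump captures a vmcore. The real helper is not part of this excerpt.
        '''
        aws.run_cmd(self, r'sudo sh -c "echo 1 > /proc/sys/kernel/sysrq"',
                    expect_ret=0)
        trigger_cmd = r'sudo sh -c "echo c > /proc/sysrq-trigger"'
        if cpu is not None:
            # pin the trigger to a specific cpu when one is requested
            trigger_cmd = (r'sudo taskset -c %s sh -c '
                           r'"echo c > /proc/sysrq-trigger"' % cpu)
        self.log.info("Send %s" % trigger_cmd)
        try:
            # the ssh session is expected to drop when the kernel panics, so
            # a timeout or connection error here is not treated as a failure
            self.session.cmd_output(trigger_cmd, timeout=10)
        except Exception as err:
            self.log.info("Session dropped as expected: %s" % err)
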
Example 24
    def test_sriov_ixbgevf(self):
        '''
        :avocado: tags=test_sriov_ixbgevf,fast_check
        polarion_id: RHEL7-87119
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        if not self.name.name.endswith("test_cleanup"):
            self.session = self.session1
            aws.check_session(self)
        eth_cmd = "ethtool -i eth0"
        if self.params.get('ixgbevf', '*/instance_types/*') > 0:
            self.log.info("Configure shows this instance supports ixgbevf")
        else:
            aws.run_cmd(self, eth_cmd, expect_ret=0, cancel_kw='ixgbevf')

        self.log.info("Trying to check sriov ixbgevf interface!")

        mod_cmd = "modinfo ixgbevf"

        self.log.info("Get eth0 module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(eth_cmd)
        if status > 0:
            self.fail("Failed to check eth0 status: cmd : %s output:%s" %
                      (eth_cmd, output))
        elif status == 0:
            if 'ixgbevf' in output:
                self.log.info("eth0 has ixgbevf loaded. cmd: %s result: %s" %
                              (eth_cmd, output))
            else:
                self.fail(
                    "eth0 does not have ixgbevf loaded. cmd : %s result:%s" %
                    (eth_cmd, output))
        self.log.info("Get ixgbevf module infomation: %s" %
                      self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(mod_cmd)
        if status > 0:
            self.fail(
                "Failed to get ixgbevf module information: cmd : %s result:%s"
                % (mod_cmd, output))
        elif status == 0:
            self.log.info("Below is ixgbevf information. cmd: %s result: %s" %
                          (mod_cmd, output))
Example 25
    def test_start_vm_iommu(self):
        '''
        :avocado: tags=test_start_vm_iommu
        polarion_id:
        This tests that a bare metal instance can boot up with iommu
        '''
        if 'metal' in self.vm.instance_type:
            self.log.info("Instance is bare metal")
        else:
            self.log.info("Instance is xen/kvm")
            self.cancel("Only run in bare metal instances!")
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')

        aws.run_cmd(self,
                    'lscpu',
                    expect_ret=0,
                    cancel_not_kw="Xen,aarch64,AuthenticAMD")

        cmd = 'sudo grubby --update-kernel=ALL --args="intel_iommu=on"'
        aws.run_cmd(self, cmd, expect_ret=0)
        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.session.connect(timeout=self.ssh_wait_timeout)
            aws.check_session(self)
            aws.run_cmd(self,
                        'cat /proc/cmdline',
                        msg='Get instance boot cmdline')
            cmd = 'sudo grubby --update-kernel=ALL \
--remove-args="intel_iommu=on"'

            aws.run_cmd(self, cmd, expect_ret=0)

        else:
            self.fail("Failed to start instance!")
Example 26
    def test_ltp_cpuhotplug(self):
        '''
        :avocado: tags=test_ltp_cpuhotplug
        polarion_id: RHEL7-98752
        BZ#: 1464095
        '''
        # ltp considers it a failure if more than 1 cpu cannot be taken
        # offline. In large bare metal instances this is expected, so do not
        # run it in bare metal instances.
        aws.run_cmd(self,
                    'lscpu',
                    expect_ret=0,
                    cancel_not_kw="Xen",
                    msg="Not run in xen instance as bug \
            1641510 which is very low priority")
        if 'metal' in self.vm.instance_type:
            self.cancel('Cancel test as bare metal needs at least 1 cpu to \
stay online, which ltp does not handle')
        else:
            utils_lib.ltp_run(self, case_name="cpuhotplug")
Example 27
 def _get_test_disk(self):
     '''
     If 2+ disks are found inside the system, return the name of one for
     block testing
     '''
     cmd = 'lsblk -l -o NAME -d|grep -v NAME'
     output = aws.run_cmd(self, cmd, expect_ret=0)
     disk_list = output.split('\n')
     if 'xvda' in disk_list:
         disk_list.remove('xvda')
     else:
         cmd = " sudo lsblk -o NAME,MOUNTPOINT|grep -w '/'"
         out = aws.run_cmd(self, cmd)
         bootdisk = re.findall('nvme[0-9]+', out)[0]
         self.log.info("Boot disk is %s" % bootdisk)
         disk_list.remove('%sn1' % bootdisk)
     if len(disk_list) > 0:
         self.log.info("%s selected for testing." % disk_list[0])
         return disk_list[0]
     else:
         self.cancel("No addtional disk for testing. Skip test")
Example 28
 def test_network_hotplug(self):
     '''
     :avocado: tags=test_network_hotplug,fast_check
     polarion_id: RHEL7-103904
     '''
     self.network = NetworkInterface(self.params)
     self.assertTrue(self.network.create(),
                     msg='network interface create failed!')
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     netdev_index = 1
     self.network.attach_to_instance(self.vm1.instance_id, netdev_index)
     for i in range(1, 4):
         time.sleep(5)
         self.log.info('Check network in guest, loop%s' % i)
         cmd = "lspci"
         output1 = aws.run_cmd(self, cmd)
         cmd = "ifconfig"
         output1 = aws.run_cmd(self, cmd)
         if 'eth%s' % netdev_index not in output1:
             self.log.info("Added nic not found")
     self.network.detach_from_instance(self.vm1.instance_id)
     time.sleep(5)
     cmd = "ifconfig"
     aws.run_cmd(self, cmd)
     self.network.delete()
     self.assertIn('eth%d' % netdev_index,
                   output1,
                    msg='eth%d not found after attaching nic' % netdev_index)
     cmd = 'dmesg'
     aws.run_cmd(self, cmd, expect_not_kw='Call Trace')
Example 29
    def test_blktests_block(self):
        '''
        :avocado: tags=test_blktests_block
        run blktests block test
        polarion_id: N/A
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        if int(self.params.get('disks', '*/instance_types/*')) == 1:
            self.log.info("Only 1 disk available, attached more for blktest.")
            disk_dict = {
                self.disk1: 'sds',
                self.disk2: 'sdt',
                self.disk3: 'sdu',
                self.disk4: 'sdv'
            }
            self.session.connect(timeout=self.ssh_wait_timeout)
            for i in disk_dict.keys():
                if i.is_attached():
                    i.detach_from_instance(force=True)
                self.log.info("Try to attach %s to %s" %
                              (i.res_id, self.vm.instance_id))
                if not i.attach_to_instance(self.vm.instance_id,
                                            disk_dict.get(i)):
                    self.fail("Attached failed!")

        self._get_blktest()
        cmd = 'cd blktests;sudo ./check block'
        # Not all cases pass due to test tool issues
        output = aws.run_cmd(self, cmd, timeout=1200)
        if output is None:
            self.fail("Cannot get output!")
        if output.count('[failed]') > 1:
            self.fail("%s failed found" % output.count('[failed]'))

        cmd = 'dmesg'
        aws.run_cmd(self, cmd, msg="dmesg after test")
Example 30
    def test_sriov_ena(self):
        '''
        :avocado: tags=test_sriov_ena,fast_check
        polarion_id: RHEL7-87117
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        eth_cmd = "ethtool -i eth0"
        if self.params.get('ena', '*/instance_types/*') > 0:
            self.log.info("Configure shows this instance supports ena")
        else:
            aws.run_cmd(self, eth_cmd, expect_ret=0, cancel_kw='ena')

        self.log.info("Trying to check sriov ena interface!")

        mod_cmd = "modinfo ena"

        self.log.info("Get eth0 module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(eth_cmd)
        if status > 0:
            self.fail("Failed to check eth0 status: cmd : %s output:%s" %
                      (eth_cmd, output))
        elif status == 0:
            if 'ena' in output:
                self.log.info("eth0 has ena loaded. cmd: %s result: %s" %
                              (eth_cmd, output))
            else:
                self.fail("eth0 does not have ena loaded. cmd : %s result:%s" %
                          (eth_cmd, output))
        self.log.info("Get ena module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(mod_cmd)
        if status > 0:
            self.fail(
                "Failed to get ena module information: cmd : %s result:%s" %
                (mod_cmd, output))
        elif status == 0:
            self.log.info("Below is ena information. cmd: %s result: %s" %
                          (mod_cmd, output))