Example #1
    def test_sriov_ixgbevf(self):
        '''
        :avocado: tags=test_sriov_ixgbevf,fast_check
        polarion_id: RHEL7-87119
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        if not self.name.name.endswith("test_cleanup"):
            self.session = self.session1
            aws.check_session(self)
        eth_cmd = "ethtool -i eth0"
        if self.params.get('ixgbevf', '*/instance_types/*') > 0:
            self.log.info("Configure shows this instance supports ixgbevf")
        else:
            utils_lib.run_cmd(self, eth_cmd, expect_ret=0, cancel_kw='ixgbevf')

        self.log.info("Trying to check sriov ixbgevf interface!")

        mod_cmd = "modinfo ixgbevf"

        self.log.info("Get eth0 module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(eth_cmd)
        if status > 0:
            self.fail("Failed to check eth0 status: cmd : %s output:%s" %
                      (eth_cmd, output))
        elif status == 0:
            if 'ixgbevf' in output:
                self.log.info("eth0 has ixgbevf loaded. cmd: %s result: %s" %
                              (eth_cmd, output))
            else:
                self.fail(
                    "eth0 does not have ixgbevf loaded. cmd : %s result:%s" %
                    (eth_cmd, output))
        self.log.info("Get ixgbevf module infomation: %s" %
                      self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(mod_cmd)
        if status > 0:
            self.fail(
                "Failed to get ixgbevf module information: cmd : %s result:%s"
                % (mod_cmd, output))
        elif status == 0:
            self.log.info("Below is ixgbevf information. cmd: %s result: %s" %
                          (mod_cmd, output))
Example #2
    def test_xe_guest_utilities(self):
        '''
        :avocado: tags=test_xe_guest_utilities,fast_check
        polarion_id:
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)

        aws.run_cmd(self,
                    'lscpu',
                    expect_ret=0,
                    cancel_kw="Xen",
                    msg="Only run in xen instance")

        utils_lib.run_cmd(self, 'sudo su', expect_ret=0)
        aws.install_pkgs(self.session, 'wget')
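        # xe-guest-utilities is not shipped in RHEL, so pull the Fedora build from koji.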
        cmd = 'sudo wget https://kojipkgs.fedoraproject.org//packages/\
xe-guest-utilities/7.12.0/1.fc29/x86_64/xe-guest-utilities-7.12.0-1.fc29.\
x86_64.rpm'

        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = 'yum localinstall -y xe-guest-utilities-7.12.0-1.fc29.x86_64.rpm'
        aws.run_cmd(self, cmd)
        cmd = 'rpm -ivh xe-guest-utilities-7.12.0-1.fc29.x86_64.rpm --force \
--nodeps'

        aws.run_cmd(self, cmd)
        xenstore_read = '/usr/libexec/xe-guest-utilities/xenstore-read'
        cmd = "%s domid" % xenstore_read
        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = "%s name" % xenstore_read
        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = "%s memory/target" % xenstore_read
        aws.run_cmd(self, cmd, expect_ret=0)
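        # Read the vif MAC from xenstore only when the NIC uses the xen netfront (vif) driver.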
        if 'vif' in aws.run_cmd(self, 'ethtool -i eth0', expect_ret=0):
            cmd = "%s device/vif/0/mac" % xenstore_read
            aws.run_cmd(self, cmd, expect_ret=0)

        xenstore_list = '/usr/libexec/xe-guest-utilities/xenstore-list'
        cmd = "%s device" % xenstore_list
        aws.run_cmd(self, cmd, expect_ret=0)
        cmd = "%s control" % xenstore_list
        aws.run_cmd(self, cmd, expect_ret=0)
Example #3
 def test_second_ip_hotplug(self):
     '''
     :avocado: tags=test_second_ip_hotplug,fast_check
     polarion_id:
     BZ: 1623084, 1642461
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     cmd = 'rpm -q NetworkManager-cloud-setup'
     utils_lib.run_cmd(self,
                       cmd,
                       cancel_not_kw='could not be found,not installed')
     cmd = 'sudo systemctl status nm-cloud-setup.timer'
     utils_lib.run_cmd(self, cmd)
     self.vm1.assign_new_ip()
     cmd = 'sudo ip addr show eth0'
     start_time = time.time()
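     # Poll every 25s, allowing up to 330s for nm-cloud-setup to apply the new IP.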
     while True:
         out = utils_lib.run_cmd(self, cmd)
         if self.vm.another_ip in out:
             break
         end_time = time.time()
         if end_time - start_time > 330:
             cmd = 'sudo systemctl status nm-cloud-setup.timer'
             utils_lib.run_cmd(self, cmd)
             self.fail("expected 2nd ip {} not found in guest".format(
                 self.vm.another_ip))
         time.sleep(25)
     cmd = 'sudo ip addr show eth0'
     start_time = time.time()
     self.vm1.remove_added_ip()
     while True:
         out = utils_lib.run_cmd(self, cmd)
         if self.vm.another_ip not in out:
             break
         end_time = time.time()
         if end_time - start_time > 330:
             cmd = 'sudo systemctl status nm-cloud-setup.timer'
             utils_lib.run_cmd(self, cmd)
             self.fail("expected 2nd ip {} not removed from guest".format(
                 self.vm.another_ip))
         time.sleep(25)
Example #4
    def test_sriov_ena_unload_load(self):
        '''
        :avocado: tags=test_sriov_ena_unload_load,fast_check,kernel
        description:
            Test unload and reload ENA (Elastic Network Adapter) module in RHEL on AWS.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_sriov_ena_unload_load"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority:
            0
        case_component: 
            network
        key_steps:
            1. Launch an instance with ENA drivers (instance list refer to description in case test_sriov_ena) on AWS.
            2. Connect the instance via ssh, and verify the ena module is being used on a particular interface via command "$ sudo ethtool -i eth0".
            3. Unload and load ena module via command "$ sudo modprobe -r ena; sudo modprobe ena".
            4. Check the dmesg info related to ena "$ sudo dmesg|grep -w ena".
        pass_criteria: 
            The ena can be unloaded and loaded without error.
            There are no error/warning/failure/unsupported feature messages about ena.
        '''
        self.log.info("Test unload and load ena module")
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        utils_lib.run_cmd(self, 'ethtool -i eth0', cancel_kw='ena')
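        # Unload and reload in one script so both run in a single shell invocation while the NIC is down.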
        cmd_string = 'modprobe -r ena;modprobe ena'
        cmd = 'sudo echo "%s" >/tmp/mod.sh' % cmd_string
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo chmod 755 /tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo su'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = '/tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)

        aws.check_dmesg(self, 'ena', match_word_exact=True)
Example #5
 def test_check_cloudinit_log_error(self):
     '''
     :avocado: tags=test_check_cloudinit_log_error,fast_check
     polarion_id:
     bz#: 1821999
     check no error log in cloudinit logs
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     utils_lib.run_cmd(self,
                 'sudo cat /var/log/cloud-init.log',
                 expect_ret=0,
                 expect_not_kw='ERROR',
                 msg='check /var/log/cloud-init.log')
     if 'release 7' not in utils_lib.run_cmd(self,
                                       'sudo cat /etc/redhat-release'):
         utils_lib.run_cmd(self,
                     'sudo cat /var/log/cloud-init-output.log',
                     expect_ret=0,
                     expect_not_kw='ERROR',
                     msg='check /var/log/cloud-init-output.log')
Example #6
    def test_start_vm(self):
        '''
        :avocado: tags=test_start_vm
        polarion_id: RHEL7-103633
        '''

        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.log.info("Instance is started: %s" % self.vm.instance_id)
            self.session.connect(timeout=self.ssh_wait_timeout)
            utils_lib.run_cmd(self,
                              'uname -r',
                              msg='Get instance kernel version')
        else:
            self.fail("Failed to start instance!")
Example #7
 def test_blktests_nvme(self):
     '''
     :avocado: tags=test_blktests_nvme
     run blktests block test
     polarion_id: N/A
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     aws.check_session(self)
     if int(self.params.get('disks', '*/instance_types/*')) == 1:
         self.log.info("Only 1 disk available, attached more for blktest.")
         if self.params.get('outpostarn') is not None:
             disk_dict = {
                 self.disk1: 'sds',
             }
         else:
             disk_dict = {
                 self.disk1: 'sds',
                 self.disk2: 'sdt',
                 self.disk3: 'sdu',
                 self.disk4: 'sdv'
             }
         self.session.connect(timeout=self.ssh_wait_timeout)
         for i in disk_dict.keys():
             if i.is_attached():
                 i.detach_from_instance(force=True)
             self.log.info("Try to attach %s to %s" %
                           (i.res_id, self.vm.instance_id))
             if not i.attach_to_instance(self.vm.instance_id,
                                         disk_dict.get(i)):
                 self.fail("Attached failed!")
     self._get_blktest()
     cmd = 'cd blktests;sudo ./check nvme'
      output = utils_lib.run_cmd(self, cmd, timeout=1200)
      # Not all cases pass due to a test tool issue, so tolerate one failure.
      if output.count('[failed]') > 1:
         self.fail("%s failed found" % output.count('[failed]'))
     cmd = 'dmesg'
     utils_lib.run_cmd(self, cmd, msg="dmesg after test")
Example #8
    def test_start_vm_iommu(self):
        '''
        :avocado: tags=test_start_vm_iommu
        polarion_id:
        This test bare metal instance can boot up with iommu
        '''
        if 'metal' in self.vm.instance_type:
            self.log.info("Instance is bare metal")
        else:
            self.log.info("Instance is xen/kvm")
            self.cancel("Only run in bare metal instances!")
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)
        utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')

        utils_lib.run_cmd(self,
                          'lscpu',
                          expect_ret=0,
                          cancel_not_kw="Xen,aarch64,AuthenticAMD")

        cmd = 'sudo grubby --update-kernel=ALL --args="intel_iommu=on"'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.session.connect(timeout=self.ssh_wait_timeout)
            aws.check_session(self)
            utils_lib.run_cmd(self,
                              'cat /proc/cmdline',
                              msg='Get instance boot cmdline')
            cmd = 'sudo grubby --update-kernel=ALL \
--remove-args="intel_iommu=on"'

            utils_lib.run_cmd(self, cmd, expect_ret=0)

        else:
            self.fail("Failed to start instance!")
Example #9
 def test_pci_reset(self):
     '''
     :avocado: tags=test_pci_reset
     description:
         [Skip] Test resetting PCI (ENA) devices in RHEL on AWS.
         Skip this case since there is a CANTFIX bug 1687330 with this case.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_pci_reset"
     bugzilla_id: 
         n/a
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority:
         0
     case_component: 
         network
     key_steps:
         1. Launch an instance with ENA drivers (instance list refer to description in case test_sriov_ena) on AWS.
         2. Connect the instance via ssh, check the ena driver is used by NIC via command "$ sudo ethtool -i eth0".
         3. List PCI devices information via command "$ sudo lspci".
         4. Reset ena device via command "$ sudo echo 1 > /sys/devices/pci0000:00/0000:00:05.0/reset".
     pass_criteria: 
         PCI reset successfully.
         There are no error/warning/failure/unsupported feature messages about ena.
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     cmd = 'sudo lspci'
     utils_lib.run_cmd(self, cmd)
      self.cancel('Cancel this case due to bug 1687330, which is TESTONLY!')
     cmd = 'sudo find /sys -name reset* -type f|grep pci'
     output = utils_lib.run_cmd(self, cmd)
     if 'reset' not in output:
         self.cancel("No pci support reset!")
     for pci_reset in output.split('\n'):
         cmd = 'sudo su'
         utils_lib.run_cmd(self, cmd)
         cmd = 'echo 1 > %s' % pci_reset
         utils_lib.run_cmd(self, cmd, expect_ret=0, timeout=120)
     aws.check_dmesg(self, 'fail')
     aws.check_dmesg(self, 'error')
     aws.check_dmesg(self, 'warn')
     cmd = 'dmesg'
     utils_lib.run_cmd(self, cmd, expect_ret=0, expect_not_kw='Call Trace')
Example #10
    def test_xen_netfront_unload_load(self):
        '''
        :avocado: tags=test_xen_netfront_unload_load,fast_check
        description:
            Test unload and reload xen_netfront module in RHEL on AWS.
            Instances with xen_netfront driver include T2 and G2.
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]NetworkTest.test_xen_netfront_unload_load"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority:
            0
        case_component: 
            network
        key_steps:
            1. Launch a T2 or G2 instance on AWS.
            2. Connect the instance via ssh, and verify the xen_netfront module is being used on a particular interface via command "$ sudo ethtool -i eth0".
            3. Unload and load xen_netfront module via command "$ sudo modprobe -r xen_netfront; sudo modprobe xen_netfront".
            4. Check the dmesg info related to xen_netfront "$ sudo dmesg|grep -w xen_netfront".
        pass_criteria: 
            The xen_netfront can be unloaded and loaded without error.
            There are no error/warning/failure/unsupported feature messages about xen_netfront.
        '''
        self.log.info("Test unload and load xen_netfront module")
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        cmd = 'sudo ethtool -i eth0'
        output = utils_lib.run_cmd(self, cmd, msg='Check network driver!')
        if 'driver: vif' not in output:
            self.cancel('No xen_netfront used!')
        aws.check_session(self)
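        # Unload and reload in one script so both run in a single shell invocation while the NIC is down.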
        cmd_string = 'modprobe -r xen_netfront;modprobe xen_netfront'
        cmd = 'sudo echo "%s" >/tmp/mod.sh' % cmd_string
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo chmod 755 /tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo su'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = '/tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)

        aws.check_dmesg(self, 'xen_netfront', match_word_exact=True)
Example #11
    def test_ltp_cpuhotplug(self):
        '''
        :avocado: tags=test_ltp_cpuhotplug
        polarion_id: RHEL7-98752
        BZ#: 1464095
        '''
        # ltp considers it a failure if more than 1 cpu cannot be set offline.
        # In large bare metal instances that is expected, so do not run the
        # case in bare metal instances.
        utils_lib.run_cmd(self,
                    'lscpu',
                    expect_ret=0,
                    cancel_not_kw="Xen",
                    msg="Not run in xen instance as bug \
            1641510 which is very low priority")
        if 'metal' in self.vm.instance_type:
            self.cancel('Cancel test as bare metal needs at least one cpu \
staying online, which ltp does not handle')
        else:
            utils_lib.ltp_run(self, case_name="cpuhotplug")
Example #12
    def test_sriov_ena_unload_load(self):
        '''
        :avocado: tags=test_sriov_ena_unload_load,fast_check
        polarion_id:
        '''
        self.log.info("Test unload and load ena module")
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        utils_lib.run_cmd(self, 'ethtool -i eth0', cancel_kw='ena')
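        # Unload and reload in one script so both run in a single shell invocation while the NIC is down.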
        cmd_string = 'modprobe -r ena;modprobe ena'
        cmd = 'sudo echo "%s" >/tmp/mod.sh' % cmd_string
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo chmod 755 /tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = 'sudo su'
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = '/tmp/mod.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0)

        aws.check_dmesg(self, 'ena', match_word_exact=True)
Example #13
 def test_check_virtwhat(self):
     '''
     :avocado: tags=test_check_virtwhat,fast_check,kernel_tier1
     polarion_id: RHEL7-103857
     '''
     utils_lib.run_cmd(self, r'sudo yum install -y virt-what')
     virt_what_output = utils_lib.run_cmd(self,
                                          r"sudo virt-what",
                                          expect_ret=0)
     lscpu_output = utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
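      # virt-what reports xen-hvm for fully virtualized Xen guests and xen-domU for PV guests.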
     if 'Xen' in lscpu_output:
         self.log.info("Found it is a xen system!")
         if 'full' in lscpu_output:
             self.assertIn('xen-hvm', virt_what_output)
         else:
             self.assertIn('xen-domU', virt_what_output)
     elif 'KVM' in lscpu_output:
         self.log.info("Found it is a kvm system!")
         self.assertIn('kvm', virt_what_output)
     else:
         self.log.info("Found it is a bare metal system!")
Example #14
 def _get_test_disk(self):
     '''
     If 2+ disks are found in the system, return the name of a non-boot
     disk for block testing.
     '''
     cmd = 'lsblk -l -o NAME -d|grep -v NAME'
     output = utils_lib.run_cmd(self, cmd, expect_ret=0)
     disk_list = output.split('\n')
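      # Drop the boot disk: xen instances boot from xvda, nvme instances from the device mounted on '/'.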
     if 'xvda' in disk_list:
         disk_list.remove('xvda')
     else:
         cmd = " sudo lsblk -o NAME,MOUNTPOINT|grep -w '/'"
         out = utils_lib.run_cmd(self, cmd)
         bootdisk = re.findall('nvme[0-9]+', out)[0]
         self.log.info("Boot disk is %s" % bootdisk)
         disk_list.remove('%sn1' % bootdisk)
     if len(disk_list) > 0:
         self.log.info("%s selected for testing." % disk_list[0])
         return disk_list[0]
     else:
         self.cancel("No addtional disk for testing. Skip test")
Example #15
    def test_collect_log(self):
        '''
        :avocado: tags=test_collect_log
        polarion_id: N/A
        '''
        self.log.info("This case is only saving log for future check purpose")
        aws.check_cmd(self, cmd='wget')
        aws.check_cmd(self, cmd='tar')
        aws.check_cmd(self, cmd='unzip')
        virt_utils_url = "https://github.com/SCHEN2015/virt-utils/archive/\
master.zip"

        self.log.info("Download %s" % virt_utils_url)
        cmd = "wget %s ; unzip master.zip" % virt_utils_url
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = "virt-utils-master/vm_check/vm_check.sh"
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = "tar zcf vmlog.tar.gz workspace/log"
        utils_lib.run_cmd(self, cmd, expect_ret=0)
        remote_path = "vmlog.tar.gz"
        local_path = "%s/%s_vmlog.tar.gz" % (self.job.logdir,
                                             self.vm.instance_type)
        self.log.info("Copy %s from guest to %s, please wait" %
                      (remote_path, local_path))
        self.session.copy_files_from(remote_path, local_path, timeout=600)
Example #16
 def test_check_config_ipv6(self):
     '''
     :avocado: tags=test_check_config_ipv6,fast_check
     description:
         Check the IPv6 is configured by default for guests on AWS. Linked case RHEL-131239
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]CloudInit.test_check_config_ipv6"
     bugzilla_id: 
         n/a
     customer_case_id: 
         n/a
     maintainer: 
         xiliang
     case_priority: 
         0
     case_component: 
         cloud-init
     key_steps:
         1. Launch an instance which support IPv6 on AWS EC2 in subnet with IPv6 auto assigned.
         2. Check the IPv6 is configured and auto assigned for NIC and can be connected via IPv6 address after system boot up.
     pass_criteria: 
         The IPv6 address shows in NIC and can be connected.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     if not self.params.get('ipv6'):
         self.cancel("Instance not support ipv6, skip check")
     cmd = 'ip addr show eth0'
     utils_lib.run_cmd(self, cmd, expect_kw='inet6 2600')
     cmd = 'cat /etc/sysconfig/network-scripts/ifcfg-eth0'
     utils_lib.run_cmd(self, cmd, expect_kw='IPV6INIT=yes')
     utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example #17
 def test_network_hotplug(self):
     '''
     :avocado: tags=test_network_hotplug,fast_check
     polarion_id: RHEL7-103904
     '''
     self.network = NetworkInterface(self.params)
     self.assertTrue(self.network.create(),
                     msg='network interface create failed!')
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     netdev_index = 1
     self.network.attach_to_instance(self.vm1.instance_id, netdev_index)
     for i in range(1, 4):
         time.sleep(5)
         self.log.info('Check network in guest, loop%s' % i)
         cmd = "lspci"
         output1 = utils_lib.run_cmd(self, cmd)
         cmd = "ifconfig"
         output1 = utils_lib.run_cmd(self, cmd)
         if 'eth%s' % netdev_index not in output1:
             self.log.info("Added nic not found")
     self.network.detach_from_instance(self.vm1.instance_id)
     time.sleep(5)
     cmd = "ifconfig"
     utils_lib.run_cmd(self, cmd)
     self.network.delete()
     self.assertIn('eth%d' % netdev_index,
                   output1,
                   msg='eth%d not found after attaching nic' % netdev_index)
     cmd = 'dmesg'
     utils_lib.run_cmd(self, cmd, expect_not_kw='Call Trace')
Example #18
def gcov_get(self):
    '''
    get lcov log from guest
    '''
    if not self.params.get('code_cover'):
        return True

    self.log.info('Collect code coverage report!')
    self.session.connect(timeout=self.ssh_wait_timeout)
    cmd = 'sudo rm -rf ec2_cov.info'
    utils_lib.run_cmd(self, cmd)
    utils_lib.run_cmd(self, 'sudo su')
    cmd = 'sudo lcov  -c -b /root/rpmbuild/BUILD/kernel*/linux-*/ -o \
ec2_cov.info'

    utils_lib.run_cmd(self, cmd, expect_ret=0)

    remote_path = "ec2_cov.info"
    local_path = "%s/lcov/%s_%s_ec2_cov.info" % (self.job.logdir,
                                                 self.name.uid, time.time())
    if not os.path.exists("%s/lcov" % self.job.logdir):
        os.mkdir("%s/lcov" % self.job.logdir)
    self.log.info("Copy %s from guest to %s, please wait" %
                  (remote_path, local_path))
    try:
        self.session.copy_files_from(remote_path, local_path, timeout=600)
    except Exception as err:
        self.log.info("Copy gcov log failed,but not fail case!%s" % err)
Example #19
    def test_check_userdata(self):
        '''
        :avocado: tags=test_check_userdata,fast_check,kernel_tier1
        description:
            Check the userdata can be passed when creating instance. Linked case RHEL7-87120
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]CloudInit.test_check_userdata"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            cloud-init
        key_steps:
            1. Launch an instance on AWS EC2 passing userdata, e.g., a script like this:
                #!/bin/bash
                 date > /home/ec2-user/time.log
            2. Connect the instance and check time.log appears after system boot up.
        pass_criteria: 
            The passed userdata (time.log) should exist and can be edited and removed.
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        user_name = self.params.get('ssh_user')
        user_dir = "/home/%s/instance_create_%s" % (user_name,
                                                    self.vm.instance_type)
        check_cmd = "ls -l %s" % user_dir
        utils_lib.run_cmd(self, check_cmd, expect_ret=0)
        check_cmd = "rm -rf %s" % user_dir
        utils_lib.run_cmd(self, check_cmd, expect_ret=0)

        utils_lib.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example #20
    def test_sriov_ena(self):
        '''
        :avocado: tags=test_sriov_ena,fast_check
        polarion_id: RHEL7-87117
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        eth_cmd = "ethtool -i eth0"
        if self.params.get('ena', '*/instance_types/*') > 0:
            self.log.info("Configure shows this instance supports ena")
        else:
            utils_lib.run_cmd(self, eth_cmd, expect_ret=0, cancel_kw='ena')

        self.log.info("Trying to check sriov ena interface!")

        mod_cmd = "modinfo ena"

        self.log.info("Get eth0 module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(eth_cmd)
        if status > 0:
            self.fail("Failed to check eth0 status: cmd : %s output:%s" %
                      (eth_cmd, output))
        elif status == 0:
            if 'ena' in output:
                self.log.info("eth0 has ena loaded. cmd: %s result: %s" %
                              (eth_cmd, output))
            else:
                self.fail("eth0 does not have ena loaded. cmd : %s result:%s" %
                          (eth_cmd, output))
        self.log.info("Get ena module infomation: %s" % self.vm1.instance_id)
        status, output = self.session1.cmd_status_output(mod_cmd)
        if status > 0:
            self.fail(
                "Failed to get ena module information: cmd : %s result:%s" %
                (mod_cmd, output))
        elif status == 0:
            self.log.info("Below is ena information. cmd: %s result: %s" %
                          (mod_cmd, output))
Example #21
 def _get_disk_online(self):
     '''
     Get online disks in system.
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     cmd = 'sudo lsblk -d'
     output = utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          msg='Get online disk count.')
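      # Count 'disk' type rows, excluding swap devices.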
     count = output.count('disk') - output.count('SWAP')
     self.log.info('Online disks: %s' % count)
     return count
Example #22
    def test_start_vm(self):
        '''
        :avocado: tags=test_start_vm
        description:
            Test start an RHEL instance on AWS. Linked case RHEL7-103633
        polarion_id:
            https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]LifeCycleTest.test_start_vm"
        bugzilla_id: 
            n/a
        customer_case_id: 
            n/a
        maintainer: 
            xiliang
        case_priority: 
            0
        case_component: 
            LifeCycle
        key_steps:
            1. Launch an instance on AWS EC2.
            2. Connect the instance via ssh with user:ec2-user.
        pass_criteria: 
            Instance is in running state without error, and can be connected via ssh.
        '''

        if not self.vm.is_stopped():
            self.vm.stop(loops=4)
        if not self.vm.is_stopped():
            self.fail("Instance is not in stopped state!")

        self.log.info("Start instance %s" % self.vm.instance_id)
        if self.vm.start(wait=True):
            self.log.info("Instance is started: %s" % self.vm.instance_id)
            self.session.connect(timeout=self.ssh_wait_timeout)
            utils_lib.run_cmd(self,
                              'uname -r',
                              msg='Get instance kernel version')
        else:
            self.fail("Failed to start instance!")
Example #23
    def _trigger_kdump_on_cpu(self, cpu=None):
        utils_lib.run_cmd(self, 'sudo su', msg="Switch to root")
        cmd = 'echo 1 > /proc/sys/kernel/sysrq'
        utils_lib.run_cmd(self,
                          cmd,
                          msg="Make sure it allows trigger panic via sysrq")
        cpuN = cpu
        if cpuN is None:
            trigger_cmd = "bash -c 'echo c > /proc/sysrq-trigger'"
        else:
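            # Pin the trigger to the requested CPU so the panic is raised on it.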
            trigger_cmd = "bash -c 'taskset -c " + \
                str(cpuN) + " echo c > /proc/sysrq-trigger'"

        self.log.debug("Send command '%s' " % trigger_cmd)
        # session.cmd_output fails to get the ret status because the reboot
        # closes the connection. Consider adding a function to guest.py to
        # handle this; for now, use sendline directly.
        self.session.session.sendline("'%s'" % trigger_cmd)
        try:
            status, output = self.session.cmd_status_output(trigger_cmd)
            self.log.info("trigger ret: %s, output: %s" % (status, output))
        except Exception as err:
            self.log.info("Error to read output as expected! %s" % err)
Example #24
 def test_sriov_ena_dmesg(self):
     '''
     :avocado: tags=test_sriov_ena_dmesg,fast_check
     polarion_id:
     '''
     self.session1.connect(timeout=self.ssh_wait_timeout)
     self.session = self.session1
     aws.check_session(self)
     cmd = "ethtool -i eth0"
     output = utils_lib.run_cmd(self, cmd, expect_ret=0)
     if "driver: ena" not in output:
         self.cancel("No ena driver found!")
     self.log.info("Trying to check sriov ena boot messages!")
     aws.check_dmesg(self, 'ena', match_word_exact=True)
Example #25
    def test_mtu_min_set(self):
        '''
        :avocado: tags=test_mtu_min_set,fast_check
        polarion_id: RHEL-111097
        ena mtu range: 128~9216
        ixgbevf mtu range: 68~9710
        vif mtu range: 68~65535
        '''
        self.session1.connect(timeout=self.ssh_wait_timeout)
        self.session = self.session1
        aws.check_session(self)
        cmd = "sudo ethtool -i eth0"
        output = utils_lib.run_cmd(self, cmd, expect_ret=0)
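        # Each range below probes under-min, min, a mid value, max, and over-max sizes.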
        if 'ena' in output:
            self.log.info('ena found!')
            mtu_range = [0, 127, 128, 4500, 9216, 9217]
            mtu_min = 128
            mtu_max = 9216
        elif 'ixgbe' in output:
            self.log.info('ixgbevf found!')
            mtu_range = [0, 67, 68, 4500, 9710, 9711]
            mtu_min = 68
            mtu_max = 9710
        elif 'vif' in output:
            self.log.info('vif found!')
            mtu_range = [0, 67, 68, 4500, 65535, 65536]
            mtu_min = 68
            mtu_max = 65535
        else:
            self.fail('Did not detect network type! %s' % output)

        self.log.info("Trying to change mtu to %s" % mtu_range)
        for mtu_size in mtu_range:
            mtu_cmd = "sudo ip link set dev eth0 mtu %s" % mtu_size
            mtu_check = "sudo ip link show dev eth0"
            self.log.info("CMD: %s" % mtu_cmd)
            status, output = self.session1.cmd_status_output(mtu_cmd)
            if mtu_min <= mtu_size <= mtu_max:
                self.assertEqual(status,
                                 0,
                                 msg='Change mtu size failed! %s' % output)
            else:
                self.assertGreater(
                    status,
                    0,
                    msg='Changed mtu size successfully while it should \
fail! %s' % output)

            status, output = self.session1.cmd_status_output(mtu_check)
            self.log.info("After set mtu size %s \n %s " % (mtu_size, output))
Example #26
    def test_check_timedrift_reboot(self):
        '''
        :avocado: tags=test_check_timedrift_reboot
        After 3 reboots, if the average drift time is over 1 second,
        we consider it a failure.
        polarion_id: RHEL7-110672
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        utils_lib.run_cmd(self,
                    "sudo cat /etc/redhat-release",
                    expect_ret=0,
                    cancel_kw="release 7,release 6",
                    msg="Only run in RHEL7 and RHEL6")
        aws.run_cmd(self, "sudo systemctl stop ntpd")
        aws.run_cmd(self, "sudo systemctl disable ntpd")
        aws.run_cmd(self, "sudo systemctl stop chronyd")
        aws.run_cmd(self, "sudo systemctl disable chronyd")
        aws.run_cmd(self, "sudo timedatectl set-ntp 0")

        offset1 = aws.get_drift(self)
        self.vm.reboot()
        self.session.connect(timeout=self.ssh_wait_timeout)
        offset2 = aws.get_drift(self)
        self.vm.reboot()
        self.session.connect(timeout=self.ssh_wait_timeout)
        offset3 = aws.get_drift(self)
        self.vm.reboot()
        self.session.connect(timeout=self.ssh_wait_timeout)
        offset4 = aws.get_drift(self)
        x = decimal.Decimal(offset2) - decimal.Decimal(offset1)
        y = decimal.Decimal(offset3) - decimal.Decimal(offset1)
        z = decimal.Decimal(offset4) - decimal.Decimal(offset1)
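        # Each offset delta is scaled by 10, so a total under 30 means the
        # average drift per reboot is under 1 second.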
        drift = math.fabs(x) * 10 + math.fabs(y) * 10 + math.fabs(z) * 10
        self.assertLess(drift, 30, msg="Drift is over 1 second")
        self.log.info("Average drift is less than 1 second. %d/30" % drift)

        aws.run_cmd(self, 'uname -r', msg='Get instance kernel version')
Example #27
 def test_cloudinit_check_log_no_traceback(self):
     '''
     :avocado: tags=tier2,cloudinit
     RHEL-188134 - CLOUDINIT-TC: Check no "Traceback" keyword in /var/log/cloud-init.log
     check no traceback log in cloudinit logs
     '''
     self.session.connect(timeout=self.ssh_wait_timeout)
     cmd = 'sudo cat /var/log/cloud-init.log'
     utils_lib.run_cmd(self,
                       cmd,
                       expect_ret=0,
                       expect_not_kw='Traceback',
                       msg='check /var/log/cloud-init.log',
                       is_get_console=False)
     if 'release 7' not in utils_lib.run_cmd(self,
                                             'sudo cat /etc/redhat-release',
                                             is_get_console=False):
         cmd = 'sudo cat /var/log/cloud-init-output.log'
         utils_lib.run_cmd(self,
                           cmd,
                           expect_ret=0,
                           expect_not_kw='Traceback',
                           msg='check /var/log/cloud-init-output.log',
                           is_get_console=False)
Example #28
    def _check_disk_count(self):
        '''
        check disk count via lsblk.
        For now, there is no exact result check as the output format may
        differ on RHEL6/7/8.
        Only compare the disk count reported by lsblk to the vm assigned count.
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        lsblk_cmd = 'sudo lsblk -d'
        vm_volumes = self.vm.get_volumes_id()
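        # Exclude the root disk from the locally attached disk count.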
        vm_local_disks = int(self.params.get('disks',
                                             '*/instance_types/*')) - 1
        start_time = time.time()
        while True:
            output = utils_lib.run_cmd(self,
                                 lsblk_cmd,
                                 expect_ret=0,
                                 msg='Get online disk count.')
            self.log.info("lsblk result: %s" % output)
            if output.count('disk') - output.count(
                    'SWAP') - vm_local_disks != len(vm_volumes):
                self.log.info("volume cound not match assinged, try again \
later! expected: %s lsblk: %s assigned: %s" %
                              (self.params.get('disks', '*/instance_types/*'),
                               output.count('disk'), vm_volumes))
            else:
                self.log.info(
                    "volumes matches assinged! lsblk: %s assigned: %s" %
                    (output.count('disk'), vm_volumes))
                break
            end_time = time.time()
            if int(end_time) - int(start_time) > 60:
                utils_lib.run_cmd(self, 'dmesg', expect_ret=0)
                self.fail(
                    "volume cound not match assinged after attached 60s!")
            time.sleep(5)
Example #29
 def test_kdump_fastboot_kexec_e(self):
     '''
     :avocado: tags=test_kdump_fastboot_kexec_e,acceptance,outposts
     description:
         Test loading kernel via kexec with RHEL on AWS.
     polarion_id:
         https://polarion.engineering.redhat.com/polarion/#/project/RedHatEnterpriseLinux7/workitems?query=title:"[AWS]KdumpTest.test_kdump_fastboot_kexec_e"
     bugzilla_id: 
         1758323, 1841578
     customer_case_id: 
         BZ1758323, BZ1841578
     maintainer: 
         xiliang
     case_priority: 
         0
     case_component: 
         Kdump
     key_steps:
         1. Launch an instance on AWS EC2 with multi kernels installed.
         2. Load each kernel with command "sudo kexec -l /boot/vmlinuz-$version --initrd=/boot/initramfs-$version.img --reuse-cmdline"
         3. When the kernel is loaded, run command "sudo kexec -e".
     pass_criteria: 
         Kernel can be loaded via kexec, and system will reboot into the loaded kernel via kexec -e without calling shutdown(8).
     '''
     if not self.kdump_status:
         self.cancel("Cancle test as kdump not running!")
     utils_lib.run_cmd(self,
                       'uname -r',
                       cancel_not_kw='el7,el6',
                        msg='Not fully supported earlier than el8, skip!')
     self.session.connect(timeout=self.ssh_wait_timeout)
     cmd = 'sudo rpm -qa|grep -e "kernel-[0-9]"'
     output = utils_lib.run_cmd(self, cmd, msg='Get kernel version')
     kernels_list = output.split('\n')
     for kernel in kernels_list:
         kernel_vmlinuz = "/boot/" + kernel.replace('kernel', 'vmlinuz')
         kernel_initramfs = "/boot/" + kernel.replace(
             'kernel', 'initramfs') + ".img"
         cmd = "sudo kexec -l %s --initrd=%s --reuse-cmdline" % (
             kernel_vmlinuz, kernel_initramfs)
         utils_lib.run_cmd(self, cmd, msg='Switch kernel', expect_ret=0)
         cmd = "sudo kexec -e"
         self.log.info("CMD: %s", cmd)
         self.session.session.sendline("%s" % cmd)
         time.sleep(10)
         self.session.connect(timeout=self.ssh_wait_timeout)
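          # kernel[7:] strips the leading "kernel-" so only the version string is matched.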
         utils_lib.run_cmd(self,
                           'uname -r',
                           msg='check kernel',
                           expect_ret=0,
                           expect_kw=kernel[7:])
Example #30
    def test_ssd_trim(self):
        '''
        :avocado: tags=test_ssd_trim,acceptance,fast_check,outposts
        polarion_id: RHEL7-87311
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        aws.check_session(self)

        cmd = 'sudo lsblk -d -O -J'
        disk_discard = None
        try:
            output = utils_lib.run_cmd(self, cmd)
            disks_dict = json.loads(output)
            disk_discard = None
            for disk in disks_dict["blockdevices"]:
                if disk["disc-max"] is not None and '0B' not in disk[
                        "disc-max"]:
                    disk_discard = disk["name"]
                    self.log.info("%s supports discard %s" %
                                  (disk_discard, disk["disc-max"]))
        except ValueError as err:
            self.log.info("lsblk no json support")
            cmd = 'sudo lsblk -o NAME,DISC-MAX -d|grep -v NAME'
            output = utils_lib.run_cmd(self, cmd)
            for disk in output.split('\n'):
                if '0B' not in disk:
                    disk_discard = disk.split(' ')[0]
                    self.log.info("%s supports discard" % disk)

        if disk_discard is None:
            self.cancel("No disk supports discard found.")
        cmd = 'sudo lsblk |grep -i part'
        output = utils_lib.run_cmd(self, cmd)
        if disk_discard not in output:
            cmd = "sudo mkfs.xfs /dev/%s" % disk_discard
            utils_lib.run_cmd(self, cmd, expect_ret=0)
            cmd = "sudo mount /dev/%s /mnt" % disk_discard
            utils_lib.run_cmd(self, cmd, expect_ret=0)
        cmd = "sudo fstrim -v /mnt"
        utils_lib.run_cmd(self, cmd, expect_ret=0)