def test_check_boot_partition_guid(self):
    """
    case_name: test_check_boot_partition_guid
    component: rhel-guest-image
    bugzilla_id: 2057231
    is_customer_case: False
    maintainer: [email protected]
    description: check whether the GUID of boot partition is correct
    key_steps:
        1. lsblk -n -o PARTTYPE,MOUNTPOINT | grep '/boot$'
    expect_result:
        GUID = bc13c2ff-59e6-4262-a352-b275fd6f7172
    """
    # The XBOOTLDR partition type GUID defined by the Discoverable
    # Partitions Specification; only enforced on el9+ x86_64/aarch64.
    expected_guid = "bc13c2ff-59e6-4262-a352-b275fd6f7172"
    product_id = utils_lib.get_product_id(self)
    if float(product_id) >= 9.0 and (
            utils_lib.is_arch(self, arch='aarch64')
            or utils_lib.is_arch(self, arch='x86_64')):
        boot_info = utils_lib.run_cmd(
            self,
            "lsblk -n -o PARTTYPE,MOUNTPOINT | grep '/boot$'",
            expect_ret=0,
            msg="Get GUID of /boot")
        self.assertIn(expected_guid, boot_info,
                      "Boot partition GUID incorrect: %s" % boot_info)
    else:
        self.skipTest("Only run on RHEL 9.x or later on x86_64 or aarch64.")
def test_z_check_subscription_manager_auto_function(self):
    '''
    Verify the auto_registration function works.
    The "z" in the case name makes this case run last.
    '''
    product_id = utils_lib.get_product_id(self)
    # Configure rhsmcertd for fast auto-registration, then restart it.
    setup_steps = (
        ("sudo subscription-manager config --rhsmcertd.auto_registration=1",
         "Enable auto_registration"),
        ("sudo subscription-manager config --rhsm.manage_repo=0",
         "Disable manage_repo"),
        ("sudo subscription-manager config --rhsmcertd.auto_registration_interval=1",
         "Change interval to 1 min"),
        ("sudo systemctl restart rhsmcertd.service",
         "Restart rhsmcertd service"),
    )
    for step_cmd, step_msg in setup_steps:
        utils_lib.run_cmd(self, step_cmd, expect_ret=0, msg=step_msg)
    time.sleep(60)
    # Poll the registration identity up to 10 times, 30s apart.
    for retry in range(1, 11):
        identity = utils_lib.run_cmd(self,
                                     "sudo subscription-manager identity",
                                     msg="Checking register status...")
        if "system identity" in identity:
            self.log.info("Auto register successfully!")
            break
        self.log.info("Not registered yet. Wait for 30s...{}/10".format(retry))
        time.sleep(30)
    else:
        self.fail("Fail to auto register!")
def test_kdump_no_specify_cpu(self):
    '''
    bz: 1654962
    polarion_id: RHEL7-58669
    Trigger a sysrq crash without pinning to a specific cpu and verify
    that kdump writes a vmcore containing the trigger in its dmesg.
    '''
    # Sanity checks: kdump memory reservation, cmdline and service state.
    for precheck in ('sudo kdumpctl showmem',
                     'cat /proc/cmdline',
                     'systemctl is-active kdump'):
        utils_lib.run_cmd(self, precheck, expect_ret=0)
    utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
    product_id = utils_lib.get_product_id(self)
    if utils_lib.is_arch(self, 'aarch64') and not utils_lib.is_metal(self) \
            and float(product_id) < 8.6:
        self.skipTest("Cancel as bug 1654962 in arm guest earlier than 8.6 2082405")
    utils_lib.run_cmd(self, r'sudo rm -rf /var/crash/*', expect_ret=0,
                      msg='clean /var/crash firstly')
    utils_lib.run_cmd(self, r'sudo sync', expect_ret=0)
    self.log.info("Before system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0,
                      msg='list /var/crash')
    utils_lib.run_cmd(self, "sudo bash -c \"echo c > /proc/sysrq-trigger\"",
                      msg='trigger crash')
    # The node reboots after the crash; re-establish the connection.
    utils_lib.init_connection(self, timeout=self.ssh_timeout)
    self.log.info("After system crash")
    utils_lib.run_cmd(self, r'find /var/crash', expect_ret=0,
                      msg='list /var/crash after crash')
    utils_lib.run_cmd(self,
                      r'sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50',
                      expect_ret=0, expect_kw='write_sysrq_trigger')
def test_check_rhel_version(self):
    '''
    Check that the version of the rpm providing redhat-release matches
    the product id reported by the system.
    '''
    provider = 'redhat-release'
    product_id = utils_lib.get_product_id(self)
    query = "echo $(sudo rpm -q --qf '%{VERSION}' --whatprovides " \
            + provider + ')'
    utils_lib.run_cmd(self, query, expect_kw=product_id,
                      msg='check redhat-release version match')
def test_check_services_status(self):
    """
    case_name: test_check_services_status
    component: rhel-guest-image
    bugzilla_id: 974554
    is_customer_case: False
    maintainer: [email protected]
    description: check service status
    key_steps:
        1. systemctl
    expect_result: services is active and enabled
    """
    # Fetch the product id once; it was previously queried twice,
    # issuing a redundant remote command.
    product_id = utils_lib.get_product_id(self)
    enabled_services = ['tuned', 'rhsmcertd']
    if float(product_id) <= 7.0:
        # rhsmcertd is not expected on el7 and earlier images.
        enabled_services = ['tuned']
    for service in enabled_services:
        utils_lib.run_cmd(self, "systemctl is-active %s" % service,
                          expect_ret=0, msg="systemctl is-active")
        utils_lib.run_cmd(self, "systemctl is-enabled %s" % service,
                          expect_ret=0, msg="systemctl is-enabled")
    if float(product_id) < 9.0:
        # Before el9, tuned should have selected the virtual-guest profile.
        output = utils_lib.run_cmd(self, "cat /etc/tuned/active_profile",
                                   expect_ret=0,
                                   msg="check tuned active_profile")
        self.assertEqual("virtual-guest", output.rstrip('\n'),
                         "Tuned service abnormal")
def test_check_rhui_pkg(self):
    """
    Verify the google RHUI client package set: the non-EUS client must be
    installed and the EUS client must be absent.
    NOTE(review): an earlier note said 8.4 images should carry the EUS
    RHUI client, but the checks below always expect non-EUS — confirm
    the intended policy.
    """
    self.log.info('RHEL image found')
    product_id = utils_lib.get_product_id(self)
    x_version = self.rhel_x_version
    non_eus_query = 'sudo rpm -q google-rhui-client-rhel{}'.format(x_version)
    utils_lib.run_cmd(
        self, non_eus_query, expect_ret=0,
        msg="Verify non-EUS RHUI is installed in RHEL-{}".format(product_id))
    eus_query = 'sudo rpm -q google-rhui-client-rhel{}-eus'.format(x_version)
    utils_lib.run_cmd(
        self, eus_query, expect_ret=1,
        msg="Verify EUS RHUI is not installed in RHEL-{}".format(product_id))
def test_check_boot_cmdline_parameters(self):
    """
    case_name: test_check_boot_cmdline_parameters
    component: rhel-guest-image
    bugzilla_id: 1144155
    is_customer_case: False
    maintainer: [email protected]
    description: check cmdline parameters
    key_steps:
        1. cat /proc/cmdline
    expect_result:
        no_timer_check console=tty0 console=ttyS0,115200n8 net.ifnames=0 crashkernel=
    """
    # Wait until kdump is active: it may rewrite crashkernel= at boot.
    for count in utils_lib.iterate_timeout(
            120, "Timed out waiting for getting IP address."):
        cmd = 'sudo systemctl is-active kdump'
        ret = utils_lib.run_cmd(self, cmd, ret_status=True,
                                msg='check kdump is active')
        if ret == 0:
            break
    src_dir = self.data_dir + "/guest-images/"
    data_file = "cmdline_params.lst"
    # Expected parameters, one per line; blank lines are dropped.
    lines = filter(None, (line.rstrip()
                          for line in open(os.path.join(src_dir, data_file))))
    cmd = "cat /proc/cmdline"
    output = utils_lib.run_cmd(self, cmd, expect_ret=0,
                               msg="cat /proc/cmdline")
    for line in lines:
        self.assertIn(line, output, "%s is not in boot parameters" % line)
    # crashkernel
    product_id = utils_lib.get_product_id(self)
    if float(product_id) >= 9.0:
        cmd = "sudo kdumpctl get-default-crashkernel"
        tmp_output = utils_lib.run_cmd(
            self, cmd, expect_ret=0,
            msg="kdumpctl get-default-crashkernel")
        # Bug fix: the previous "tmp_output.rstrip('.')[0]" kept only the
        # FIRST character of the value (e.g. "crashkernel=1"), which made
        # the assertion trivially pass. Compare the full reported value.
        line = "crashkernel=" + tmp_output.strip()
    else:
        line = "crashkernel=auto"
    self.assertIn(line, output, "%s is not in boot parameters" % line)
def test_check_partitions(self):
    """
    case_name: test_check_partitions
    component: rhel-guest-image
    bugzilla_id: 1673094
    is_customer_case: False
    maintainer: [email protected]
    description: check whether root partition is identified by UUID in fstab
    key_steps:
        1. lsblk
    expect_result: #TODO
    """
    df_out = utils_lib.run_cmd(self, "df / | tail -n 1", expect_ret=0,
                               msg="df output for rootfs")
    root_match = re.search('/dev/([a-z]+)[0-9].*', df_out)
    self.assertTrue(root_match, "root dev name not found: %s" % df_out)
    dev_name = root_match.group(1)
    output = utils_lib.run_cmd(self, "sudo lsblk", expect_ret=0, msg="lsblk")
    # Count lsblk rows that mention the root device (disk + partitions).
    count = sum(1 for row in output.splitlines() if re.search(dev_name, row))
    product_id = utils_lib.get_product_id(self)
    if float(product_id) >= 9.0:
        expected_partitions = 5
        if utils_lib.is_arch(self, arch='s390x'):
            expected_partitions = 3
        elif utils_lib.is_arch(self, arch='aarch64') or utils_lib.is_arch(
                self, arch='ppc64le'):
            expected_partitions = 4
    elif float(product_id) <= 7.0:
        expected_partitions = 2
    else:
        expected_partitions = 4
        if utils_lib.is_arch(self, arch='s390x'):
            expected_partitions = 2
        elif utils_lib.is_arch(self, arch='aarch64') or utils_lib.is_arch(
                self, arch='ppc64le'):
            expected_partitions = 3
    self.assertEqual(expected_partitions, count,
                     "More than one partition exists:\n %s" % output)
def test_check_cmdline_crashkernel(self):
    '''
    crashkernel should be enabled in image
    '''
    product_id = utils_lib.get_product_id(self)
    if float(product_id) < float('9'):
        expect_kw = 'crashkernel=auto'
    elif utils_lib.is_arch(self, arch='x86_64'):
        # rhbz: 1942398 — el9 dropped crashkernel=auto in favor of
        # explicit per-arch reservation ranges.
        expect_kw = 'crashkernel=1G-4G:192M,4G-64G:256M,64G-:512M'
    else:
        expect_kw = 'crashkernel=2G-:448M'
    utils_lib.run_cmd(self, "sudo cat /proc/cmdline", expect_ret=0,
                      expect_kw=expect_kw,
                      msg='check crashkernel is enabled')
def test_check_product_id(self):
    '''
    bz: 1938930
    issue: RHELPLAN-60817
    check if product id matches /etc/redhat-release
    '''
    product_id = utils_lib.get_product_id(self)
    # Before el8 the release package was named redhat-release-server.
    release_pkg = ('redhat-release-server'
                   if float(product_id) < float('8') else 'redhat-release')
    utils_lib.run_cmd(self, 'sudo rpm -q {}'.format(release_pkg),
                      cancel_ret='0',
                      msg='get redhat-release-server version')
    utils_lib.run_cmd(self, 'sudo rct cat-cert /etc/pki/product-default/*.pem',
                      expect_ret=0,
                      expect_kw="Version: {}".format(product_id),
                      msg='check product certificate')
def test_check_selinux_contexts(self):
    """
    case_name: test_check_selinux_contexts
    component: rhel-guest-image
    bugzilla_id: N/A
    is_customer_case: False
    maintainer: [email protected]
    description: check selinux contexts
    key_steps:
        1. restorecon -R -v -n
    expect_result: all files labeled with existing selinux contexts
    """
    # Remote file collecting the dry-run restorecon output.
    selinux_now = "/tmp/" + "selinux.now"
    product_id = utils_lib.get_product_id(self)
    # Known-acceptable relabel entries are kept per major release,
    # e.g. selinux.el9.lst.
    data_file = "selinux.el%s.lst" % product_id.split('.')[0]
    src_path = self.data_dir + '/guest-images/' + data_file
    dest_path = "/tmp/" + data_file
    self.log.info("Copy {} to remote".format(src_path))
    self.SSH.put_file(local_file=src_path, rmt_file=dest_path)
    # Dry-run relabel (-n) of the whole tree, excluding volatile paths;
    # every line written is a file whose context differs from policy.
    cmd = "sudo restorecon -R -v -n / -e /mnt -e /proc -e /sys \
 -e /tmp -e /var/tmp -e /run >{0}".format(selinux_now)
    output = utils_lib.run_cmd(
        self, cmd, expect_ret=0,
        msg="check selinux label through restorecon")
    # Drop lines present in the per-release baseline; anything left is
    # an unexpected mislabel.
    cmd = "grep -vxFf {0} {1} > /tmp/cmp".format(dest_path, selinux_now)
    output = utils_lib.run_cmd(self, cmd,
                               msg="check differences through grep")
    cmd = "cat /tmp/cmp"
    output = utils_lib.run_cmd(self, cmd, expect_ret=0,
                               msg="check diff content")
    self.assertEqual(
        "", output,
        "Found extra SELinux contexts have been modified:\n%s" % output)
def test_check_pkg_wanted(self):
    '''
    Verify required pkgs are installed.
    '''
    wanted_pkgs = [
        'redhat-release-eula',
        'google-cloud-sdk',
        'google-compute-engine',
        'gce-disk-expand',
        'google-osconfig-agent',
        'tar',
        'NetworkManager',
        'gdisk',
        'acpid',
        'dnf-automatic',
        'grub2-tools',
        'firewalld',
        'chrony',
        'net-tools',
        'rng-tools',
    ]
    product_id = utils_lib.get_product_id(self)
    if float(product_id) < float('8'):
        wanted_pkgs.append('dhclient')
    else:
        wanted_pkgs.extend(['insights-client', 'dhcp-client'])
    for pkg in wanted_pkgs:
        utils_lib.run_cmd(self, 'rpm -q {}'.format(pkg), expect_ret=0,
                          msg='check {} installed'.format(pkg))
def test_check_files_controlled_by_rpm(self):
    """
    case_name: test_check_files_controlled_by_rpm
    component: rhel-guest-image
    bugzilla_id: N/A
    is_customer_case: False
    maintainer: [email protected]
    description: check files is controlled by rpm pkg
    key_steps:
        1. rpm -ql
    expect_result: No unexpected orphan files
    """
    product_id = utils_lib.get_product_id(self)
    baseline_file = "rogue.el%s.lst" % product_id.split('.')[0]
    helper_script = "rogue.sh"
    # Upload and run the helper that writes files not owned by any rpm
    # to /tmp/rogue on the guest.
    script_src = self.data_dir + '/guest-images/' + helper_script
    script_dest = '/tmp/' + helper_script
    self.SSH.put_file(local_file=script_src, rmt_file=script_dest)
    run_script = "sudo sh -c 'chmod 755 %s && %s'" % (script_dest, script_dest)
    utils_lib.run_cmd(self, run_script, expect_ret=0, timeout=300,
                      msg="run rogue.sh")
    # Upload the per-release baseline and strip its entries from the
    # collected list; anything remaining is unexpected.
    baseline_src = self.data_dir + '/guest-images/' + baseline_file
    baseline_dest = '/tmp/' + baseline_file
    self.SSH.put_file(local_file=baseline_src, rmt_file=baseline_dest)
    diff_cmd = "grep -vxFf %s %s" % (baseline_dest, "/tmp/rogue")
    output = utils_lib.run_cmd(self, diff_cmd,
                               msg="check differences through grep")
    self.assertEqual(
        "", output.rstrip('\n'),
        "Found extra files not controlled by rpm:\n%s" % output)
def test_check_subscription_manager_auto_config(self):
    '''
    bz: 1932802, 1905398
    Verify auto_registration is enabled in the image:
    /etc/rhsm/rhsm.conf must carry auto_registration = 1 and
    manage_repos = 0, and the rhsmcertd service must be enabled.
    '''
    # NOTE: a large commented-out block (RHUI vs SCA image detection and
    # an el8.4 version skip) was removed as dead code; the expected
    # config is unconditional for current images.
    expect_kw = "auto_registration = 1,manage_repos = 0"
    cmd = "sudo cat /etc/rhsm/rhsm.conf"
    utils_lib.run_cmd(self, cmd, expect_ret=0, expect_kw=expect_kw,
                      msg='try to check subscription-manager config')
    cmd = "sudo systemctl is-enabled rhsmcertd"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      msg='try to check rhsmcertd enabled')
def test_kdump_each_cpu(self):
    """
    case_tag: kdump
    case_name: test_kdump_each_cpu
    case_file: os_tests.tests.test_lifecycle.test_kdump_each_cpu
    component: kdump
    bugzilla_id: 1396554
    is_customer_case: False
    customer_case_id: N/A
    testplan: N/A
    maintainer: [email protected]
    description: Test kdump on each cpu core
    key_steps: |
        1. Triger crash on each cpu core
        2. Check if kdump is working and dump file will be generated
    expect_result: kdump is working and dump file will be generated
    debug_want: N/A
    """
    utils_lib.run_cmd(self, 'lscpu', expect_ret=0)
    product_id = utils_lib.get_product_id(self)
    if utils_lib.is_arch(self, 'aarch64') and not utils_lib.is_metal(self) \
            and float(product_id) < 8.6:
        self.skipTest("Cancel as bug 1654962 in arm guest earlier than 8.6 2082405")
    count_cmd = "grep processor /proc/cpuinfo | wc -l"
    cpu_counts = int(utils_lib.run_cmd(self, count_cmd, expect_ret=0,
                                       msg="Get cpu counts"))
    for core_num in range(cpu_counts):
        self.log.info("Trigger kdump on core %d" % core_num)
        utils_lib.run_cmd(self,
                          "systemctl is-active kdump || sudo systemctl start kdump",
                          expect_ret=0, msg="check kdump service status")
        utils_lib.run_cmd(self, "sudo rm -rf /var/crash/*", expect_ret=0,
                          msg="clean /var/crash")
        utils_lib.run_cmd(self, "sudo sync", expect_ret=0)
        self.log.info("Before system crash")
        res_before = utils_lib.run_cmd(self, "find /var/crash", expect_ret=0,
                                       msg="list /var/crash before crash")
        # Pin the crash trigger to this core via taskset.
        trigger = "sudo bash -c 'taskset -c %d echo c > /proc/sysrq-trigger'" % core_num
        utils_lib.run_cmd(self, trigger, msg='trigger crash')
        time.sleep(30)
        # The node reboots; reconnect before inspecting the dump.
        self.params['remote_node'] = self.vm.floating_ip
        utils_lib.init_connection(self, timeout=self.ssh_timeout)
        self.log.info("After system crash")
        res_after = utils_lib.run_cmd(self, "find /var/crash", expect_ret=0,
                                      msg="list /var/crash after crash")
        self.assertNotEqual(res_after, res_before,
                            "Test failed as no crash dump file found")
        utils_lib.run_cmd(self,
                          "sudo cat /var/crash/*/vmcore-dmesg.txt|tail -50",
                          expect_ret=0, expect_kw="write_sysrq_trigger",
                          msg="Check if crash happened")
def test_check_file_content_integrity(self):
    """
    case_name: test_check_file_content_integrity
    component: rhel-guest-image
    bugzilla_id: N/A
    is_customer_case: False
    maintainer: [email protected]
    description: check file content integrity
    key_steps:
        1. rpm -Va
    expect_result: No unexpected file modifications
    """
    product_id = utils_lib.get_product_id(self)
    # Known-acceptable "rpm -Va" differences, one baseline per major release.
    data_file = "rpm_va.el%s.lst" % product_id.split('.')[0]
    # cmd = "sudo prelink -amR"
    # output = utils_lib.run_cmd(self, cmd, expect_ret=0, msg="prelink -amR")
    src_path = self.data_dir + '/guest-images/' + data_file
    dest_path = '/tmp/' + data_file
    self.SSH.put_file(local_file=src_path, rmt_file=dest_path)
    # Verify all packages, dropping baseline entries and boot artifacts
    # that are regenerated per host.
    cmd = "sudo rpm -Va | grep -vxFf {0} | grep -Ev \
 '/boot/initramfs|/boot/System.map'".format(dest_path)
    output = utils_lib.run_cmd(self, cmd, timeout=240,
                               msg="check integrity through rpm -Va")
    self.assertEqual("", output,
                     "Found extra files has been modified:\n%s" % output)
    # Continue to compare every single file under local
    # "data/vendor/file_cmp"
    src_dir = self.data_dir + "/guest-images/file_cmp.el%s/" % product_id.split(
        '.')[0]
    if os.path.isdir(src_dir):
        for f in os.listdir(src_dir):
            # Reference file names encode the target path with '%' as the
            # separator, optionally suffixed ".elN" to pin a major release.
            m = re.match(r"^(%.*%)(.*)\.el(\d)$", f)
            if m:
                f_name = m.group(2)
                f_ver = m.group(3)
                f_name_l = m.group(1).replace('%', '/') + f_name
                if self.rhel_ver.split('.')[0] != f_ver:
                    continue
            else:
                # NOTE(review): assumes every non-".elN" name still matches
                # "%...%name" — a stray file here would raise AttributeError.
                m = re.match(r"^(%.*%)(.*)$", f)
                f_name = m.group(2)
                f_name_l = f.replace('%', '/')
            src_path = src_dir + f
            dest_path = '/tmp/' + f_name
            self.log.info("Copy {} to remote".format(src_path))
            self.SSH.put_file(local_file=src_path, rmt_file=dest_path)
            # Diff the on-disk file against the reference, ignoring
            # whitespace-only lines and whitespace changes.
            cmd = "grep -xv '^[[:space:]][[:space:]]*$' %s | diff \
 -wB - %s" % (f_name_l, "/tmp/" + f_name)
            output = utils_lib.run_cmd(self, cmd,
                                       msg="compare through grep")
            self.assertEqual(
                "", output,
                "Found %s has been modified:\n%s" % (f_name, output))
def test_subscription_manager_auto(self):
    """
    case_name: test_subscription_manager_auto
    component: subscription-manager
    bugzilla_id: 1932802, 1905398
    is_customer_case: <optional: True or False>
    maintainer: [email protected]
    description: test if subscription-manager can run normally in AWS and Azure, RHEL system without timeout.
    key_steps: |
        1.sudo subscription-manager config --rhsmcertd.auto_registration=1 --rhsm.manage_repos=0 --rhsmcertd.auto_registration_interval=1
        2.sudo systemctl restart rhsmcertd
        3.sudo subscription-manager config
        4.sudo systemctl is-active rhsmcertd
        5.sudo cat /var/log/rhsm/rhsmcertd.log
        6.sudo cat /var/log/rhsm/rhsm.log
        7.sudo subscription-manager identity
        8.sudo subscription-manager list --installed
        9.sudo subscription-manager status
        10.sudo insights-client --register
    expect_result: all registration successfully without timeout failure.
    debug_want: N/A
    """
    # Only RHEL on AWS/Azure at el8.4+ supports auto-registration here.
    product_name = utils_lib.get_os_release_info(self, field='NAME')
    if 'Red Hat Enterprise Linux' not in product_name:
        self.skipTest('Only for RHEL test.')
    if not (utils_lib.is_aws(self) or utils_lib.is_azure(self)):
        self.skipTest(
            'Auto registeration only supports AWS and Azure platforms for now.'
        )
    product_id = utils_lib.get_product_id(self)
    if float(product_id) < 8.4:
        self.skipTest('skip in earlier than el8.4')
    # Enable auto-registration with a 1-minute interval so the test
    # does not have to wait the default 60 minutes.
    cmd = "sudo subscription-manager config --rhsmcertd.auto_registration=1 --rhsm.manage_repos=0 --rhsmcertd.auto_registration_interval=1"
    utils_lib.run_cmd(
        self,
        cmd,
        expect_ret=0,
        msg=
        'try to enable auto_registration, disable managed_repos and change inverval from 60mins to 1min'
    )
    cmd = "sudo systemctl restart rhsmcertd"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      msg='try to restart rhsmcertd service')
    cmd = "sudo subscription-manager config"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      expect_kw="auto_registration = 1,manage_repos = 0",
                      msg='try to check subscription-manager config')
    cmd = "sudo systemctl is-active rhsmcertd"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      msg='try to check rhsmcertd enabled')
    # Poll registration status every 60s for up to 600s; the logs are
    # dumped each round to aid debugging on failure.
    start_time = time.time()
    timeout = 600
    interval = 60
    while True:
        cmd = 'sudo cat /var/log/rhsm/rhsmcertd.log'
        utils_lib.run_cmd(self, cmd, msg='try to check rhsmcertd.log')
        cmd = 'sudo cat /var/log/rhsm/rhsm.log'
        utils_lib.run_cmd(self, cmd, msg='try to check rhsm.log')
        cmd = "sudo subscription-manager identity"
        out = utils_lib.run_cmd(self, cmd,
                                msg='try to check subscription identity')
        cmd = "sudo subscription-manager list --installed"
        out = utils_lib.run_cmd(
            self, cmd, msg='try to list currently installed on the system')
        cmd = "sudo subscription-manager status"
        out = utils_lib.run_cmd(self, cmd,
                                msg='try to check subscription status')
        if 'Red Hat Enterprise Linux' in out or 'Simple Content Access' in out:
            self.log.info("auto subscription registered completed")
            cmd = "sudo insights-client --register"
            utils_lib.run_cmd(
                self,
                cmd,
                msg='check if insights-client can register successfully')
            break
        end_time = time.time()
        if end_time - start_time > timeout:
            # Last attempt at insights registration before failing.
            cmd = "sudo insights-client --register"
            utils_lib.run_cmd(
                self,
                cmd,
                msg='check if insights-client can register successfully')
            self.fail(
                "timeout({}s) to wait auto subscription registered completed"
                .format(timeout))
        self.log.info(
            'wait {}s and try to check again, timeout {}s'.format(
                interval, timeout))
        time.sleep(interval)
def test_z_nitro_enclaves(self):
    '''
    case_name: test_check_nitro_enclaves
    case_priority: 1
    component: kernel
    bugzilla_id: 2011739
    polarion_id: n/a
    maintainer: [email protected]
    description: Test whether nitro enclave works when it is enabled.
    key_steps:
        1.$ sudo dnf groupinstall "Development Tools"
        2.$ for version under 9 add centos docker repo and for rhel9 add fedora34 docker repo
        3.$ sudo dnf install docker-ce docker-ce-cli containerd.io -y
        4.$ sudo systemctl start docker
        5.$ sudo systemctl enable docker
        6.$ sudo usermod -aG docker $USER and re-login
        7.$ git clone https://github.com/aws/aws-nitro-enclaves-cli.git
        8.$ cd aws-nitro-enclaves-cli/ and change bootstrap/nitro-cli-config,bootstrap/env.sh,Makefile
        9.$ export NITRO_CLI_INSTALL_DIR=/
        10.$ make nitro-cli
        11.$ make vsock-proxy
        12.$ sudo make NITRO_CLI_INSTALL_DIR=/ install
        13.$ source /etc/profile.d/nitro-cli-env.sh
        14.$ echo source /etc/profile.d/nitro-cli-env.sh >> ~/.bashrc
        15.$ nitro-cli-config -i
        16.$ sudo systemctl enable nitro-enclaves-allocator.service && sudo systemctl start nitro-enclaves-allocator.service
        17.$ nitro-cli build-enclave --docker-dir /usr/share/nitro_enclaves/examples/hello --docker-uri hello:latest --output-file hello.eif
        18.$ nitro-cli run-enclave --cpu-count 2 --memory 512 --enclave-cid 16 --eif-path hello.eif --debug-mode
        19.$ nitro-cli describe-enclaves
        20.$ nitro-cli console --enclave-id $EnclaveID
        21.$ nitro-cli terminate-enclave --enclave-id $EnclaveID
    expected_result: Enclave can be started and terminated successfully.
    '''
    if not utils_lib.is_aws(self):
        self.skipTest('encalve is only for aws platform')
    # The nitro_enclaves device only exists on enclave-enabled instances.
    cmd = 'ls -l /sys/devices/virtual/misc/nitro_enclaves/dev'
    utils_lib.run_cmd(self, cmd, cancel_not_kw='cannot access',
                      msg='check instance enclave is enabled')
    output = utils_lib.run_cmd(self, "sudo nitro-cli -V",
                               msg='check if nitro-cli exists')
    # Skip the build-from-source steps later if nitro-cli is already there.
    if 'Nitro CLI' in output:
        cli_installed = True
    else:
        cli_installed = False
    product_name = utils_lib.get_os_release_info(self, field='NAME')
    if 'Red Hat Enterprise Linux' not in product_name:
        self.skipTest('Only support run in RHEL for now.')
    # Files to patch in the nitro-cli source tree, and the first/last
    # marker lines of the region to delete in each (driver-reinsertion
    # logic that does not apply to this setup).
    update_files = [
        "aws-nitro-enclaves-cli/bootstrap/nitro-cli-config",
        "aws-nitro-enclaves-cli/bootstrap/env.sh",
        "aws-nitro-enclaves-cli/Makefile"
    ]
    first = [
        "# Remove an older driver if it is inserted.",
        "lsmod | grep -q nitro_enclaves || ",
        "install: install-tools nitro_enclaves"
    ]
    end = [
        '[ "$(lsmod | grep -cw $DRIVER_NAME)" -eq 1 ] || fail "The driver is not visible."',
        'sudo insmod ${NITRO_CLI_INSTALL_DIR}/lib/modules/extra/nitro_enclaves/nitro_enclaves.ko',
        '${NITRO_CLI_INSTALL_DIR}/lib/modules/$(uname -r)/extra/nitro_enclaves/nitro_enclaves.ko'
    ]
    utils_lib.run_cmd(self, 'sudo dnf groupinstall "Development Tools" -y',
                      msg='install development tools', timeout=300)
    product_id = utils_lib.get_product_id(self)
    if float(product_id) < 9:
        # Before el9: use the upstream centos docker-ce repo directly.
        utils_lib.run_cmd(
            self,
            'sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo',
            msg='add docker repo')
        utils_lib.run_cmd(
            self,
            'sudo dnf config-manager --save --setopt=*docker*.gpgcheck=0')
        utils_lib.run_cmd(
            self,
            'sudo dnf install docker-ce docker-ce-cli containerd.io --allowerasing -y',
            expect_ret=0,
            msg='install docker',
            timeout=300)
    else:
        # el9: no centos9 docker repo at the time — borrow the fedora 34
        # repos and pin $releasever accordingly.
        utils_lib.run_cmd(
            self,
            'echo -e "[fedora]\nname=Fedora 34 - \$basearch\nbaseurl=https://download-ib01.fedoraproject.org/pub/fedora/linux/releases/34/Everything/\$basearch/os/" |sudo tee /etc/yum.repos.d/fedora34.repo'
        )
        utils_lib.run_cmd(
            self,
            'sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo',
            msg='add docker repo')
        utils_lib.run_cmd(
            self,
            "sudo sed -i 's/$releasever/34/g' /etc/yum.repos.d/docker-ce.repo"
        )
        utils_lib.run_cmd(
            self,
            'sudo dnf config-manager --save --setopt=*docker*.gpgcheck=0')
        utils_lib.run_cmd(self, 'sudo dnf module disable container-tools -y')
        utils_lib.run_cmd(
            self,
            'sudo dnf install docker-ce docker-ce-cli containerd.io --nogpgcheck --allowerasing -y',
            expect_ret=0,
            msg='install docker',
            timeout=300)
        utils_lib.run_cmd(self, 'sudo dnf module enable container-tools -y')
        utils_lib.run_cmd(self, 'sudo dnf config-manager --disable fedora')
    utils_lib.run_cmd(self, 'sudo systemctl enable --now docker',
                      expect_ret=0, msg='enable and start docker')
    utils_lib.run_cmd(self, 'sudo usermod -aG docker $USER',
                      msg='add to the docker group')
    utils_lib.run_cmd(self, 'sudo chmod a+rw /var/run/docker.sock')
    utils_lib.run_cmd(
        self,
        'git clone https://github.com/aws/aws-nitro-enclaves-cli.git',
        msg='clone nitro-enclaves-cli ')
    if not cli_installed:
        # Strip the driver-handling region from each file, then restore
        # a plain "install: install-tools" target in the Makefile.
        for i in range(3):
            first_row = int(
                utils_lib.run_cmd(
                    self,
                    f"grep -n '{first[i]}' {update_files[i]} | cut -f1 -d:"
                ))
            end_row = int(
                utils_lib.run_cmd(
                    self,
                    f"grep -n '{end[i]}' {update_files[i]} | cut -f1 -d:"))
            utils_lib.run_cmd(
                self,
                f"sed -i '{first_row}, {end_row}d' {update_files[i]}")
        utils_lib.run_cmd(
            self,
            f"sed -i '{first_row}i\install: install-tools' aws-nitro-enclaves-cli/Makefile "
        )
        utils_lib.run_cmd(
            self,
            'cd aws-nitro-enclaves-cli ; export NITRO_CLI_INSTALL_DIR=/ && make nitro-cli && make vsock-proxy;sudo make NITRO_CLI_INSTALL_DIR=/ install',
            timeout=1200,
            msg='make and install nitro-cli')
        cmd = 'ls -l /etc/profile.d/nitro-cli-env.sh'
        utils_lib.run_cmd(self, cmd, expect_ret=0,
                          msg='check nitro-cli-env.sh exists')
        utils_lib.run_cmd(
            self,
            'echo source /etc/profile.d/nitro-cli-env.sh >> ~/.bashrc')
        utils_lib.run_cmd(
            self,
            'source /etc/profile.d/nitro-cli-env.sh ; cd aws-nitro-enclaves-cli ; timeout 5 nitro-cli-config -i',
            expect_not_kw='Could not')
    utils_lib.run_cmd(
        self,
        'sudo systemctl enable nitro-enclaves-allocator.service && sudo systemctl start nitro-enclaves-allocator.service'
    )
    utils_lib.run_cmd(
        self,
        'nitro-cli build-enclave --docker-dir /usr/share/nitro_enclaves/examples/hello --docker-uri hello:latest --output-file hello.eif',
        msg='build enclave')
    utils_lib.run_cmd(
        self,
        'sudo nitro-cli run-enclave --cpu-count 2 --memory 512 --enclave-cid 16 --eif-path hello.eif --debug-mode',
        expect_kw='Started',
        msg='run enclave')
    EnclaveID = utils_lib.run_cmd(
        self,
        'nitro-cli describe-enclaves |grep EnclaveID',
        msg='get EnclaveID')
    # Slice the id value out of the '"EnclaveID": "..."' JSON line.
    # NOTE(review): fixed-offset slicing assumes exact JSON layout —
    # fragile if describe-enclaves output formatting changes.
    EnclaveID = EnclaveID[18:-3]
    utils_lib.run_cmd(
        self,
        f'timeout 10 nitro-cli console --enclave-id {EnclaveID}',
        expect_kw='Successfully',
        msg='get the console')
    utils_lib.run_cmd(
        self,
        f'sudo nitro-cli terminate-enclave --enclave-id {EnclaveID}',
        expect_kw='"Terminated": true',
        msg='terminate enclave')
def rhel_x_version(self):
    """Return the major (X) release number of the product id as an int."""
    product_id = utils_lib.get_product_id(self)
    major, _, _ = product_id.partition('.')
    return int(major)
def test_subscription_manager_auto(self):
    '''
    bz: 1932802, 1905398
    Verify rhsmcertd auto-registration completes on a RHUI image:
    shorten the registration interval to 1 minute, then poll status
    until registration (and insights-client) succeed or 600s elapse.
    '''
    # Only applicable to RHUI images; cancel otherwise.
    cmd = "sudo rpm -qa|grep rhui"
    utils_lib.run_cmd(self, cmd, cancel_ret='0',
                      msg='skip test if rhui is not installed')
    product_id = utils_lib.get_product_id(self)
    if float(product_id) < 8.4:
        self.skipTest('skip in earlier than el8.4')
    self.log.info("Auto registeration only supports aws platform for now.")
    # Image must already ship with auto_registration=1, manage_repos=0.
    cmd = "sudo subscription-manager config"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      expect_kw="auto_registration = 1,manage_repos = 0",
                      msg='try to check subscription-manager config')
    cmd = "sudo systemctl is-enabled rhsmcertd"
    utils_lib.run_cmd(self, cmd, expect_ret=0,
                      msg='try to check rhsmcertd enabled')
    # Shrink the interval so registration happens within the test window.
    cmd = "sudo subscription-manager config --rhsmcertd.auto_registration_interval=1"
    utils_lib.run_cmd(
        self,
        cmd,
        expect_ret=0,
        msg=
        'try to change rhsmcertd.auto_registration_interval from 60min to 1min'
    )
    cmd = "sudo systemctl restart rhsmcertd"
    utils_lib.run_cmd(self, cmd, expect_ret=0, msg='restart rhsmcertd')
    # Poll every 60s for up to 600s; dump the rhsm logs each round.
    start_time = time.time()
    timeout = 600
    interval = 60
    while True:
        cmd = 'sudo cat /var/log/rhsm/rhsmcertd.log'
        utils_lib.run_cmd(self, cmd, msg='try to check rhsmcertd.log')
        cmd = 'sudo cat /var/log/rhsm/rhsm.log'
        utils_lib.run_cmd(self, cmd, msg='try to check rhsm.log')
        cmd = "sudo subscription-manager identity"
        out = utils_lib.run_cmd(self, cmd,
                                msg='try to check subscription identity')
        cmd = "sudo subscription-manager list --installed"
        out = utils_lib.run_cmd(
            self, cmd, msg='try to list currently installed on the system')
        cmd = "sudo subscription-manager status"
        out = utils_lib.run_cmd(self, cmd,
                                msg='try to check subscription status')
        if 'Red Hat Enterprise Linux' in out or 'Simple Content Access' in out:
            self.log.info("auto subscription registered completed")
            cmd = "sudo insights-client --register"
            utils_lib.run_cmd(
                self,
                cmd,
                msg='check if insights-client can register successfully')
            break
        end_time = time.time()
        if end_time - start_time > timeout:
            # Last attempt at insights registration before failing.
            cmd = "sudo insights-client --register"
            utils_lib.run_cmd(
                self,
                cmd,
                msg='check if insights-client can register successfully')
            self.fail(
                "timeout({}s) to wait auto subscription registered completed"
                .format(timeout))
        self.log.info(
            'wait {}s and try to check again, timeout {}s'.format(
                interval, timeout))
        time.sleep(interval)
def test_ethtool_S_xdp(self):
    '''
    case_name: test_ethtool_S_xdp
    case_priority: 2
    component: kernel
    bugzilla_id: 1908542
    polarion_id: n/a
    maintainer: [email protected]
    description: Use ethtool to query the specified network device xdp statistics.
    key_steps:
        1. # ethtool -S $nic |grep xdp
    expected_result: xdp status found, e.g. queue_0_rx_xdp_* counters
        listed in the ethtool statistics output.
    '''
    product_id = utils_lib.get_product_id(self)
    driver_info = utils_lib.run_cmd(self,
                                    "sudo ethtool -i {}".format(self.nic),
                                    expect_ret=0)
    xdp_query = "ethtool -S {}|grep xdp".format(self.nic)
    if 'ena' in driver_info:
        self.log.info('ena driver found!')
        if float(product_id) <= 8.4:
            self.skipTest('ena driver does not support xdp prior to 8.4')
        utils_lib.run_cmd(self, xdp_query, expect_ret=0,
                          msg='Check if have xdp information')
    else:
        utils_lib.run_cmd(self, xdp_query, cancel_ret='0',
                          msg='Check if have xdp support')
    # xdp-loader/xdp-filter round trip is only exercised on x86_64 el8.4+.
    if float(product_id) > 8.3 and utils_lib.is_arch(self, arch='x86_64'):
        utils_lib.is_cmd_exist(self, 'xdp-loader')
        utils_lib.run_cmd(self, 'sudo xdp-loader status', expect_ret=0,
                          msg='Check xdp-loader status')
        utils_lib.run_cmd(self,
                          'sudo xdp-loader unload -a {}'.format(self.nic),
                          msg='unload xdp-filter if have')
        utils_lib.run_cmd(self,
                          'sudo xdp-filter load --mode skb {}'.format(self.nic),
                          expect_ret=0, msg='load xdp-filter')
        utils_lib.run_cmd(self, 'sudo xdp-loader status', expect_ret=0,
                          expect_kw='XDP_PASS',
                          msg='Check xdp-loader status again')
        utils_lib.run_cmd(self,
                          'sudo xdp-loader unload -a {}'.format(self.nic),
                          expect_ret=0, msg='unload xdp-filter')
        utils_lib.run_cmd(self, 'sudo xdp-loader status', expect_ret=0,
                          expect_not_kw='XDP_PASS',
                          msg='Check xdp-loader status again')