def setUp(self):
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
     self.session = self.cloud.init_vm(pre_delete=False, pre_stop=False)
     self.rhel_ver = self.params.get('rhel_ver', '*/VM/*', '')
     self.pwd = os.path.abspath(os.path.dirname(__file__))
     self.dest_dir = "/tmp/"
 def setUp(self):
     self.casestatus = False
     account = AzureAccount(self.params)
     account.login()
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     status, output = self.session.cmd_status_output('sudo su -')
     self.assertEqual(status, 0,
                      "User [root] login failed\n{}".format(str(output)))
     if self.name.name.endswith("test_chrony"):
         status, output = self.session.cmd_status_output(
             'systemctl is-enabled chronyd')
         self.assertEqual(status, 0, "There isn't chrony installed")
         self.assertEqual(output.strip(), "enabled", "Chrony isn't enabled")
     elif self.name.name.endswith("test_clocksource_performance"):
         status, output = self.session.cmd_status_output('which gcc')
         if status != 0:
             status, output = self.session.cmd_status_output(
                 'rpm -qa | grep rhui-azure')
             if status != 0:
                 status, output = self.session.cmd_status_output(
                     'rpm -ivh `ls /root/rhui-azure*`')
                 self.assertEqual(status, 0, "Failed to install rhui-azure")
             status, output = self.session.cmd_status_output(
                 'yum -y install gcc', timeout=400)
             self.assertEqual(status, 0, "Failed to install gcc")
Example 3
def init_test(test_ins, instance_index=0):
    '''
    Prepare VMs before starting the test.
    test_ins: Test class instance
    instance_index: get the specified instance
    '''
    cloud = Setup(test_ins.params, test_ins.name)
    test_ins.vm = cloud.vm
    test_ins.snap = None
    test_ins.kdump_status = False
    test_ins.cpu_count = 0
    test_ins.ssh_wait_timeout = set_ssh_wait_timeout(test_ins.vm)

    pre_delete = False
    pre_stop = False
    if test_ins.name.name.endswith("test_create_vm"):
        pre_delete = True

    test_ins.log.info("Test tempdir: %s" % test_ins.teststmpdir)
    test_ins.vm.instance_id = get_exists_resource_id(
        test_ins.teststmpdir,
        test_ins.vm.instance_type,
        resource_index=instance_index)
    query_resource_blacklist(test_ins)
    if test_ins.vm.instance_id is not None:
        if test_ins.vm.reuse_init(test_ins.vm.instance_id):
            test_ins.log.info("Reuse existing instance %s!" %
                              test_ins.vm.instance_id)
            if pre_delete:
                test_ins.log.info("Test needs no reuse!")
                test_ins.vm.delete(wait=True)
                cleanup_stored(test_ins.teststmpdir,
                               test_ins.params,
                               resource_id=test_ins.vm.instance_id)
                test_ins.vm = None
        else:
            test_ins.log.info(
                "No match existing instance, will create new one!")
            cleanup_stored(test_ins.teststmpdir,
                           test_ins.params,
                           resource_id=test_ins.vm.instance_id)
            pre_delete = True
    if test_ins.name.name.endswith("test_start_vm"):
        pre_stop = True
    if test_ins.vm is None:
        cloud = Setup(test_ins.params, test_ins.name)
        test_ins.vm = cloud.vm
        test_ins.snap = None
        test_ins.kdump_status = False
        test_ins.cpu_count = 0
        test_ins.ssh_wait_timeout = set_ssh_wait_timeout(test_ins.vm)
    test_ins.session = cloud.init_vm(pre_delete=pre_delete, pre_stop=pre_stop)
    # query_resource_blacklist(test_ins)
    test_ins.log.info("Instance id is %s" % test_ins.vm.instance_id)
    save_exists_resource_id(test_ins.teststmpdir, test_ins.vm)
    if test_ins.vm.is_stopped() and not pre_stop:
        test_ins.vm.start()
    if not test_ins.name.name.endswith("test_cleanup") and not pre_stop:
        check_session(test_ins)
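
For context, a minimal sketch of how a test case would consume init_test from its setUp; the class name and test body here are illustrative, not part of the suite, and assume avocado's Test base class is imported:

class KernelTest(Test):  # hypothetical avocado test class
    def setUp(self):
        # init_test provisions (or reuses) an instance, stores vm/session
        # on the test instance, and verifies the SSH session unless the
        # case is a cleanup or pre-stopped case.
        init_test(self)

    def test_uname(self):
        self.session.cmd_output('uname -r')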
    def test_check_firstlaunch_compare(self):
        '''
        :avocado: tags=test_check_firstlaunch_compare
        polarion_id:
        bz#: 1862930
        compare the first launch boot time with Amazon Linux 2 and Ubuntu.
        '''
        self.log.info("3 Nodes (RHEL, AMZ, Ubuntu) needed!")
        max_boot_time = self.params.get('max_boot_time')
        rhel_boot_time_sec = utils_lib.getboottime(self)
        self.rhel_session = self.session
        self.rhel_vm = self.vm
        self.amz_session = None
        self.amz_vm = None
        self.ubuntu_session = None
        self.ubuntu_vm = None
        if utils_lib.is_arm(self):
            cloud = Setup(self.params, self.name, vendor="amzn2_arm")
        else:
            cloud = Setup(self.params, self.name, vendor="amzn2_x86")
        self.amz_vm = cloud.vm
        self.amz_session = cloud.init_vm()
        utils_lib.run_cmd(self,
                          'uname -a',
                          vm=self.amz_vm,
                          session=self.amz_session,
                          msg="Get Amazon Linux 2 version")
        amz_boot_time_sec = utils_lib.getboottime(self,
                                                  vm=self.amz_vm,
                                                  session=self.amz_session)
        self.amz_vm.delete()

        if utils_lib.is_arm(self):
            cloud = Setup(self.params, self.name, vendor="ubuntu_arm")
        else:
            cloud = Setup(self.params, self.name, vendor="ubuntu_x86")
        self.ubuntu_vm = cloud.vm
        self.ubuntu_session = cloud.init_vm()
        utils_lib.run_cmd(self,
                          'uname -a',
                          vm=self.ubuntu_vm,
                          session=self.ubuntu_session,
                          msg="Get Ubuntu version")
        ubuntu_boot_time_sec = utils_lib.getboottime(
            self, vm=self.ubuntu_vm, session=self.ubuntu_session)
        self.ubuntu_vm.delete()
        ratio = self.params.get('boottime_max_ratio')
        utils_lib.compare_nums(self,
                               num1=rhel_boot_time_sec,
                               num2=amz_boot_time_sec,
                               ratio=ratio,
                               msg="Compare with Amazon Linux 2 boot time")
        utils_lib.compare_nums(self,
                               num1=rhel_boot_time_sec,
                               num2=ubuntu_boot_time_sec,
                               ratio=ratio,
                               msg="Compare with Ubuntu boot time")
Example 5
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     self.project = self.params.get("rhel_ver", "*/VM/*")
     cloud = Setup(self.params, self.name, size="D2_v2")
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     self.session.cmd_output("sudo su -")
     self.username = self.vm.vm_username
 def setUp(self):
     self.casestatus = False
     account = AzureAccount(self.params)
     account.login()
     self.pwd = os.path.abspath(os.path.dirname(__file__))
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm(pre_delete=True)
     self.packages = self.params.get("packages", "*/Other/*")
     self.package_list = self.packages.split(',')
     self.log.debug("Package list: {}".format(self.package_list))
     self.with_wala = self.params.get("with_wala", "*/others/*", False)
Example 7
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     if "interrupt_ctrl_c" in self.name.name:
         self.session1 = cloud.init_session()
         self.session1.connect()
     status, output = self.session.cmd_status_output('sudo su -')
     self.assertEqual(status, 0,
                      "User [root] login failed\n{}".format(str(output)))
 def setUp(self):
     self.cloud = Setup(self.params, self.name, create_timeout=300)
     self.vm = self.cloud.vm
     self.ssh_wait_timeout = 600
     pre_delete = False
     pre_stop = False
     if self.name.name.endswith(
             "test_cloudinit_create_vm_login_repeatedly"):
         return
     if self.name.name.endswith("test_cloudinit_login_with_publickey"):
         pre_delete = True
     self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                       pre_stop=pre_stop)
 def setUp(self):
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
     pre_delete = False
     pre_stop = False
     if self.name.name.endswith("test_coldplug_nics"):
         pre_stop = True
     if not self.vm.nic_count or self.vm.nic_count < 2:
         self.cancel("No nic count. Skip this case.")
     self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                       pre_stop=pre_stop)
     if self.name.name.endswith("test_hotplug_nics") or \
        self.name.name.endswith("test_coldplug_nics"):
         self.cloud.init_nics(self.vm.nic_count)
         self.primary_nic_id = self.cloud.primary_nic_id
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     self.project = self.params.get("rhel_ver", "*/VM/*")
     if LooseVersion(self.project) == LooseVersion("8.0.0") or \
             LooseVersion(self.project) == LooseVersion("8.0"):
         self.cancel(
             "Azure Linux Extensions are not supported in RHEL-8.0. Skip.")
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     self.session.cmd_output("sudo su -")
     self.username = self.vm.vm_username
     self.new_username = self.username + "new"
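
The double LooseVersion check against both "8.0.0" and "8.0" is deliberate: LooseVersion compares component lists, so the two spellings are ordered but never equal. A self-contained illustration:

from distutils.version import LooseVersion

# Component-list comparison: "8.0" parses to [8, 0] and "8.0.0" to
# [8, 0, 0]; the lists compare as [8, 0] < [8, 0, 0], never equal,
# hence both explicit checks above.
assert LooseVersion("8.0") < LooseVersion("8.1")
assert LooseVersion("8.0") != LooseVersion("8.0.0")
assert LooseVersion("8.0") < LooseVersion("8.0.0")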
Example 11
 def setUp(self):
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
     pre_delete = False
     pre_stop = False
     if self.name.name.endswith("test_create_vm_password"):
         if self.vm.exists():
             self.vm.delete(wait=True)
         self.session = self.cloud.init_session()
         return
     if self.name.name.endswith("test_create_vm_sshkey"):
         pre_delete = True
     if self.name.name.endswith("test_start_vm"):
         pre_stop = True
     self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                       pre_stop=pre_stop)
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     if "interrupt_ctrl_c" in self.name.name:
         self.session1 = cloud.init_session()
         self.session1.connect()
     status, output = self.session.cmd_status_output('sudo su -')
     self.assertEqual(status, 0,
                      "User [root] login failed\n{}".format(str(output)))
     # Must stop NetworkManager or it will regenerate /etc/resolv.conf in RHEL-8.4
     if "test_waagent_depro" in self.case_short_name:
         self.session.cmd_output("systemctl stop NetworkManager")
    def setUp(self):
        self.account = IbmcloudAccount(self.params)
        self.account.login()
        self.service = Service(self.params)
        self.service.target()
        self.project = self.params.get("rhel_ver", "*/VM/*")
        self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
        self.pwd = os.path.abspath(os.path.dirname(__file__))

        self.cloud = Setup(self.params, self.name, create_timeout=300)
        self.vm = self.cloud.vm
        self.ssh_wait_timeout = 600
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith("test_cloudinit_login_with_publickey"):
            pre_delete = True

        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     self.project = self.params.get("rhel_ver", "*/VM/*")
     if self.case_short_name == "test_verify_storage_rule_gen2":
         # cloud.vm.vm_name += "-gen2"
         # self.image = AzureImage(self.params, generation="V2")
         # if not self.image.exists():
         #     self.image.create()
         # cloud.vm.image = self.image.name
         # cloud.vm.use_unmanaged_disk = False
         size = "DC2s"
     else:
         size = "DS2_v2"
     cloud = Setup(self.params, self.name, size=size)
     self.vm = cloud.vm
     self.session = cloud.init_vm()
     self.session.cmd_output("sudo su -")
     self.username = self.vm.vm_username
 def setUp(self):
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
     pre_delete = False
     pre_stop = False
     if self.name.name.endswith(
             "test_offline_attach_detach_cloud_disks"
     ) or self.name.name.endswith(
             "test_offline_attach_detach_scsi_cloud_disks"):
         pre_stop = True
     self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                       pre_stop=pre_stop)
     self.cloud_disk_count = self.params.get('cloud_disk_count', '*/Disk/*')
     self.cloud_disk_size = self.params.get('cloud_disk_size', '*/Disk/*')
     self.local_disk_count = self.params.get(
         'disk_count', '*/{0}/*'.format(self.vm.flavor), 0)
     self.local_disk_size = self.params.get(
         'disk_size', '*/{0}/*'.format(self.vm.flavor), 0)
     self.local_disk_type = self.params.get(
         'disk_type', '*/{0}/*'.format(self.vm.flavor), "")
     if self.name.name.endswith("test_local_disks"):
         if self.local_disk_count == 0:
             self.cancel("No local disk. Skip this case.")
     self.disk_ids = self.cloud.init_cloud_disks(self.cloud_disk_count)
     if self.cloud.cloud_provider == "huawei" and \
        self.params.get('virt', '*/{0}/*'.format(self.vm.flavor)) == "kvm":
         self.scsi_disk = True
         self.disk_ids_scsi = self.cloud.init_cloud_disks(
             self.cloud_disk_count, scsi=True)
     if self.cloud.cloud_provider == "alibaba":
         self.dev_name = "vd"
     elif self.cloud.cloud_provider == "huawei" and \
             self.params.get(
                 'virt', '*/{0}/*'.format(self.vm.flavor)) == "xen":
         self.dev_name = "xvd"
     elif self.cloud.cloud_provider == "huawei" and \
             self.params.get(
                 'virt', '*/{0}/*'.format(self.vm.flavor)) == "kvm":
         self.dev_name = "vd"
     else:
         self.dev_name = "vd"
Example 16
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     pre_delete = False
     pre_stop = False
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     if self.case_short_name == "test_create_vm_all":
         cloud.vm.vm_name += "-all"
         cloud.vm.authentication_type = "all"
         self.vm.create()
         self.session = cloud.init_session()
         return
     if self.case_short_name == "test_create_vm_password":
         self.vm.vm_name += "-password"
         self.vm.authentication_type = "password"
         self.vm.generate_ssh_keys = False
         self.vm.ssh_key_value = None
         self.vm.create()
         self.session = cloud.init_session()
         return
     if self.case_short_name == "test_create_vm_sshkey":
         if self.vm.exists():
             self.vm.delete(wait=True)
         self.vm.create(wait=True)
         self.session = cloud.init_session()
     if self.case_short_name == "test_start_vm":
         pre_stop = True
     self.session = cloud.init_vm(pre_delete=pre_delete, pre_stop=pre_stop)
 def setUp(self):
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
     pre_delete = False
     pre_stop = False
     if self.name.name.endswith(
             "test_offline_attach_detach_cloud_disks"
     ) or self.name.name.endswith(
             "test_offline_attach_detach_scsi_cloud_disks"):
         pre_stop = True
     self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                       pre_stop=pre_stop)
     self.cloud_disk_count = self.params.get('cloud_disk_count', '*/Disk/*')
     self.cloud_disk_size = self.params.get('cloud_disk_size', '*/Disk/*')
     self.local_disk_count = self.vm.disk_count
     self.local_disk_size = self.vm.disk_size
     self.local_disk_type = self.vm.disk_type
     if self.name.name.endswith("test_local_disks"):
         if self.local_disk_count == 0:
             self.cancel("No local disk. Skip this case.")
     self.disk_ids = self.cloud.init_cloud_disks(self.cloud_disk_count)
     self.dev_name = "vd"
 def setUp(self):
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.session = cloud.init_vm(pre_delete=False, pre_stop=False)
class CloudDiskTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name)
        self.vm = self.cloud.vm
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith(
                "test_offline_attach_detach_cloud_disks"
        ) or self.name.name.endswith(
                "test_offline_attach_detach_scsi_cloud_disks"):
            pre_stop = True
        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)
        self.cloud_disk_count = self.params.get('cloud_disk_count', '*/Disk/*')
        self.cloud_disk_size = self.params.get('cloud_disk_size', '*/Disk/*')
        self.local_disk_count = self.vm.disk_count
        self.local_disk_size = self.vm.disk_size
        self.local_disk_type = self.vm.disk_type
        if self.name.name.endswith("test_local_disks"):
            if self.local_disk_count == 0:
                self.cancel("No local disk. Skip this case.")
        self.disk_ids = self.cloud.init_cloud_disks(self.cloud_disk_count)
        self.dev_name = "vd"

    def _cloud_disk_test(self,
                         initial="b",
                         disk_count=None,
                         disk_type=None,
                         disk_size=None):
        if not disk_count:
            disk_count = self.cloud_disk_count
        if not disk_size:
            disk_size = self.cloud_disk_size
        for i in range(1, disk_count + 1):
            if disk_type == "nvme":
                dev_fullname = "nvme%sn1" % (i - 1)
            else:
                if disk_type == "scsi":
                    dev_name = "sd"
                else:
                    dev_name = self.dev_name
                delta = ord(initial) - 97 + i
                if delta <= 26:
                    idx = chr(96 + delta)
                else:
                    idx = 'a' + chr(96 + delta % 26)
                dev_fullname = dev_name + idx
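            # Worked example: with initial='b' and i=1, delta is
            # ord('b') - 97 + 1 = 2, so idx is chr(98) = 'b' and the device
            # is vdb; names run vdb..vdz, then roll over to vdaa past 26.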
            self._verify_disk(dev_fullname, disk_size)

    def _verify_disk(self, dev_fullname, disk_size):
        cmd = "fdisk -l /dev/{0} | grep /dev/{0}"
        output = self.session.cmd_output(cmd.format(dev_fullname))

        # WORKAROUND: Alibaba local volume untrimmed issue
        if 'GPT' in output:
            self.log.info('WORKAROUND: Alibaba local volume untrimmed issue.')
            self.session.cmd_output(
                'dd if=/dev/zero of=/dev/{0} bs=5000 count=1'.format(
                    dev_fullname))
            output = self.session.cmd_output(cmd.format(dev_fullname))

        # if output.split(',')[0].split(' ')[3] == "GB":
        #     expected_size = float(disk_size)*(1.024**3)
        # elif output.split(',')[0].split(' ')[3] == "GiB":
        #     expected_size = float(disk_size)
        # else:
        #     self.fail("Attach disk size unit is not GB or GiB.")
        #
        # real_size = float(output.split(',')[0].split(' ')[2])

        # Get the real size in bytes
        # The outputs for `fdisk -l /dev/{0} | grep /dev/{0}`:
        # (RHEL7.6)
        # Disk /dev/vdd: 107.4 GB, 107374182400 bytes, 209715200 sectors
        # (RHEL8.0)
        # Disk /dev/vdb: 1.8 TiB, 1919850381312 bytes, 3749707776 sectors
        if output.split(',')[1].split(' ')[2] == "bytes":
            real_size = int(output.split(',')[1].split(' ')[1])
        else:
            self.fail("Fail to get the real disk size.")

        # The disk_size is specified in GiB; convert it to bytes
        expected_size = int(disk_size) * (1024**3)
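        # Worked example: a 100 GiB disk gives 100 * 1024**3 =
        # 107374182400 bytes, matching the RHEL7.6 sample line above.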

        self.log.info(
            "real_size: {0}; expected_size: {1}; delta: 1/1000.".format(
                real_size, expected_size))

        self.assertAlmostEqual(first=real_size,
                               second=expected_size,
                               delta=expected_size / 1000.0,
                               msg="Attach disk size is not as expected.\n\
Real: {0}; Expected: {1}".format(real_size, expected_size))

        # Make a 10GB partition in case the whole disk is too large (1800G for
        # local disk)
        cmd = "parted /dev/{0} mklabel msdos -s"
        self.session.cmd_output(cmd.format(dev_fullname))
        cmd = "parted /dev/{0} mkpart primary ext4 0 10GB -s"
        self.session.cmd_output(cmd.format(dev_fullname))
        # cmd = "fdisk -l /dev/{0}|grep -o '^/dev/[a-z0-9]*'|cut -b 6-"
        # part_fullname = self.session.cmd_output(cmd.format(dev_fullname))
        cmd = "fdisk -l /dev/{0}|grep -o '^/dev/[a-z0-9]*'"
        part_fullname = self.session.cmd_output(
            cmd.format(dev_fullname)).strip().split(" ")[0].split("/")[2]
        cmd = "[[ -d /mnt/{0} ]] || mkdir /mnt/{0}"
        self.session.cmd_output(cmd.format(part_fullname))
        cmd = "mkfs.ext4 -F /dev/{0};mount /dev/{0} /mnt/{0} && \
echo test_content > /mnt/{0}/test_file"

        self.session.cmd_output(cmd.format(part_fullname), timeout=60)
        cmd = "cat /mnt/{0}/test_file"
        output = self.session.cmd_output(cmd.format(part_fullname))
        self.assertEqual(
            output, "test_content",
            "Cannot write files on attached disk.\n {0}".format(output))
        cmd = "umount /mnt/{0}"
        self.session.cmd_output(cmd.format(part_fullname))
        cmd = "parted /dev/{0} rm 1"
        self.session.cmd_output(cmd.format(dev_fullname))

    def test_online_attach_detach_cloud_disks(self):
        self.log.info("Online attach a cloud disk to VM")
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for disk_id in self.disk_ids:
            if self.dev_name == "xvd":
                dev = "sd"
            else:
                dev = "vd"
            if self.local_disk_type == "scsi" and dev == "sd":
                self.vm.attach_cloud_disks(
                    disk_id=disk_id,
                    dev=dev,
                    local_disk_count=self.local_disk_count,
                    wait=True)
            else:
                self.vm.attach_cloud_disks(disk_id=disk_id, dev=dev, wait=True)
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower().replace('_', '-'), u"in-use",
                             "Disk status is not in-use")
        self.session.cmd_output('sudo su -')

        if self.local_disk_type in ('ssd', 'hdd'):  # for alibaba
            self._cloud_disk_test(initial=chr(98 + self.local_disk_count))
        else:
            self._cloud_disk_test()

        self.log.info("Online detach a cloud disk to VM")
        for disk_id in self.disk_ids:
            self.vm.detach_cloud_disks(disk_id=disk_id, wait=True)
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for i in range(1, self.cloud_disk_count + 1):
            delta = self.local_disk_count + i
            if delta <= 25:
                idx = chr(97 + delta)
            else:
                idx = 'a' + chr(97 - 1 + delta % 25)
            cmd = "fdisk -l | grep /dev/%s%s"
            output = self.session.cmd_output(cmd % (self.dev_name, idx))
            self.assertEqual(output, "",
                             "Disk not detached.\n {0}".format(output))

    def test_online_attach_detach_scsi_cloud_disks(self):
        self.dev_name = "sd"
        if self.params.get('virt', '*/{0}/*'.format(self.vm.flavor)) != "kvm":
            self.log.info(
                "SCSI disk attach/detach only supported on KVM hypervisor")
            raise TestSkipError
        self.log.info("Online attach a scsi cloud disk to VM")
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for disk_id in self.disk_ids_scsi:
            dev = "sd"
            if self.local_disk_type == "scsi":
                self.vm.attach_cloud_disks(
                    disk_id=disk_id,
                    dev=dev,
                    local_disk_count=self.local_disk_count,
                    wait=True)
            else:
                self.vm.attach_cloud_disks(disk_id=disk_id, dev=dev, wait=True)
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower().replace('_', '-'), u"in-use",
                             "Disk status is not in-use")
        self.session.cmd_output('sudo su -')
        if self.local_disk_type == "scsi":
            self._cloud_disk_test(initial=chr(97 + self.local_disk_count))
        else:
            self._cloud_disk_test(initial="a")

        self.log.info("Online detach a scsi cloud disk to VM")
        for disk_id in self.disk_ids_scsi:
            self.vm.detach_cloud_disks(disk_id=disk_id, wait=True, scsi=True)
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for i in range(1, self.cloud_disk_count + 1):
            delta = self.local_disk_count + i
            if delta <= 25:
                idx = chr(97 + delta)
            else:
                idx = 'a' + chr(97 - 1 + delta % 25)
            cmd = "fdisk -l | grep /dev/%s%s"
            output = self.session.cmd_output(cmd % (self.dev_name, idx))
            self.assertEqual(output, "",
                             "Disk not detached.\n {0}".format(output))

    def test_offline_attach_detach_cloud_disks(self):
        # Set timeout for Alibaba baremetal
        if self.vm.flavor == 'ecs.ebmg5s.24xlarge':
            connect_timeout = 600
        else:
            connect_timeout = 120

        self.log.info("Offline attach a cloud disk to VM")
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for disk_id in self.disk_ids:
            if self.dev_name == "xvd":
                dev = "sd"
            else:
                dev = "vd"
            if self.local_disk_type == "scsi" and dev == "sd":
                self.vm.attach_cloud_disks(
                    disk_id=disk_id,
                    dev=dev,
                    local_disk_count=self.local_disk_count,
                    wait=True)
            else:
                self.vm.attach_cloud_disks(disk_id=disk_id, dev=dev, wait=True)
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower().replace('_', '-'), u"in-use",
                             "Disk status is not in-use")
        self.vm.start(wait=True)
        self.session.connect(timeout=connect_timeout)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Start VM error: output of cmd `who` unexpected -> %s" % output)
        self.session.cmd_output('sudo su -')

        if self.local_disk_type in ('ssd', 'hdd'):  # for alibaba
            self._cloud_disk_test(initial=chr(98 + self.local_disk_count))
        else:
            self._cloud_disk_test()

        self.log.info("Offline detach a cloud disk to VM")
        self.vm.stop(wait=True)
        self.assertTrue(self.vm.is_stopped(),
                        "Stop VM error: VM status is not SHUTOFF")
        for disk_id in self.disk_ids:
            self.vm.detach_cloud_disks(disk_id=disk_id, wait=True)
        vols = self.vm.query_cloud_disks()
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        self.vm.start(wait=True)
        self.session.connect(timeout=connect_timeout)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Start VM error: output of cmd `who` unexpected -> %s" % output)
        self.session.cmd_output('sudo su -')
        for i in range(1, self.cloud_disk_count + 1):
            delta = self.local_disk_count + i
            if delta <= 25:
                idx = chr(97 + delta)
            else:
                idx = 'a' + chr(97 - 1 + delta % 25)
            cmd = "fdisk -l | grep /dev/%s%s"
            output = self.session.cmd_output(cmd % (self.dev_name, idx))
            self.assertEqual(output, "",
                             "Disk not detached.\n {0}".format(output))

    def test_offline_attach_detach_scsi_cloud_disks(self):
        # Set timeout for Alibaba baremetal
        if self.vm.flavor == 'ecs.ebmg5s.24xlarge':
            connect_timeout = 600
        else:
            connect_timeout = 120

        self.dev_name = "sd"
        if self.params.get('virt', '*/{0}/*'.format(self.vm.flavor)) != "kvm":
            self.log.info(
                "SCSI disk attach/detach only supported on KVM hypervisor")
            raise TestSkipError
        self.log.info("Offline attach a cloud disk to VM")
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        for disk_id in self.disk_ids_scsi:
            dev = "sd"
            if self.local_disk_type == "scsi":
                self.vm.attach_cloud_disks(
                    disk_id=disk_id,
                    dev=dev,
                    local_disk_count=self.local_disk_count,
                    wait=True)
            else:
                self.vm.attach_cloud_disks(disk_id=disk_id, dev=dev, wait=True)
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower().replace('_', '-'), u"in-use",
                             "Disk status is not in-use")
        self.vm.start(wait=True)
        self.session.connect(timeout=connect_timeout)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Start VM error: output of cmd `who` unexpected -> %s" % output)
        self.session.cmd_output('sudo su -')
        if self.local_disk_type == "scsi":
            self._cloud_disk_test(initial=chr(97 + self.local_disk_count))
        else:
            self._cloud_disk_test(initial="a")

        self.log.info("Offline detach a cloud disk to VM")
        self.vm.stop(wait=True)
        self.assertTrue(self.vm.is_stopped(),
                        "Stop VM error: VM status is not SHUTOFF")
        for disk_id in self.disk_ids_scsi:
            self.vm.detach_cloud_disks(disk_id=disk_id, wait=True, scsi=True)
        vols = self.vm.query_cloud_disks(scsi=True)
        for vol in vols:
            s = vol.get("status") or vol.get("Status")
            self.assertEqual(s.lower(), u'available',
                             "Disk status is not available")
        self.vm.start(wait=True)
        self.session.connect(timeout=connect_timeout)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Start VM error: output of cmd `who` unexpected -> %s" % output)
        self.session.cmd_output('sudo su -')
        for i in range(1, self.cloud_disk_count + 1):
            delta = self.local_disk_count + i
            if delta <= 25:
                idx = chr(97 + delta)
            else:
                idx = 'a' + chr(97 - 1 + delta % 25)
            cmd = "fdisk -l | grep /dev/%s%s"
            output = self.session.cmd_output(cmd % (self.dev_name, idx))
            self.assertEqual(output, "",
                             "Disk not detached.\n {0}".format(output))

    def test_local_disks(self):
        self.log.info("Test local disks on VM")
        self.session.cmd_output('sudo su -')
        initial = 'b'
        self._cloud_disk_test(initial=initial,
                              disk_count=self.local_disk_count,
                              disk_type=self.local_disk_type,
                              disk_size=self.local_disk_size)

    def tearDown(self):
        self.log.info("TearDown")
class CloudinitTest(Test):
    def setUp(self):
        self.account = IbmcloudAccount(self.params)
        self.account.login()
        self.service = Service(self.params)
        self.service.target()
        self.project = self.params.get("rhel_ver", "*/VM/*")
        self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
        self.pwd = os.path.abspath(os.path.dirname(__file__))

        self.cloud = Setup(self.params, self.name, create_timeout=300)
        self.vm = self.cloud.vm
        self.ssh_wait_timeout = 600
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith("test_cloudinit_login_with_publickey"):
            pre_delete = True

        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)

    @property
    def _postfix(self):
        from datetime import datetime
        return datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")

    def test_cloudinit_login_with_publickey(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL7-103831 - CLOUDINIT-TC: VM can successfully login
        after provisioning (with public key authentication)
        1. Create a VM with only public key authentication
        2. Login with publickey, should have sudo privilege
        """
        self.log.info(
            "RHEL7-103831 - CLOUDINIT-TC: VM can successfully login after provisioning(with public key authentication)"
        )
        self.session.connect(authentication="publickey")
        self.assertEqual(self.vm.vm_username,
                         self.session.cmd_output("whoami"),
                         "Fail to login with publickey")
        self.assertIn(
            "%s ALL=(ALL) NOPASSWD:ALL" % self.vm.vm_username,
            self.session.cmd_output(
                "sudo cat /etc/sudoers.d/90-cloud-init-users"),
            "No sudo privilege")
        # Collect /var/log/cloud-init.log and /var/log/messages
        try:
            self.session.cmd_output("mkdir -p /tmp/logs")
            self.session.cmd_output(
                "sudo cp /var/log/cloud-init.log /tmp/logs/")
            self.session.cmd_output("sudo cp /var/log/messages /tmp/logs/")
            self.session.cmd_output("sudo chmod 644 /tmp/logs/*")
            host_logpath = os.path.dirname(self.job.logfile) + "/logs"
            command("mkdir -p {}".format(host_logpath))
            self.session.copy_files_from("/tmp/logs/*", host_logpath)
        except Exception:
            pass

    #def test_cloudinit_sdk_function(self):
    #self.vm.exists()

    def test_cloudinit_check_hostname(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL7-103833 - CLOUDINIT-TC: Successfully set VM hostname
        """
        output = self.session.cmd_output("hostname").split('.')[0]
        self.assertEqual(output, self.vm.vm_name.replace('_', '-'),
                         "The hostname is wrong")

    def test_cloudinit_check_service_status(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL-188130: WALA-TC: [Cloudinit] Check cloud-init service status
        The 4 cloud-init services status should be "active"
        """
        self.log.info(
            "RHEL-188130: WALA-TC: [Cloudinit] Check cloud-init service status"
        )
        service_list = [
            'cloud-init-local', 'cloud-init', 'cloud-config', 'cloud-final'
        ]
        for service in service_list:
            output = self.session.cmd_output(
                "sudo systemctl is-active {}".format(service))
            self.assertEqual(
                output, 'active',
                "{} status is not correct: {}".format(service, output))

    def _check_cloudinit_log(self, not_expect_msg):
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw=not_expect_msg,
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw=not_expect_msg,
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_log_no_traceback(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188134 - CLOUDINIT-TC: Check no "Traceback" keyword in /var/log/cloud-init.log
        check no traceback log in cloudinit logs
        '''
        self._check_cloudinit_log("Traceback")

    def test_cloudinit_check_log_no_unexpected(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188135 - CLOUDINIT-TC: Check no "unexpected" keyword in /var/log/cloud-init.log
        bz#: 1827207
        check no unexpected error log in cloudinit logs
        '''
        self._check_cloudinit_log("unexpected")

    def test_cloudinit_check_log_no_critical(self):
        '''
        :avocado: tags=tier1,cloudinit
        RHEL-188131 - CLOUDINIT-TC: Check no "CRITICAL" level message in /var/log/cloud-init.log
        bz#: 1827207
        check no critical log in cloudinit logs
        '''
        self._check_cloudinit_log("CRITICAL")

    def test_cloudinit_check_log_no_warn(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188133 - CLOUDINIT-TC: Check no "WARNING" level message in /var/log/cloud-init.log
        bz#: 1821999
        check no warning log in cloudinit logs
        '''
        self._check_cloudinit_log("WARNING")

    def test_cloudinit_check_log_no_error(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188132 - CLOUDINIT-TC: Check no "ERROR" level message in /var/log/cloud-init.log
        bz#: 1821999
        check no error log in cloudinit logs
        '''
        self._check_cloudinit_log("ERROR")

    def tearDown(self):
        self.account.logout()
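
The five keyword tests above differ only in the scanned keyword and tier tag; a data-driven sketch of the same coverage follows. The suite keeps one avocado test per keyword on purpose, so each failure and tier remains independently reportable.

CLOUDINIT_LOG_KEYWORDS = ('Traceback', 'unexpected', 'CRITICAL', 'WARNING', 'ERROR')

def check_all_cloudinit_log_keywords(test):
    # Illustrative consolidation; 'test' is a CloudinitTest instance with
    # _check_cloudinit_log defined above.
    for keyword in CLOUDINIT_LOG_KEYWORDS:
        test._check_cloudinit_log(keyword)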
Example 21
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     self.project = self.params.get("rhel_ver", "*/VM/*")
     if self.case_short_name == "test_connectivity_check":
         if LooseVersion(self.project) >= LooseVersion("8.0"):
             self.cancel("RHEL-8 doesn't have network service. Skip.")
     cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     if self.case_short_name == "test_provision_vm_with_multiple_nics":
         self.vm.vm_name += "2nics"
         if self.vm.exists():
             self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params, name=publicip_name)
         if not publicip.exists():
             publicip.create()
         nic_name_list = []
         for n in range(0, 2):
             nic_name = "{}nic{}".format(self.vm.vm_name, n)
             subnet = self.vm.subnet if n == 0 else self.vm.subnet + str(n)
             n_publicip = publicip_name if n == 0 else None
             nic = AzureNIC(self.params,
                            name=nic_name,
                            subnet=subnet,
                            vnet=self.vm.vnet_name,
                            publicip=n_publicip)
             if not nic.exists():
                 nic.create()
             nic_name_list.append(nic_name)
         self.vm.nics = ' '.join(nic_name_list)
         self.session = cloud.init_session()
         return
     if self.case_short_name == "test_provision_vm_with_sriov_nic":
         self.vm.vm_name += "sriov"
         if self.vm.exists():
             self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params, name=publicip_name)
         if not publicip.exists():
             publicip.create()
         self.vm.nics = "{}nic".format(self.vm.vm_name)
         nic = AzureNIC(self.params,
                        name=self.vm.nics,
                        subnet=self.vm.subnet,
                        vnet=self.vm.vnet_name,
                        publicip=publicip_name,
                        sriov=True)
         if not nic.exists():
             nic.create()
         self.session = cloud.init_session()
         self.vm.size = "Standard_D3_v2"
         return
     if self.name.name.endswith("test_provision_vm_with_ipv6"):
         self.vm.vm_name += "ipv6"
         # if self.vm.exists():
         #     self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params,
                                  name=publicip_name)
         if not publicip.exists():
             publicip.create()
         self.vm.nics = "{}nic".format(self.vm.vm_name)
         nic = AzureNIC(self.params,
                        name=self.vm.nics,
                        subnet=self.vm.subnet,
                        vnet=self.vm.vnet_name,
                        publicip=publicip_name)
         if not nic.exists():
             nic.create()
         ipv6_config = AzureNicIpConfig(self.params,
                                        name=self.vm.nics+"ipv6",
                                        nic_name=self.vm.nics,
                                        ip_version="IPv6")
         if not ipv6_config.exists():
             ipv6_config.create()
         self.session = cloud.init_session()
         return
     self.session = cloud.init_vm()
     self.session.cmd_output("sudo su -")
     self.username = self.vm.vm_username
class GeneralTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name)
        self.vm = self.cloud.vm
        self.session = self.cloud.init_vm(pre_delete=False, pre_stop=False)
        self.rhel_ver = self.params.get('rhel_ver', '*/VM/*', '')
        self.pwd = os.path.abspath(os.path.dirname(__file__))
        self.dest_dir = "/tmp/"

    def test_validation(self):
        self.log.info("Validation test")
        # Log in to the instance; get CPU, memory, CPU flags and boot time.
        # Save the data and copy it to the host.
        guest_path = self.session.cmd_output("echo $HOME") + "/workspace"
        guest_logpath = guest_path + "/log"
        host_logpath = os.path.dirname(self.job.logfile) + "/validation_data"
        self.session.cmd_output("mkdir -p {0}".format(guest_logpath))
        if "no lspci" in self.session.cmd_output("which lspci"):
            self.session.copy_files_to(
                local_path="{0}/../../data/openstack/pciutils*".format(
                    self.pwd),
                remote_path=guest_path)
            self.session.cmd_output(
                "sudo rpm -ivh {0}/pciutils*".format(guest_path))
        flavor = self.vm.flavor
        self.session.copy_files_to(
            local_path="{0}/../../scripts/test_validation_*.sh".format(
                self.pwd),
            remote_path=guest_path)
        self.log.info("Flavor: %s" % flavor)
        # Cleanup $HOME/workspace/log
        self.session.cmd_output("rm -rf {0}".format(guest_logpath))
        # Collect cpu/memory/cpu flags
        self.session.cmd_output(
            "bash {0}/test_validation_resource_information.sh "
            "{1}".format(guest_path, flavor),
            timeout=180)
        # If RHEL-7 and future versions, collect bootup time
        if int(self.rhel_ver.split('.')[0]) >= 7:
            # Collect bootup time after created
            self.session.cmd_output("bash {0}/test_validation_boot_time.sh "
                                    "{1} create".format(guest_path, flavor))
            # Reboot VM and then collect bootup time after rebooting
            self.session.send_line("sudo reboot")
            time.sleep(10)
            self.session.connect()
            self.session.cmd_output(
                "bash {0}/test_validation_boot_time.sh "
                "{1} reboot".format(guest_path, flavor),
                timeout=120)
        # Copy dmesg.log to workspace
        self.session.cmd_output("cp /var/log/dmesg {0}/dmesg_{1}.log".format(
            guest_logpath, flavor))
        # Copy logs to host
        process.run(cmd="mkdir -p " + host_logpath,
                    timeout=20,
                    verbose=False,
                    ignore_status=False,
                    shell=True)
        self.log.debug("Copying logs to host...")
        self.session.copy_files_from(
            local_path=host_logpath,
            remote_path="{0}/*.log".format(guest_logpath))
        self.log.info("Copy logs to {0} successfully.".format(host_logpath))
        # Cleanup scripts and logs
        self.session.cmd_output("rm -rf " + guest_path)

    def test_check_boot_message(self):
        self.log.info("Check the boot messages with no errors")
        if self.rhel_ver.split('.')[0] == '8':
            data_file = "journalctl.el8.lst"
        elif self.rhel_ver.split('.')[0] == '7':
            data_file = "var.log.message.el7.lst"
        elif self.rhel_ver.split('.')[0] == '6':
            data_file = "var.log.message.el6.lst"
        else:
            self.fail("RHEL version is unknown: %s" % self.rhel_ver)
        self.session.copy_data_to_guest(self.cloud.cloud_provider, data_file)
        if float(self.rhel_ver) >= 8.0:
            cmd = "sudo journalctl -b | grep -iE '(error|fail)' \
| grep -vFf '%s'" % os.path.join(self.dest_dir, data_file)
        else:
            cmd = "sudo cat /var/log/messages | grep -iE '(error|fail)' \
| grep -vFf '%s'" % os.path.join(self.dest_dir, data_file)
        output = self.session.cmd_output(cmd)
        self.assertEqual(
            "", output,
            "There're error logs in /var/log/messages:\n%s" % output)

    # RHBZ#1006883
    def test_check_fstab(self):
        fs_spec = ""
        output = self.session.cmd_output("cat /etc/fstab")
        for line in output.splitlines():
            li = line.strip()
            if not li.startswith("#") and li:
                if li.split()[1] == '/':
                    fs_spec = li.split()[0]
        self.assertTrue(
            re.match(r"UUID=\w{8}-\w{4}-\w{4}-\w{4}-\w{8}", fs_spec),
            "rootfs in /etc/fstab is not present by UUID -> %s" % fs_spec)

    # RHBZ#1673094
    def test_check_partitions(self):
        output = self.session.cmd_output("sudo lsblk")
        count = 0
        for line in output.splitlines():
            if re.search("vda", line):
                count = count + 1
        expected_partitions = 2
        if self.vm.arch == "ppc64le":
            expected_partitions = 3
        if self.vm.arch == "aarch64":
            expected_partitions = 3
        self.assertEqual(expected_partitions, count,
                         "More than one partition exists:\n %s" % output)

    # RHBZ#1032169
    def test_check_bash_prompt(self):
        output = self.session.cmd_output("echo $PS1")
        self.assertEqual(output, r"[\u@\h \W]\$",
                         "Bash prompt is not OK -> %s" % output)

    # RHBZ#970820 cloud-init
    # RHBZ#993027 heat-cfntools
    # ovirt-guest-agent-common
    # cloud-utils-growpart
    def test_check_installed_packages(self):
        packages_el8 = ['cloud-init', 'cloud-utils-growpart']
        packages_el7 = [
            'cloud-init', 'heat-cfntools', 'ovirt-guest-agent-common',
            'cloud-utils-growpart'
        ]
        packages_el6 = [
            'cloud-init', 'heat-cfntools', 'rhevm-guest-agent-common',
            'cloud-utils-growpart'
        ]
        packages = []
        if self.vm.image_name[17] == '8':
            packages = packages_el8
        if self.vm.image_name[17] == '7':
            packages = packages_el7
        if self.vm.image_name[17] == '6':
            packages = packages_el6
        cmd = "rpm -qa --qf '%{NAME}\\n'"
        output = self.session.cmd_output(cmd)
        for package in packages:
            self.assertIn(package, output, "Missing package -> %s" % package)

    # RHBZ#1028889
    def test_check_redhat_release(self):
        output = self.session.cmd_output("cat /etc/redhat-release")
        match = re.search(r"\d\.?\d+", output).group(0)
        self.assertEqual(
            self.rhel_ver, match,
            "Release version mismatch in /etc/redhat-release -> %s" % output)
        if self.rhel_ver.split('.')[0] == '8':
            output = self.session.cmd_output("rpm -q redhat-release")
            match = re.search(r"redhat-release-(\d\.?\d+)", output).group(1)
        if self.rhel_ver.split('.')[0] == '7':
            output = self.session.cmd_output("rpm -q redhat-release-server")
            match = re.search(r"redhat-release-server-(\d\.?\d+)",
                              output).group(1)
        if self.rhel_ver.split('.')[0] == '6':
            output = self.session.cmd_output("rpm -q redhat-release-server")
            match = re.search(r"redhat-release-server-6Server-(\d\.?\d+)",
                              output).group(1)

        self.assertEqual(
            self.rhel_ver, match,
            "Release version mismatch on redhat-release-server -> %s" % output)

    # RHBZ#1045242
    def test_check_size_of_rootfs(self):
        fs_size = 0
        output = self.session.cmd_output("df -h")
        for line in output.splitlines():
            if line.split()[5] == '/':
                fs_size = float(
                    utils_misc.normalize_data_size(line.split()[1],
                                                   order_magnitude='G'))
        vm_size = float(
            utils_misc.normalize_data_size(self.vm.size, order_magnitude='G'))
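        # Worked example: with a 40 GiB flavor the check passes when df
        # reports a rootfs between 36.0 and 40.0 GiB.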
        self.assertTrue(
            vm_size * 0.9 <= fs_size <= vm_size,
            "Size of rootfs is lower than 90%% of disk size -> %s" % fs_size)

    # RHBZ#1032175
    def test_check_password_hash_for_root(self):
        sp_pwdp = ""
        output = self.session.cmd_output("sudo cat /etc/shadow")
        for line in output.splitlines():
            if line.split(':')[0] == "root":
                sp_pwdp = line.split(':')[1]
        self.assertEqual(
            "!!", sp_pwdp,
            "Encrypted password for root in /etc/shadow is bad -> %s" %
            sp_pwdp)

    def test_check_selinux_status(self):
        self.assertEqual(self.session.cmd_output("getenforce"), "Enforcing",
                         "SELinux is not enforcing")
        output = self.session.cmd_output(
            "cat /etc/selinux/config|grep SELINUX=")
        keyword = ""
        for line in output.splitlines():
            if '#' not in line:
                keyword = line.split('=')[1]
        self.assertEqual(keyword, "enforcing", "SELinux is not enforcing")

    def test_check_selinux_contexts(self):
        self.log.info(
            "Check all files confiled by SELinux has the correct contexts")
        selinux_now = self.dest_dir + "selinux.now"
        if self.rhel_ver.split('.')[0] == '8':
            data_file = "selinux.el8.lst"
        elif self.rhel_ver.split('.')[0] == '7':
            data_file = "selinux.el7.lst"
        elif self.rhel_ver.split('.')[0] == '6':
            data_file = "selinux.el6.lst"
        else:
            self.fail("RHEL version is unknown: %s" % self.rhel_ver)
        self.session.copy_data_to_guest(self.cloud.cloud_provider, data_file)
        self.session.cmd_output("rm -f {0}".format(selinux_now))
        cmd = "sudo restorecon -R -v -n / -e /mnt -e /proc -e /sys \
-e /tmp -e /var/tmp -e /run >{0}".format(selinux_now)
        self.session.cmd_output(cmd, timeout=60)
        self.session.cmd_output("grep -vxFf {0} {1} > /tmp/cmp".format(
            os.path.join(self.dest_dir, data_file), selinux_now))
        output = self.session.cmd_output("cat /tmp/cmp")
        self.assertEqual(
            "", output,
            "Found extra SELinux contexts have been modified:\n%s" % output)

    def test_check_files_controlled_by_rpm(self):
        self.log.info(
            "Check all files on the disk is controlled by rpm packages")
        utils_script = "rogue.sh"
        if self.rhel_ver.split('.')[0] == '8':
            data_file = "rogue.el8.lst"
        elif self.rhel_ver.split('.')[0] == '7':
            data_file = "rogue.el7.lst"
        elif self.rhel_ver.split('.')[0] == '6':
            data_file = "rogue.el6.lst"
        else:
            self.fail("RHEL version is unknown: %s" % self.rhel_ver)
        self.session.copy_scripts_to_guest(utils_script)
        self.session.copy_data_to_guest(self.cloud.cloud_provider, data_file)
        self.session.cmd_output("sudo sh -c 'chmod 755 %s && %s'" %
                                (os.path.join(self.dest_dir, utils_script),
                                 os.path.join(self.dest_dir, utils_script)),
                                timeout=720)
        output = self.session.cmd_output("grep -vxFf %s %s" % (os.path.join(
            self.dest_dir, data_file), os.path.join(self.dest_dir, "rogue")))
        self.assertEqual(
            "", output,
            "Found extra files not controlled by rpm:\n%s" % output)

    def test_check_file_content_integrity(self):
        self.log.info("Check file content integrity by rpm -Va")
        if self.rhel_ver.split('.')[0] == '8':
            data_file = "rpm_va.el8.lst"
        elif self.rhel_ver.split('.')[0] == '7':
            data_file = "rpm_va.el7.lst"
        elif self.rhel_ver.split('.')[0] == '6':
            data_file = "rpm_va.el6.lst"
        else:
            self.fail("RHEL version is unknown: %s" % self.rhel_ver)
        self.session.cmd_output("sudo prelink -amR")
        self.session.copy_data_to_guest(self.cloud.cloud_provider, data_file)

        # Workaround for block issue BZ1658092
        # cmd = "sudo rpm -Va | grep -vxFf {0} | grep -Ev \
        # '/boot/initramfs|/boot/System.map'"
        self.log.info('WORKAROUND: block issue BZ1658092, \
will not check kernel-devel package.')
        cmd = "sudo rpm -V `rpm -qa | grep -v kernel-devel` | grep -Ev \
'/boot/initramfs|/boot/System.map' | grep -vxFf {0}"

        output = self.session.cmd_output(cmd.format(
            os.path.join(self.dest_dir, data_file)),
                                         timeout=240)
        self.assertEqual("", output,
                         "Found extra files has been modified:\n%s" % output)
        # Continue to compare every single file under local
        # "data/vendor/file_cmp"
        root_path = os.path.dirname(os.path.dirname(self.pwd))
        src_dir = os.path.join(os.path.join(root_path, "data"),
                               self.cloud.cloud_provider)
        if os.path.isdir(os.path.join(src_dir, "file_cmp")):
            for f in os.listdir(os.path.join(src_dir, "file_cmp")):
                m = re.match(r"^(%.*%)(.*)\.el(\d)$", f)
                if m:
                    f_name = m.group(2)
                    f_ver = m.group(3)
                    f_name_l = m.group(1).replace('%', '/') + f_name
                    if self.rhel_ver.split('.')[0] != f_ver:
                        continue
                else:
                    m = re.match(r"^(%.*%)(.*)$", f)
                    f_name = m.group(2)
                    f_name_l = f.replace('%', '/')
                self.session.copy_files_to(
                    os.path.join(os.path.join(src_dir, "file_cmp"), f),
                    "/tmp/" + f_name)
                cmd = "grep -xv '^[[:space:]][[:space:]]*$' %s | diff \
-wB - %s" % (f_name_l, "/tmp/" + f_name)
                output = self.session.cmd_output(cmd)
                self.assertEqual(
                    "", output,
                    "Found %s has been modified:\n%s" % (f_name, output))

    # RHBZ#1144155
    def test_check_boot_cmdline_parameters(self):
        root_path = os.path.dirname(os.path.dirname(self.pwd))
        src_dir = os.path.join(os.path.join(root_path, "data"),
                               self.cloud.cloud_provider)
        data_file = "cmdline_params.lst"
        lines = filter(None,
                       (line.rstrip()
                        for line in open(os.path.join(src_dir, data_file))))
        output = self.session.cmd_output("cat /proc/cmdline")
        for line in lines:
            self.assertIn(line, output, "%s is not in boot parameters" % line)

    # RHBZ#1033780
    def test_check_product_certificate(self):
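        # Product certificate IDs used below: 230.pem = Beta/HTB,
        # 69.pem = RHEL Server x86_64, 479.pem = RHEL 8 x86_64,
        # 279.pem = RHEL for Power LE.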
        output_tmp = self.session.cmd_output(
            "rpm -qf /etc/pki/product-default/230.pem")
        htb = rhel = False
        if output_tmp.startswith("redhat-release"):
            htb = True

        if self.vm.arch == "x86_64" and self.rhel_ver.split('.')[0] != '8':
            output = self.session.cmd_output(
                "rpm -qf /etc/pki/product-default/69.pem")
            if output.startswith("redhat-release"):
                rhel = True

        if self.vm.arch == "x86_64" and self.rhel_ver.split('.')[0] == '8':
            output = self.session.cmd_output(
                "rpm -qf /etc/pki/product-default/479.pem")
            if output.startswith("redhat-release"):
                rhel = True

        if self.vm.arch == "ppc64le":
            output = self.session.cmd_output(
                "rpm -qf /etc/pki/product-default/279.pem")
            if output.startswith("redhat-release"):
                rhel = True

        if htb and not rhel:
            self.error(
                "69.pem/279.pem is not found but 230.pem is found, if current "
                "phase is snapshot, probably it's OK due to the HTB program")
        if not htb and not rhel:
            self.fail("Product certificate is not found")

    def test_check_package_signature(self):
        data_file = "rpm_sign.lst"
        self.session.copy_data_to_guest(self.cloud.cloud_provider, data_file)
        cmd = "rpm -qa --qf '%{name}-%{version}-%{release}.%{arch} \
(%{SIGPGP:pgpsig})\n'|grep -v 'Key ID'"
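        # rpm prints 'Key ID ...' in the signature field of signed packages,
        # so 'grep -v' leaves only the packages with no valid signature.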

        output = self.session.cmd_output(
            cmd + "|grep -vFf %s" % os.path.join(self.dest_dir, data_file))

        # cheshi: newline characters are not supported in aexpect, so a
        # workaround is needed here
        if output.find('|grep -vFf /tmp/rpm_sign.lst') != -1:
            output = "".join(output.splitlines(True)[1:])

        self.assertEqual(
            "", output,
            "There're packages that are not signed.\n {0}".format(output))

    def test_check_hostname(self):
        output = self.session.cmd_output("hostname").split('.')[0]
        self.assertEqual(output, self.vm.vm_name.replace('_', '-'),
                         "The hostname is wrong")

    # RHBZ#974554
    def test_check_services_status(self):
        status, _ = self.session.cmd_status_output("service tuned status")
        self.assertEqual(0, status, "Tuned service is not running")
        output = ""
        if self.vm.image_name[17] == '8' or self.vm.image_name[17] == '7':
            output = self.session.cmd_output("cat /etc/tuned/active_profile")
        if self.vm.image_name[17] == '6':
            output = self.session.cmd_output(
                "cat /etc/tune-profiles/active-profile")
        self.assertEqual("virtual-guest", output, "Tuned service abnormal")

    # RHBZ#983611
    def test_check_network_cfg(self):
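        # NOZEROCONF=yes disables the 169.254.0.0/16 zeroconf route, which
        # could otherwise interfere with the link-local metadata service.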
        flag = False
        output = self.session.cmd_output("cat /etc/sysconfig/network")
        for line in output.splitlines():
            if line == "NOZEROCONF=yes":
                flag = True
        if self.rhel_ver.split('.')[0] == '6':
            self.assertTrue(flag,
                            "NOZEROCONF=yes not in /etc/sysconfig/network")

    # RHBZ#1011013
    def test_check_persistent_dhclient(self):
        flag = False
        output = self.session.cmd_output(
            "ps -ef | grep dhclient | grep -v grep")
        for i in output.split()[7:]:
            if i == "-1":
                flag = True
        self.assertFalse(
            flag,
            "Found '-1     Try to get a lease once.' in dhclient args -> %s" %
            output)

    def test_check_virt_what(self):
        self.log.info("Check the virt-what")
        if self.vm.flavor == 'ecs.ebmg5s.24xlarge':
            self.cancel("Alibaba baremetal, skip this case.")
        virt_type = self.params.get('virt', '*/{0}/*'.format(self.vm.flavor),
                                    'kvm')
        self.assertIn(virt_type, self.session.cmd_output("sudo virt-what"),
                      "virt-what result is not %s" % virt_type)

    def test_check_pv_drivers(self):
        self.log.info("Check pv drivers in VM")
        virt_type = self.params.get('virt', '*/{0}/*'.format(self.vm.flavor),
                                    'kvm')
        if virt_type == 'xen':
            module_list = ["xen_blkfront", "xen_netfront"]
            output = self.session.cmd_output("lsmod|grep 'xen'")
        elif virt_type == 'kvm':
            module_list = ["virtio_net", "virtio_blk"]
            output = self.session.cmd_output("lsmod|grep 'virtio'")
        else:
            self.fail("Virt is not xen or kvm: %s" % virt_type)
        for module in module_list:
            self.assertIn(module, output, "%s module doesn't exist" % module)

    def test_check_subscription_manager(self):
        pass

    def test_vm_check(self):
        """Test case for avocado framework.

        case_name:
            Get VM Check results. (Just collection)

        description:
            Gathering basic information from the instance.

        bugzilla_id:
            n/a

        polarion_id:
            n/a

        maintainer:
            [email protected]

        case_priority:
            0

        case_component:
            checkup

        key_steps:
            1. Deliver vm_check.sh to the instance
            2. Run vm_check.sh to collect information
            3. Deliver the test results to local

        pass_criteria:
            n/a
        """
        self.log.info("VM Check")

        guest_path = self.session.cmd_output("echo $HOME") + "/workspace"
        guest_logpath = guest_path + "/log"
        host_logpath = os.path.dirname(self.job.logfile) + "/validation_data"
        self.session.cmd_output("mkdir -p {0}".format(guest_logpath))

        flavor = self.vm.flavor
        self.session.copy_files_to(
            local_path="{0}/../../scripts/vm_check.sh".format(self.pwd),
            remote_path=guest_path)
        self.log.info("Flavor: %s" % flavor)

        # Cleanup $HOME/workspace/log
        self.session.cmd_output("rm -rf {0}/*".format(guest_logpath))

        # Run vm_check.sh
        self.session.cmd_output("bash {0}/vm_check.sh".format(guest_path),
                                timeout=300)

        # Tar logs
        # self.session.cmd_output(
        #     "cd {0} && tar -zcf vm_check_results_{1}.tar.gz .".format(
        #         guest_logpath, flavor))

        # Copy logs to host
        process.run(cmd="mkdir -p " + host_logpath,
                    timeout=20,
                    verbose=False,
                    ignore_status=False,
                    shell=True)
        self.log.debug("Copying logs to host...")
        self.session.copy_files_from(local_path=host_logpath,
                                     remote_path="{0}/*".format(guest_logpath),
                                     timeout=600)
        self.log.info("Copy logs to {0} successfully.".format(host_logpath))

        # Cleanup scripts and logs
        self.session.cmd_output("rm -rf " + guest_path)

    def test_collect_metadata(self):
        """Test case for avocado framework.

        case_name:
            Collect metadata from cloud provider. (Just collection)

        description:
            Gathering the metadata from cloud providers's metadata server
            inside instance.

        bugzilla_id:
            n/a

        polarion_id:
            n/a

        maintainer:
            [email protected]

        case_priority:
            0

        case_component:
            checkup

        key_steps:
            1. Deliver traverse_metadata.sh to the instance
            2. Run traverse_metadata.sh to collect information
            3. Deliver the test results to local

        pass_criteria:
            n/a
        """
        self.log.info("Collect Metadata")

        guest_path = self.session.cmd_output("echo $HOME") + "/workspace"
        guest_logpath = guest_path + "/log"
        host_logpath = os.path.dirname(self.job.logfile) + "/validation_data"
        self.session.cmd_output("mkdir -p {0}".format(guest_logpath))

        flavor = self.vm.flavor
        self.session.copy_files_to(
            local_path="{0}/../../scripts/traverse_metadata.sh".format(
                self.pwd),
            remote_path=guest_path)
        self.log.info("Flavor: %s" % flavor)

        # Cleanup $HOME/workspace/log
        self.session.cmd_output("rm -rf {0}/*".format(guest_logpath))

        # Run traverse_metadata.sh
        self.session.cmd_output("bash {0}/traverse_metadata.sh \
> {1}/traverse_metadata_{2}_$(date +%Y%m%d%H%M%S).log".format(
            guest_path, guest_logpath, flavor))

        # Copy logs to host
        process.run(cmd="mkdir -p " + host_logpath,
                    timeout=20,
                    verbose=False,
                    ignore_status=False,
                    shell=True)
        self.log.debug("Copying logs to host...")
        self.session.copy_files_from(local_path=host_logpath,
                                     remote_path="{0}/*".format(guest_logpath))
        self.log.info("Copy logs to {0} successfully.".format(host_logpath))

        # Cleanup scripts and logs
        self.session.cmd_output("rm -rf " + guest_path)

    def test_check_cpu_count(self):
        """Test case for avocado framework.

        case_name:
            Check CPU count

        description:
            Check the CPU# inside the instance.

        bugzilla_id:
            n/a

        polarion_id:
            n/a

        maintainer:
            [email protected]

        case_priority:
            0

        case_component:
            checkup

        pass_criteria:
            n/a
        """
        guest_cpu = int(
            self.session.cmd_output(
                "lscpu | grep ^CPU.s.: | awk '{print $2}'"))
        expected_cpu = self.vm.cpu

        self.assertEqual(
            guest_cpu, expected_cpu,
            'CPU count is not as expected. Real: {0}; Expected: {1}'.format(
                guest_cpu, expected_cpu))

    def test_check_mem_size(self):
        """Test case for avocado framework.

        case_name:
            Check memory size

        description:
            Check the memory size inside the instance.

        bugzilla_id:
            n/a

        polarion_id:
            n/a

        maintainer:
            [email protected]

        case_priority:
            0

        case_component:
            checkup

        pass_criteria:
            n/a
        """
        guest_mem = int(
            self.session.cmd_output("free -m | grep ^Mem: | awk '{print $2}'"))
        expected_mem = self.vm.memory * 1024

        self.assertAlmostEqual(
            first=guest_mem,
            second=expected_mem,
            delta=expected_mem * 0.25,
            msg="Memory Size is not as expect Real: {0}; Expected: {1}".format(
                guest_mem, expected_mem))

    def tearDown(self):
        self.session.close()
Example n. 23
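# A minimal sketch of the Setup/init_vm pattern shared by these examples
# (Setup and the session object come from this project's own cloud libraries;
# the test body here is purely illustrative):
#
#     from avocado import Test
#
#     class ExampleTest(Test):
#         def setUp(self):
#             self.cloud = Setup(self.params, self.name)  # provider factory
#             self.vm = self.cloud.vm                     # VM handle
#             self.session = self.cloud.init_vm()         # boot VM, open SSH
#
#         def test_example(self):
#             # cmd_output returns the stdout of a command run in the guest
#             self.assertEqual('hi', self.session.cmd_output('echo hi'))
#
#         def tearDown(self):
#             self.session.close()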
class LifeCycleTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name)
        self.vm = self.cloud.vm
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith("test_create_vm_password"):
            if self.vm.exists():
                self.vm.delete(wait=True)
            self.session = self.cloud.init_session()
            return
        if self.name.name.endswith("test_create_vm_sshkey"):
            pre_delete = True
        if self.name.name.endswith("test_start_vm"):
            pre_stop = True
        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)

    # TODO Add test_modify_instance_type for Alibaba cloud
    def test_create_vm_password(self):
        import base64
        user_data = """\
#cloud-config

user: {0}
password: {1}
chpasswd: {{ expire: False }}

ssh_pwauth: 1
""".format(self.vm.vm_username, self.vm.vm_password)
        self.vm.user_data = base64.b64encode(user_data.encode())
        self.vm.keypair = None
        self.vm.create(wait=True)
        if self.vm.is_stopped():
            self.vm.start(wait=True)
        self.session.connect(authentication="password")
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output, "Create VM with password error: \
output of cmd `who` unexpected -> %s" % output)
        # Test change password for Alibaba cloud
        if self.cloud.cloud_provider == 'alibaba':
            self.vm.vm_password = "******"
            self.vm.reset_password(new_password=self.vm.vm_password)
            self.vm.reboot(wait=True)
            self.session = self.cloud.init_session()
            self.session.connect(authentication="password")
            output = self.session.cmd_output('whoami')
            self.assertEqual(
                self.vm.vm_username, output, "Start VM error after change \
password: output of cmd `who` unexpected -> %s" % output)

    def test_create_vm_sshkey(self):
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Create VM with sshkey error: output of cmd `who` unexpected -> %s"
            % output)

    def test_start_vm(self):
        self.vm.start(wait=True)
        self.session.connect(timeout=300)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Start VM error: output of cmd `who` unexpected -> %s" % output)

    def test_pause_unpause_vm(self):
        self.vm.pause(wait=True)
        self.vm.unpause(wait=True)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Pause/Unpause VM error: output of cmd `who` unexpected -> %s" %
            output)

    def test_reboot_vm(self):
        before = self.session.cmd_output('last reboot')
        self.vm.reboot(wait=True)
        self.session.connect(timeout=300)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Reboot VM error: output of cmd `who` unexpected -> %s" % output)
        after = self.session.cmd_output('last reboot')
        self.assertNotEqual(
            before, after,
            "Reboot VM error: before -> %s; after -> %s" % (before, after))

    def test_reboot_inside_vm(self):
        before = self.session.cmd_output('last reboot')
        self.session.send_line('sudo reboot')
        time.sleep(10)
        self.session.connect(timeout=300)
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Reboot VM error: output of cmd `who` unexpected -> %s" % output)
        after = self.session.cmd_output('last reboot')
        self.assertNotEqual(
            before, after,
            "Reboot VM error: before -> %s; after -> %s" % (before, after))

    def test_stop_vm(self):
        self.vm.stop(wait=True)
        self.assertTrue(self.vm.is_stopped(),
                        "Stop VM error: VM status is not SHUTOFF")

    def test_delete_vm(self):
        self.vm.delete(wait=True)
        self.assertFalse(self.vm.exists(), "Delete VM error: VM still exists")

    def tearDown(self):
        if self.name.name.endswith("create_vm_password"):
            self.vm.delete(wait=True)
        self.session.close()
class NetworkTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name)
        self.vm = self.cloud.vm
        self.pwd = os.path.abspath(os.path.dirname(__file__))
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith("test_coldplug_nics"):
            pre_stop = True
        if not self.vm.nic_count or self.vm.nic_count < 2:
            self.cancel("No nic count. Skip this case.")
        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)
        if self.name.name.endswith("test_hotplug_nics") or \
           self.name.name.endswith("test_coldplug_nics"):
            self.cloud.init_nics(self.vm.nic_count)
            self.primary_nic_id = self.cloud.primary_nic_id

    def test_hotplug_nics(self):
        """
        1. Start VM. Attach max NICs and check all can get IP
        2. Add 1 more NIC. Should not be added
        3. Detach all NICs. Device should be removed inside guest
        """
        # 1. Attach max NICs and check all can get IP
        count = self.vm.nic_count - 1
        self.log.info("Step 1: Attach %s NICs." % count)
        self.vm.attach_nics(count, wait=True)
        self.assertEqual(len(self.vm.query_nics()), count + 1,
                         "Total NICs number is not %d" % (count + 1))

        guest_path = self.session.cmd_output("echo $HOME") + "/workspace"
        self.session.cmd_output("mkdir -p {0}".format(guest_path))

        self.session.copy_files_to(
            local_path="{0}/../../scripts/aliyun_enable_nics.sh".format(
                self.pwd),
            remote_path=guest_path)

        self.log.info("NIC Count: %s" % count)
        self.session.cmd_output("bash {0}/aliyun_enable_nics.sh {1}".format(
            guest_path, count),
                                timeout=180)

        self.session.cmd_output('ip addr', timeout=30)
        time.sleep(60)  # waiting for dhcp works
        self.session.cmd_output('ip addr', timeout=30)

        time.sleep(10)
        outside_ips = [
            str(self.vm.get_private_ip_address(nic))
            for nic in self.vm.query_nics()
        ]
        inside_ips = self.session.cmd_output("ip addr")
        for outside_ip in outside_ips:
            self.assertIn(
                outside_ip, inside_ips, "Some of NICs are not available. "
                "Outside IP: %s Inside IPs:\n %s" % (outside_ip, inside_ips))

        # 2. Add 1 more NIC. Should not be added
        self.log.info("Step 2: Add 1 more NIC, should not be added.")
        self.vm.attach_nics(1)
        self.assertEqual(
            len(self.vm.query_nics()), count + 1,
            "NICs number should not greater than %d" % (count + 1))

        # 3. Detach all NICs. NICs should be removed inside guest
        self.log.info("Step 3: Detach all NICs")

        self.session.copy_files_to(
            local_path="{0}/../../scripts/aliyun_disable_nics.sh".format(
                self.pwd),
            remote_path=guest_path)

        self.log.info("NIC Count: %s" % count)
        self.session.cmd_output("bash {0}/aliyun_disable_nics.sh {1}".format(
            guest_path, count),
                                timeout=180)

        nic_ids = [
            self.vm.get_nic_id(nic) for nic in self.vm.query_nics()
            if self.vm.get_nic_id(nic) != self.primary_nic_id
        ]
        self.vm.detach_nics(nic_ids, wait=True)
        self.assertEqual(len(self.vm.query_nics()), 1,
                         "Fail to remove all NICs outside guest")
        time.sleep(5)
        self.assertEqual(
            self.session.cmd_output(
                "ip addr | grep -e 'eth.*mtu' -e 'ens.*mtu' | wc -l"), "1",
            "Fail to remove all NICs inside guest")

        self.log.info("Detach all NICs successfully")

    def test_coldplug_nics(self):
        """
        1. Stop VM. Attach max NICs. Start VM and check all can get IP
        2. Stop VM. Add 1 more NIC. Should not be added
        3. Stop VM. Detach all NICs. Device should be removed inside guest
        """
        # Set timeout for Alibaba baremetal
        if 'ecs.ebm' in self.vm.flavor:
            connect_timeout = 600
        else:
            connect_timeout = 120

        # 1. Attach max NICs and check all can get IP
        count = self.vm.nic_count - 1
        self.log.info("Step 1: Attach %s NICs." % count)
        self.vm.attach_nics(count, wait=True)
        self.assertEqual(len(self.vm.query_nics()), count + 1,
                         "Total NICs number is not %d" % (count + 1))
        self.vm.start(wait=True)
        self.session.connect(timeout=connect_timeout)

        guest_path = self.session.cmd_output("echo $HOME") + "/workspace"
        self.session.cmd_output("mkdir -p {0}".format(guest_path))

        self.session.copy_files_to(
            local_path="{0}/../../scripts/aliyun_enable_nics.sh".format(
                self.pwd),
            remote_path=guest_path)

        self.log.info("NIC Count: %s" % count)
        self.session.cmd_output("bash {0}/aliyun_enable_nics.sh {1}".format(
            guest_path, count),
                                timeout=180)

        time.sleep(10)
        self.session.cmd_output('ip addr', timeout=30)

        outside_ips = [
            self.vm.get_private_ip_address(nic)
            for nic in self.vm.query_nics()
        ]
        inside_ips = self.session.cmd_output("ip addr")
        for outside_ip in outside_ips:
            self.assertIn(
                outside_ip, inside_ips,
                "Some of NICs are not available. Inside IPs: %s" % inside_ips)

        # 2. Add 1 more NIC. Should not be added
        self.log.info("Step 2: Add 1 more NIC, should not be added.")
        self.vm.stop(wait=True)
        self.assertTrue(self.vm.is_stopped(), "Fail to stop VM")
        self.vm.attach_nics(1)
        self.assertEqual(
            len(self.vm.query_nics()), count + 1,
            "NICs number should not greater than %d" % (count + 1))

        # 3. Detach all NICs. NICs should be removed inside guest
        self.log.info("Step 3: Detach all NICs.")
        nic_ids = [
            self.vm.get_nic_id(nic) for nic in self.vm.query_nics()
            if self.vm.get_nic_id(nic) != self.primary_nic_id
        ]
        self.vm.detach_nics(nic_ids, wait=True)
        self.assertEqual(len(self.vm.query_nics()), 1,
                         "Fail to remove all NICs outside guest")
        self.vm.start(wait=True)
        self.assertTrue(self.vm.is_started(), "Fail to start VM")
        self.session.connect(timeout=connect_timeout)
        guest_cmd = "ip addr | grep -e 'eth.*mtu' -e 'ens.*mtu' | wc -l"

        self.assertEqual(self.session.cmd_output(guest_cmd), "1",
                         "Fail to remove all NICs inside guest")
        self.log.info("Detach all NICs successfully")

    def tearDown(self):
        if self.name.name.endswith("test_hotplug_nics") or \
           self.name.name.endswith("test_coldplug_nics"):
            guest_cmd = """
primary_nic=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep -e eth -e ens | head -n 1)
device_name=$(echo $primary_nic | tr -d '[:digit:]')
ls /etc/sysconfig/network-scripts/ifcfg-${device_name}* | \
grep -v ${primary_nic} | xargs sudo rm -f
"""
            self.session.cmd_output(guest_cmd, timeout=180)
        self.session.close()
Example n. 25
class CloudinitTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name, create_timeout=300)
        self.vm = self.cloud.vm
        self.ssh_wait_timeout = 600
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith(
                "test_cloudinit_create_vm_login_repeatedly"):
            return
        if self.name.name.endswith("test_cloudinit_login_with_password"):
            if self.vm.exists():
                self.vm.delete(wait=True)
            self.session = self.cloud.init_session()
            return
        if self.name.name.endswith("test_cloudinit_login_with_publickey"):
            pre_delete = True
        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)

    def test_cloudinit_login_with_publickey(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL7-103831 - CLOUDINIT-TC: VM can successfully login
        after provisioning(with public key authentication)
        1. Create a VM with only public key authentication
        2. Login with publickey, should have sudo privilege
        """
        output = self.session.cmd_output('whoami')
        self.assertEqual(
            self.vm.vm_username, output,
            "Login VM with publickey error: output of cmd `whoami` unexpected -> %s"
            % output)
        self.assertIn(
            "%s ALL=(ALL) NOPASSWD:ALL" % self.vm.vm_username,
            self.session.cmd_output(
                "sudo cat /etc/sudoers.d/90-cloud-init-users"),
            "No sudo privilege")

    def test_cloudinit_check_hostname(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL7-103833 - CLOUDINIT-TC: Successfully set VM hostname
        """
        output = self.session.cmd_output("hostname").split('.')[0]
        self.assertEqual(output, self.vm.vm_name.replace('_', '-'),
                         "The hostname is wrong")

    def test_cloudinit_check_services_status(self):
        '''
        :avocado: tags=tier1,cloudinit
        RHEL-188130 - CLOUDINIT-TC: Check cloud-init services status
        check if four cloud-init services are active
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        utils_lib.run_cmd(self,
                          'cloud-init -v',
                          msg='Get cloud-init version',
                          is_get_console=False)
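        # cloud-init runs as four systemd units, in boot order:
        # cloud-init-local -> cloud-init -> cloud-config -> cloud-final.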
        cmd = "sudo systemctl is-active cloud-init-local.service"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_kw='active',
                          is_get_console=False)
        cmd = "sudo systemctl is-active cloud-init.service"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_kw='active',
                          is_get_console=False)
        cmd = "sudo systemctl is-active cloud-config.service"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_kw='active',
                          is_get_console=False)
        cmd = "sudo systemctl is-active cloud-final.service"
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_kw='active',
                          is_get_console=False)

    def test_cloudinit_check_log_no_traceback(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188134 - CLOUDINIT-TC: Check no "Traceback" keyword in /var/log/cloud-init.log
        check no traceback log in cloudinit logs
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='Traceback',
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='Traceback',
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_log_no_unexpected(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188135 - CLOUDINIT-TC: Check no "unexpected" keyword in /var/log/cloud-init.log
        bz#: 1827207
        check no unexpected error log in cloudinit logs
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='unexpected',
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='unexpected',
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_log_no_critical(self):
        '''
        :avocado: tags=tier1,cloudinit
        RHEL-188131 - CLOUDINIT-TC: Check no "CRITICAL" level message in /var/log/cloud-init.log
        bz#: 1827207
        check no critical log in cloudinit logs
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='CRITICAL',
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='CRITICAL',
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_log_no_warn(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188133 - CLOUDINIT-TC: Check no "WARNING" level message in /var/log/cloud-init.log
        bz#: 1821999
        check no warning log in cloudinit logs
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='WARNING',
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='WARNING',
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_log_no_error(self):
        '''
        :avocado: tags=tier2,cloudinit
        RHEL-188132 - CLOUDINIT-TC: Check no "ERROR" level message in /var/log/cloud-init.log
        bz#: 1821999
        check no error log in cloudinit logs
        '''
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'sudo cat /var/log/cloud-init.log'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='ERROR',
                          msg='check /var/log/cloud-init.log',
                          is_get_console=False)
        if 'release 7' not in utils_lib.run_cmd(self,
                                                'sudo cat /etc/redhat-release',
                                                is_get_console=False):
            cmd = 'sudo cat /var/log/cloud-init-output.log'
            utils_lib.run_cmd(self,
                              cmd,
                              expect_ret=0,
                              expect_not_kw='ERROR',
                              msg='check /var/log/cloud-init-output.log',
                              is_get_console=False)

    def test_cloudinit_check_instance_data_json(self):
        """
        :avocado: tags=tier2,cloudinit
        RHEL-182312 - CLOUDINIT-TC:cloud-init can successfully write data to instance-data.json
        bz#: 1744526
        """
        self.session.connect(timeout=self.ssh_wait_timeout)
        cmd = 'ls -l /run/cloud-init/instance-data.json'
        utils_lib.run_cmd(self,
                          cmd,
                          expect_ret=0,
                          expect_not_kw='No such file or directory',
                          msg='check /run/cloud-init/instance-data.json',
                          is_get_console=False)

    def test_cloudinit_create_vm_login_repeatedly(self):
        """
        :avocado: tags=tier3,cloudinit,test_cloudinit_create_vm_login_repeatedly
        RHEL-188320 - CLOUDINIT-TC: create VM and login repeatedly
        bz#: 1803928
        create a VM and login with ssh-key, repeated 50 times to catch a
        race-condition bug
        """
        pre_delete = True
        for x in range(50):
            self.log.info(str(x) + " run: create VM and login")
            self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                              pre_stop=False)
            output = self.session.cmd_output('whoami')
            self.assertEqual(
                self.vm.vm_username, output,
                str(x) +
                " run: Login VM with publickey error: output of cmd `whoami` unexpected -> %s"
                % output)
            time.sleep(30)

    def test_cloudutils_growpart_resize_partition_first_boot(self):
        """
        :avocado: tags=tier1,cloud_utils_growpart
        RHEL-188669: CLOUDINIT-TC:[cloud-utils-growpart]resize partition during VM first boot
        """
        self.log.info(
            "RHEL-188669: CLOUDINIT-TC:[cloud-utils-growpart]resize partition \
during VM first boot")
        self.session.cmd_output("sudo su -")
        device = "/dev/vda"
        # Partition Table: gpt, partition number is 3
        # Partition Table: msdos, partition number is 1
        part_type = self.session.cmd_output(
            "parted -s %s print|grep 'Partition Table'|awk '{print $3}'" %
            device)
        part_number = "3" if part_type == "gpt" else "1"
        # VM flavor m1.medium, size 40G
        self.assertEqual(
            "42.9GB",
            self.session.cmd_output(
                "parted -s %s print|grep ' %s '|awk '{print $3}'" %
                (device, part_number)),
            "Fail to resize partition during first boot")

    def _growpart_auto_resize_partition(self, label):
        """
        :param label: msdos/gpt
        """
        self.session.cmd_output("sudo su -")
        self.assertEqual(
            self.session.cmd_status_output("which growpart")[0], 0,
            "No growpart command.")

        device = "/tmp/testdisk"
        # The test disk lives inside the guest, so remove any stale copy
        # there rather than checking os.path.exists() on the host.
        self.session.cmd_output("rm -f {}".format(device))
        self.session.cmd_output("truncate -s 2G {}".format(device))
        self.session.cmd_output("parted -s {} mklabel {}".format(
            device, label))
        part_type = "primary" if label == "msdos" else ""
        part_name = "xfs" if label == "gpt" else ""
        # 1 partition
        self.session.cmd_output("parted -s {} mkpart {} {} 0 1000".format(
            device, part_type, part_name))
        self.session.cmd_output("parted -s {} print".format(device))
        self.assertEqual(
            self.session.cmd_status_output("growpart {} 1".format(device))[0],
            0, "Fail to run growpart")
        self.assertEqual(
            "2147MB",
            self.session.cmd_output(
                "parted -s %s print|grep ' 1 '|awk '{print $3}'" % device),
            "Fail to resize partition")
        # 2 partitions
        self.session.cmd_output("parted -s {} rm 1".format(device))
        self.session.cmd_output("parted -s {} mkpart {} {} 0 1000".format(
            device, part_type, part_name))
        self.session.cmd_output("parted -s {} mkpart {} {} 1800 1900".format(
            device, part_type, part_name))
        self.session.cmd_output("parted -s {} print".format(device))
        exit_status, output = self.session.cmd_status_output(
            "growpart {} 1".format(device))
        self.assertEqual(exit_status, 0,
                         "Run growpart failed: {}".format(output))
        self.assertEqual(
            "1800MB",
            self.session.cmd_output(
                "parted -s %s print|grep ' 1 '|awk '{print $3}'" % device),
            "Fail to resize partition")

    def test_cloudutils_growpart_auto_resize_partition_in_gpt(self):
        """
        :avocado: tags=tier1,cloud_utils_growpart
        RHEL-171053: CLOUDINIT-TC: [cloud-utils-growpart] Auto resize\
                     partition in gpt
        BZ#1695091
        """
        self.log.info("RHEL-171053: CLOUDINIT-TC: [cloud-utils-growpart] \
Auto resize partition in gpt")
        self._growpart_auto_resize_partition("gpt")

    def test_cloudutils_growpart_auto_resize_partition_in_mbr(self):
        """
        :avocado: tags=tier1,cloud_utils_growpart
        RHEL-188633: CLOUDINIT-TC: [cloud-utils-growpart] Auto resize\
                     partition in MBR
        """
        self.log.info("RHEL-188633: CLOUDINIT-TC: [cloud-utils-growpart] \
Auto resize partition in MBR")
        self._growpart_auto_resize_partition("msdos")

    def test_cloudinit_login_with_password(self):
        """
        :avocado: tags=tier1,cloudinit
        RHEL7-103830: CLOUDINIT-TC: VM can successfully login
        after provisioning(with password authentication)
        1. Create a VM with only password authentication
        2. Login with password, should have sudo privilege
        """
        import base64
        self.log.info("RHEL7-103830: CLOUDINIT-TC: VM can successfully login "
                      "after provisioning(with password authentication)")

        user_data = """\
#cloud-config

user: {0}
password: {1}
chpasswd: {{ expire: False }}
ssh_pwauth: 1
""".format(self.vm.vm_username, self.vm.vm_password)
        self.vm.user_data = base64.b64encode(
            user_data.encode('utf-8')).decode('utf-8')
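        # b64encode returns bytes; decode back to str, since the create()
        # call below presumably expects a text user-data field.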
        self.vm.keypair = None
        self.vm.create(wait=True)
        self.session.connect(authentication="password")
        self.assertEqual(self.vm.vm_username,
                         self.session.cmd_output("whoami"),
                         "Fail to login with password")
        self.assertIn(
            "%s ALL=(ALL) NOPASSWD:ALL" % self.vm.vm_username,
            self.session.cmd_output(
                "sudo cat /etc/sudoers.d/90-cloud-init-users"),
            "No sudo privilege")

    def tearDown(self):
        self.session.close()
Example n. 26
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.project = self.params.get("rhel_ver", "*/VM/*")
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     if self.case_short_name == "test_provision_gen2_vm":
         if LooseVersion(self.project) < LooseVersion('7.8'):
             self.cancel(
                 "Skip case because RHEL-{} ondemand image doesn't support gen2".format(self.project))
         cloud = Setup(self.params, self.name, size="DC2s")
     else:
         cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     pre_delete = False
     if self.case_short_name == "test_provision_with_2_keys":
         pre_delete = True
         self.vm.vm_name += "-2keys"
         key1 = "{}/.ssh/id_rsa.pub".format(os.path.expanduser('~'))
         key2 = "/tmp/newkey.pub"
         if not os.path.exists(key2):
             command("ssh-keygen -f {} -q -N ''".format(key2.split('.')[0]))
         self.assertTrue(os.path.exists(key1),
                         "Key {} doesn't exist".format(key1))
         self.assertTrue(os.path.exists(key2),
                         "Key {} doesn't exist".format(key2))
         self.vm.ssh_key_value = "{} {}".format(key1, key2)
         with open(key1) as f:
             self.key1_value = f.read().rsplit(' ', 1)[0]
         with open(key2) as f:
             self.key2_value = f.read().rsplit(' ', 1)[0]
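          # rsplit(' ', 1)[0] drops the trailing comment field of each public
          # key, so later comparisons match on key type and body only.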
     self.session = cloud.init_vm(pre_delete=pre_delete)
     self.username = self.vm.vm_username
     self.package = self.params.get("packages", "*/Other/*")
     if self.case_short_name == "test_install_uninstall_package":
         if self.session.cmd_status_output("ls /tmp/{}".format(self.package))[0] != 0:
             self.cancel("Package doesn't exist. Skip case.")
     if self.case_short_name.startswith("test_host_plugin"):
         self.session.cmd_output(
             "sudo /usr/bin/cp /etc/waagent.conf{,-bak}")
     if self.case_short_name == "test_upgrade_downgrade_package":
         rhel7_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-7/packages/WALinuxAgent/2.2.32/1.el7/noarch/WALinuxAgent-2.2.32-1.el7.noarch.rpm"
         rhel8_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-8/packages/WALinuxAgent/2.2.32/1.el8/noarch/WALinuxAgent-2.2.32-1.el8.noarch.rpm"
         try:
             self.assertEqual(0, self.session.cmd_status_output("ls /tmp/{}".format(self.package))[0],
                              "No new pakcage in guest VM")
             import requests
             if str(self.project).startswith('7'):
                 old_pkg_url = rhel7_old_pkg_url
             elif str(self.project).startswith('8'):
                 old_pkg_url = rhel8_old_pkg_url
             self.old_pkg = old_pkg_url.split('/')[-1]
             if not os.path.exists("/tmp/{}".format(self.old_pkg)):
                 r = requests.get(old_pkg_url, allow_redirects=True)
                 open("/tmp/{}".format(self.old_pkg), 'wb').write(r.content)
             self.session.copy_files_to(
                 local_path="/tmp/{}".format(self.old_pkg),
                 remote_path="/tmp/{}".format(self.old_pkg))
             self.assertEqual(0, self.session.cmd_status_output("ls /tmp/{}".format(self.old_pkg))[0],
                              "No old pakcage in guest VM")
          except Exception:
             self.cancel(
                 "No old or new package in guest VM. Skip this case.")
Example n. 27
def init_test(test_ins, instance_index=0):
    '''
    Prepare VMs before starting the test.
    test_ins: Test class instance
    instance_index: get the specified instance
    '''
    cloud = Setup(test_ins.params, test_ins.name)
    test_ins.vm = cloud.vm
    test_ins.snap = None
    test_ins.kdump_status = False
    test_ins.cpu_count = 0
    test_ins.ssh_wait_timeout = set_ssh_wait_timeout(test_ins.vm)

    pre_delete = False
    pre_stop = False
    if test_ins.name.name.endswith(('test_check_firstlaunch_time')):
        pre_delete = True
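    # test_check_firstlaunch_time needs a brand-new instance, so pre_delete
    # forces any reused instance to be deleted and recreated.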

    test_ins.log.info("Test tempdir: %s" % test_ins.teststmpdir)
    test_ins.vm.instance_id = get_exists_resource_id(
        test_ins.teststmpdir,
        test_ins.vm.instance_type,
        resource_index=instance_index)
    query_resource_blacklist(test_ins)
    if test_ins.vm.instance_id is not None:
        if test_ins.vm.reuse_init(test_ins.vm.instance_id):
            test_ins.log.info("Reuse existing instance %s!" %
                              test_ins.vm.instance_id)
            if test_ins.vm.is_started():
                test_ins.session = cloud.init_vm(pre_delete=False, pre_stop=False)
                if not check_session(test_ins):
                    test_ins.vm.delete(wait=True)
                    cleanup_stored(test_ins.teststmpdir,
                                   test_ins.params,
                                   resource_id=test_ins.vm.instance_id)
                    test_ins.vm = None
            if pre_delete:
                test_ins.log.info("Test needs no reuse!")
                test_ins.vm.delete(wait=True)
                cleanup_stored(test_ins.teststmpdir,
                               test_ins.params,
                               resource_id=test_ins.vm.instance_id)
                test_ins.vm = None
        else:
            test_ins.log.info(
                "No match existing instance, will create new one!")
            cleanup_stored(test_ins.teststmpdir,
                           test_ins.params,
                           resource_id=test_ins.vm.instance_id)
            pre_delete = True
    if test_ins.name.name.endswith("test_start_vm"):
        pre_stop = True
    # Commented out because AvocadoParams does not support item assignment:
    #     TypeError: 'AvocadoParams' object does not support item assignment
    #if test_ins.name.name.endswith("test_stop_vm_hibernate"):
    #    pre_delete = True
    #    test_ins.params['HibernationOptions'] = True
    #    test_ins.params['EbsEncrypted'] = True
    #    test_ins.params['EbsVolumeSize'] = 100
    #else:
    #    test_ins.params['HibernationOptions'] = False
    #    test_ins.params['EbsEncrypted'] = False
    #    test_ins.params['EbsVolumeSize'] = 10
    if test_ins.vm is None:
        cloud = Setup(test_ins.params, test_ins.name)
        test_ins.vm = cloud.vm
        test_ins.snap = None
        test_ins.kdump_status = False
        test_ins.cpu_count = 0
        test_ins.ssh_wait_timeout = set_ssh_wait_timeout(test_ins.vm)
    test_ins.session = cloud.init_vm(pre_delete=pre_delete, pre_stop=pre_stop)
    # query_resource_blacklist(test_ins)
    test_ins.log.info("Instance id is %s" % test_ins.vm.instance_id)
    save_exists_resource_id(test_ins.teststmpdir, test_ins.vm)
    if test_ins.vm.is_stopped() and not pre_stop:
        if not test_ins.vm.start():
            save_resource_blacklist(test_ins.teststmpdir, test_ins.vm.instance_type)
            test_ins.vm.delete()
            cleanup_stored(test_ins.teststmpdir, test_ins.params, resource_id=test_ins.vm.instance_id)
            test_ins.fail("Cannot start instance")
    if not test_ins.name.name.endswith("test_cleanup") and not pre_stop:
        check_session(test_ins)
Example n. 28
 def setUp(self):
     """Set up."""
     self.log.info("Cleanup Begain!")
     self.cloud = Setup(self.params, self.name)
     self.vm = self.cloud.vm
class NetworkTest(Test):
    def setUp(self):
        self.cloud = Setup(self.params, self.name)
        self.vm = self.cloud.vm
        pre_delete = False
        pre_stop = False
        if self.name.name.endswith("test_coldplug_nics"):
            pre_stop = True
        if not self.vm.nic_count or self.vm.nic_count < 2:
            self.cancel("No nic count. Skip this case.")
        self.session = self.cloud.init_vm(pre_delete=pre_delete,
                                          pre_stop=pre_stop)
        if self.name.name.endswith("test_hotplug_nics") or \
           self.name.name.endswith("test_coldplug_nics"):
            self.cloud.init_nics(self.vm.nic_count)
            self.primary_nic_id = self.cloud.primary_nic_id

    def test_hotplug_nics(self):
        """
        1. Start VM. Attach max NICs and check all can get IP
        2. Add 1 more NIC. Should not be added
        3. Detach all NICs. Device should be removed inside guest
        """
        # 1. Attach max NICs and check all can get IP
        count = self.vm.nic_count - 1
        self.log.info("Step 1: Attach %s NICs." % count)
        self.vm.attach_nics(count, wait=True)
        self.assertEqual(len(self.vm.query_nics()), count + 1,
                         "Total NICs number is not %d" % (count + 1))

        if self.cloud.cloud_provider == "alibaba":
            guest_cmd = """
primary_nic=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep -e eth -e ens | head -n 1)
device_name=$(echo $primary_nic | tr -d '[:digit:]')
device_numb=$(echo $primary_nic | tr -d '[:alpha:]')
[ "${device_name}" = "ens" ] && return    # ens* will up automatically
spath="/etc/sysconfig/network-scripts"
for offset in $(seq 1 %s); do
    device=${device_name}$((${device_numb}+${offset}))
    echo "STEP1: Create configure file ifcfg-${device}..."
    echo DEVICE=${device} | sudo tee $spath/ifcfg-${device}
    echo BOOTPROTO=dhcp   | sudo tee -a $spath/ifcfg-${device}
    echo ONBOOT=yes       | sudo tee -a $spath/ifcfg-${device}
    echo DEFROUTE=no      | sudo tee -a $spath/ifcfg-${device}
    echo "STEP2: 'ifup' this device..."
    sudo ifup ${device}
    sleep 2s
done
""" % count
            self.session.cmd_output(guest_cmd, timeout=180)
        else:
            self.session.cmd_output(
                "for i in {1..%s};do sudo cp "
                "/etc/sysconfig/network-scripts/ifcfg-eth0 "
                "/etc/sysconfig/network-scripts/ifcfg-eth$i; "
                "sudo sed -i -e \"s/eth0/eth$i/g\" "
                "-e '$a\DEFROUTE=no' -e '/HWADDR/d' "
                "/etc/sysconfig/network-scripts/ifcfg-eth$i; "
                "sudo ifup eth$i;sleep 2; done" % count,
                timeout=180)
        time.sleep(10)
        outside_ips = [
            str(self.vm.get_private_ip_address(nic))
            for nic in self.vm.query_nics()
        ]
        inside_ips = self.session.cmd_output("ip addr")
        for outside_ip in outside_ips:
            self.assertIn(
                outside_ip, inside_ips, "Some of NICs are not available. "
                "Outside IP: %s Inside IPs:\n %s" % (outside_ip, inside_ips))
        # 2. Add 1 more NIC. Should not be added
        self.log.info("Step 2: Add 1 more NIC, should not be added.")
        self.vm.attach_nics(1)
        self.assertEqual(
            len(self.vm.query_nics()), count + 1,
            "NICs number should not greater than %d" % (count + 1))
        # 3. Detach all NICs. NICs should be removed inside guest
        self.log.info("Step 3: Detach all NICs")

        if self.cloud.cloud_provider == "alibaba":
            guest_cmd = """
primary_nic=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep -e eth -e ens | head -n 1)
device_name=$(echo $primary_nic | tr -d '[:digit:]')
dev_list=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep $device_name | grep -v $primary_nic)
for dev in $dev_list; do
    echo "'ifdown' device $dev..."
    sudo ifdown $dev
    sleep 2s
done
"""
            self.session.cmd_output(guest_cmd)
        else:
            self.session.cmd_output(
                "for i in {1..%s};do sudo ifdown eth$i;done" % count)
        nic_ids = [
            self.vm.get_nic_id(nic) for nic in self.vm.query_nics()
            if self.vm.get_nic_id(nic) != self.primary_nic_id
        ]
        self.vm.detach_nics(nic_ids, wait=True)
        self.assertEqual(len(self.vm.query_nics()), 1,
                         "Fail to remove all NICs outside guest")
        time.sleep(5)
        if self.cloud.cloud_provider == "alibaba":
            self.assertEqual(
                self.session.cmd_output(
                    "ip addr | grep -e 'eth.*mtu' -e 'ens.*mtu' | wc -l"), "1",
                "Fail to remove all NICs inside guest")
        else:
            self.assertEqual(
                self.session.cmd_output("ip addr|grep 'eth.*mtu'|wc -l"), "1",
                "Fail to remove all NICs inside guest")
        self.log.info("Detach all NICs successfully")

    def test_coldplug_nics(self):
        """
        1. Stop VM. Attach max NICs. Start VM and check all can get IP
        2. Stop VM. Add 1 more NIC. Should not be added
        3. Stop VM. Detach all NICs. Device should be removed inside guest
        """
        # 1. Attach max NICs and check all can get IP
        count = self.vm.nic_count - 1
        self.log.info("Step 1: Attach %s NICs." % count)
        self.vm.attach_nics(count, wait=True)
        self.assertEqual(len(self.vm.query_nics()), count + 1,
                         "Total NICs number is not %d" % (count + 1))
        self.vm.start(wait=True)
        self.session.connect(timeout=180)

        if self.cloud.cloud_provider == "alibaba":
            guest_cmd = """
primary_nic=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep -e eth -e ens | head -n 1)
device_name=$(echo $primary_nic | tr -d '[:digit:]')
device_numb=$(echo $primary_nic | tr -d '[:alpha:]')
[ "${device_name}" = "ens" ] && return    # ens* will up automatically
spath="/etc/sysconfig/network-scripts"
for offset in $(seq 1 %s); do
    device=${device_name}$((${device_numb}+${offset}))
    echo "STEP1: Create configure file ifcfg-${device}..."
    echo DEVICE=${device} | sudo tee $spath/ifcfg-${device}
    echo BOOTPROTO=dhcp   | sudo tee -a $spath/ifcfg-${device}
    echo ONBOOT=yes       | sudo tee -a $spath/ifcfg-${device}
    echo DEFROUTE=no      | sudo tee -a $spath/ifcfg-${device}
    echo "STEP2: 'ifup' this device..."
    sudo ifup ${device}
    sleep 2s
done
""" % count
            self.session.cmd_output(guest_cmd, timeout=180)
        else:
            self.session.cmd_output(
                "for i in {1..%s};do sudo cp "
                "/etc/sysconfig/network-scripts/ifcfg-eth0 "
                "/etc/sysconfig/network-scripts/ifcfg-eth$i; "
                "sudo sed -i -e \"s/eth0/eth$i/g\" "
                "-e '$a\DEFROUTE=no' -e '/HWADDR/d' "
                "/etc/sysconfig/network-scripts/ifcfg-eth$i; "
                "sudo ifup eth$i;sleep 2; done" % count,
                timeout=180)

        time.sleep(10)
        outside_ips = [
            self.vm.get_private_ip_address(nic)
            for nic in self.vm.query_nics()
        ]
        inside_ips = self.session.cmd_output("ip addr")
        for outside_ip in outside_ips:
            self.assertIn(
                outside_ip, inside_ips,
                "Some of NICs are not available. Inside IPs: %s" % inside_ips)
        # 2. Add 1 more NIC. Should not be added
        self.log.info("Step 2: Add 1 more NIC, should not be added.")
        self.vm.stop(wait=True)
        self.assertTrue(self.vm.is_stopped(), "Fail to stop VM")
        self.vm.attach_nics(1)
        self.assertEqual(
            len(self.vm.query_nics()), count + 1,
            "NICs number should not greater than %d" % (count + 1))
        # 3. Detach all NICs. NICs should be removed inside guest
        self.log.info("Step 3: Detach all NICs.")
        nic_ids = [
            self.vm.get_nic_id(nic) for nic in self.vm.query_nics()
            if self.vm.get_nic_id(nic) != self.primary_nic_id
        ]
        self.vm.detach_nics(nic_ids, wait=True)
        self.assertEqual(len(self.vm.query_nics()), 1,
                         "Fail to remove all NICs outside guest")
        self.vm.start(wait=True)
        self.assertTrue(self.vm.is_started(), "Fail to start VM")
        self.session.connect()
        if self.cloud.cloud_provider == "alibaba":
            guest_cmd = "ip addr | grep -e 'eth.*mtu' -e 'ens.*mtu' | wc -l"
        else:
            guest_cmd = "ip addr|grep 'eth.*mtu'|wc -l"

        self.assertEqual(self.session.cmd_output(guest_cmd), "1",
                         "Fail to remove all NICs inside guest")
        self.log.info("Detach all NICs successfully")

    def tearDown(self):
        if self.name.name.endswith("test_hotplug_nics") or \
           self.name.name.endswith("test_coldplug_nics"):
            if self.cloud.cloud_provider == "alibaba":
                guest_cmd = """
primary_nic=$(ifconfig | grep "flags=.*\<UP\>" | cut -d: -f1 | \
grep -e eth -e ens | head -n 1)
device_name=$(echo $primary_nic | tr -d '[:digit:]')
ls /etc/sysconfig/network-scripts/ifcfg-${device_name}* | \
grep -v ${primary_nic} | xargs sudo rm -f
"""
            else:
                guest_cmd = "ls /etc/sysconfig/network-scripts/ifcfg-eth*|\
grep -v eth0|xargs rm -f"

            self.session.cmd_output(guest_cmd, timeout=180)
        self.session.close()
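
The per-provider "count the NICs" check above is repeated in both the hotplug and coldplug paths. A minimal helper along these lines (hypothetical, not part of the original suite) would factor it out, assuming the same session API:

def count_guest_nics(session, cloud_provider):
    # Count the eth*/ens* interfaces visible inside the guest, using the
    # same per-provider grep patterns as the tests above.
    if cloud_provider == "alibaba":
        cmd = "ip addr | grep -e 'eth.*mtu' -e 'ens.*mtu' | wc -l"
    else:
        cmd = "ip addr | grep 'eth.*mtu' | wc -l"
    return int(session.cmd_output(cmd))

Each assertion then reduces to comparing count_guest_nics(...) against the expected count.
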
 def setUp(self):
     account = AzureAccount(self.params)
     account.login()
     self.project = self.params.get("rhel_ver", "*/VM/*")
     self.case_short_name = re.findall(r"Test.(.*)", self.name.name)[0]
     self.pwd = os.path.abspath(os.path.dirname(__file__))
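     # Gen2 (UEFI) provisioning: the cancel below reflects that RHEL
     # on-demand images older than 7.8 ship no Gen2 variant.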
     if self.case_short_name == "test_cloudinit_provision_gen2_vm":
         if LooseVersion(self.project) < LooseVersion('7.8'):
             self.cancel(
                 "Skip case because RHEL-{} on-demand image "
                 "doesn't support gen2".format(self.project))
         cloud = Setup(self.params, self.name, size="DS2_v2")
         self.image = AzureImage(self.params, generation="V2")
         self.image.create()
         cloud.vm.image = self.image.name
         cloud.vm.vm_name += "-gen2"
         cloud.vm.use_unmanaged_disk = False
     else:
         cloud = Setup(self.params, self.name)
     self.vm = cloud.vm
     self.package = self.params.get("packages", "*/Other/*")
     if self.case_short_name in [
             "test_cloudinit_login_with_password",
             "test_cloudinit_login_with_publickey",
             "test_cloudinit_save_and_handle_customdata_script",
             "test_cloudinit_save_and_handle_customdata_cloudinit_config",
             "test_cloudinit_assign_identity",
     ]:
         if self.vm.exists():
             self.vm.delete()
         self.session = cloud.init_session()
         return
     if self.case_short_name == \
             "test_cloudinit_provision_vm_with_multiple_nics":
         self.vm.vm_name += "2nics"
         if self.vm.exists():
             self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params, name=publicip_name)
         if not publicip.exists():
             publicip.create()
         nic_name_list = []
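         # Two NICs: nic0 sits on the base subnet and gets the public IP;
         # nic1 sits on <subnet>1 with no public IP.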
         for n in range(0, 2):
             nic_name = "{}nic{}".format(self.vm.vm_name, n)
             subnet = self.vm.subnet if n == 0 else self.vm.subnet + str(n)
             n_publicip = publicip_name if n == 0 else None
             nic = AzureNIC(self.params,
                            name=nic_name,
                            subnet=subnet,
                            vnet=self.vm.vnet_name,
                            publicip=n_publicip)
             if not nic.exists():
                 nic.create()
             nic_name_list.append(nic_name)
         self.vm.nics = ' '.join(nic_name_list)
         self.session = cloud.init_session()
         return
     if self.case_short_name == "test_cloudinit_provision_vm_with_sriov_nic":
         self.vm.vm_name += "sriov"
         if self.vm.exists():
             self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params, name=publicip_name)
         if not publicip.exists():
             publicip.create()
         self.vm.nics = "{}nic".format(self.vm.vm_name)
         nic = AzureNIC(self.params,
                        name=self.vm.nics,
                        subnet=self.vm.subnet,
                        vnet=self.vm.vnet_name,
                        publicip=publicip_name,
                        sriov=True)
         if not nic.exists():
             nic.create()
         self.session = cloud.init_session()
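         # Accelerated networking (SR-IOV) requires a supported VM size.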
         self.vm.size = "Standard_D3_v2"
         return
     if self.name.name.endswith("test_cloudinit_provision_vm_with_ipv6"):
         self.vm.vm_name += "ipv6"
         if self.vm.exists():
             self.vm.delete()
         publicip_name = self.vm.vm_name + "publicip"
         publicip = AzurePublicIP(self.params,
                                  name=publicip_name)
         if not publicip.exists():
             publicip.create()
         self.vm.nics = "{}nic".format(self.vm.vm_name)
         nic = AzureNIC(self.params,
                        name=self.vm.nics,
                        subnet=self.vm.subnet,
                        vnet=self.vm.vnet_name,
                        publicip=publicip_name)
         if not nic.exists():
             nic.create()
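         # Attach an IPv6 ip-config to the NIC alongside its default IPv4 one.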
         ipv6_config = AzureNicIpConfig(self.params,
                                        name=self.vm.nics+"ipv6",
                                        nic_name=self.vm.nics,
                                        ip_version="IPv6")
         if not ipv6_config.exists():
             ipv6_config.create()
         self.session = cloud.init_session()
         return
     self.session = cloud.init_vm()
     if self.case_short_name == "test_cloudinit_upgrade_downgrade_package":
         rhel7_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-7/packages/cloud-init/18.2/1.el7/x86_64/cloud-init-18.2-1.el7.x86_64.rpm"
         rhel8_old_pkg_url = "http://download.eng.bos.redhat.com/brewroot/vol/rhel-8/packages/cloud-init/18.2/1.el8/noarch/cloud-init-18.2-1.el8.noarch.rpm"
         try:
             self.assertEqual(0, self.session.cmd_status_output("ls /tmp/{}".format(self.package))[0],
                              "No new pakcage in guest VM")
             import requests
             if str(self.project).startswith('7'):
                 old_pkg_url = rhel7_old_pkg_url
             elif str(self.project).startswith('8'):
                 old_pkg_url = rhel8_old_pkg_url
             self.old_pkg = old_pkg_url.split('/')[-1]
             if not os.path.exists("/tmp/{}".format(self.old_pkg)):
                 r = requests.get(old_pkg_url, allow_redirects=True)
                 with open("/tmp/{}".format(self.old_pkg), 'wb') as f:
                     f.write(r.content)
             self.session.copy_files_to(
                 local_path="/tmp/{}".format(self.old_pkg),
                 remote_path="/tmp/{}".format(self.old_pkg))
             self.assertEqual(0, self.session.cmd_status_output("ls /tmp/{}".format(self.old_pkg))[0],
                              "No old pakcage in guest VM")
         except Exception:
             self.cancel(
                 "No old or new package in guest VM. Skip this case.")