def create_nic(self, wait=False):
    """Create a network interface via the ECS API.

    :param wait: if True, block until no NIC in this account is in
                 "Creating" status (see workaround note below).
    :return: the new NIC's NetworkInterfaceId (previously computed but
             dropped; returning it is backward-compatible).
    """
    logging.debug("Create NIC")
    nic_id = self.ecs.create_nic().get("NetworkInterfaceId")
    if wait:
        for count in utils_lib.iterate_timeout(
                300, "Timed out waiting for nics to be created.", wait=5):
            # Cannot query a single NIC's status with nic_ids because of
            # https://github.com/aliyun/aliyun-openapi-python-sdk/issues/78
            # Workaround: poll ALL nics and wait until none is "Creating".
            available_count = creating_count = other_count = 0
            for nic in self.list_nics():
                status = nic.get("Status")
                if status == "Available":
                    available_count += 1
                elif status == "Creating":
                    creating_count += 1
                else:
                    other_count += 1
            logging.debug(
                'Status: Available/Creating/Other: "{0}/{1}/{2}"'.format(
                    available_count, creating_count, other_count))
            if creating_count == 0:
                break
    return nic_id
def set_cpu_passthrough(self, enabled=True):
    '''Enable or disable CPU passthrough for the VM.

    Key steps:
    1. Power off VM
    2. Set CPU passthrough
    3. Power on VM

    :param enabled: True to enable passthrough, False to disable.
    '''
    if self.is_started():
        self.stop(wait=True)
    # The enable/disable branches only differed in the boolean passed to
    # acli and to get_cpu_passthrough(); build both from `enabled` once.
    if enabled:
        logging.info("Enable VM cpu passthrough.")
    else:
        logging.info("Disable VM cpu passthrough.")
    res = self.cvm_cmd(
        "acli vm.update %s cpu_passthrough=%s" %
        (self.data.get('uuid'), "true" if enabled else "false"))
    # acli reports a pending task on success; "cannot" indicates rejection.
    if "pending" in res.lower() and "cannot" not in res.lower():
        logging.info("VM cpu passthrough has changed successfully.")
    self.start(wait=True)
    for count in utils_lib.iterate_timeout(
            60, "Timed out waiting for verify cpu passthrough changing."):
        if self.get_cpu_passthrough(enabled=enabled):
            break
def _verify_fio_test(self, test_log):
    '''Verify the fio jobs are running, wait for them to finish, then dump the log.'''
    self.log.info("Verify fio test")
    utils_lib.run_cmd(self,
                      "cat %s" % test_log,
                      expect_ret=0,
                      expect_kw="nutanix",
                      msg="Check fio test log")
    alive_cmd = "ps -ef | grep -v grep | grep fio-test"
    utils_lib.run_cmd(self,
                      alive_cmd,
                      expect_ret=0,
                      expect_kw="nutanix",
                      msg="Check if all fio test jobs are still alive")
    count_cmd = "ps -ef | grep -v grep | grep fio-test | wc -l"
    # Poll the job count until every fio process has exited.
    for _ in utils_lib.iterate_timeout(
            240, "Timed out waiting for complete fio test", wait=5):
        remaining = int(
            utils_lib.run_cmd(
                self,
                count_cmd,
                expect_ret=0,
                msg="Check if all fio test jobs are still alive").strip())
        if remaining == 0:
            break
    utils_lib.run_cmd(self,
                      "cat %s" % test_log,
                      expect_ret=0,
                      msg="Check fio test log")
def update_vcpu_num(self, vcpu_num_target):
    '''Update the VM's vCPU count.

    If target vCPU number is less than current, the key steps will be:
    1. Power off VM
    2. Update vCPU number
    3. Power on VM
    Growing the count is applied without a power cycle.
    '''
    vcpu_num_current = self.get_vcpu_num()
    logging.info("Update vCPU number from %s to %s" %
                 (vcpu_num_current, vcpu_num_target))
    shrinking = vcpu_num_target < vcpu_num_current
    if shrinking and self.is_started():
        self.stop(wait=True)
    res = self.prism.update_vcpu(self.data.get('uuid'), vcpu_num_target)
    if shrinking:
        self.wait_for_status(
            res['task_uuid'], 60,
            "Timed out waiting for VM to complete vCPU number updating.")
        if self.is_stopped():
            self.start(wait=True)
    # Verify the new count is actually visible on the VM.
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for verify vCPU number updating."):
        if self.exists() and self.get_vcpu_num() == vcpu_num_target:
            break
def reboot(self, wait=False):
    """Soft-reboot the server; optionally wait until it is running again."""
    self.conn.compute.reboot_server(self.data.id, 'SOFT')
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get rebooted."):
        if self.is_started():
            break
def unpause(self, wait=False):
    """Unpause the server; optionally wait until it is running."""
    self.conn.compute.unpause_server(self.data.id)
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get unpaused."):
        if self.is_started():
            break
def stop(self, wait=False):
    """Stop the server; optionally wait until it is powered off."""
    self.conn.compute.stop_server(self.data.id)
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get stopped."):
        if self.is_stopped():
            break
def update_memory_size(self, mem_gb_target):
    '''Update the VM's memory capacity (GB).

    If target memory size is less than current, the key steps:
    1. Power off VM
    2. Update memory size
    3. Power on VM
    Growing memory is applied without a power cycle.
    '''
    mem_gb_current = self.get_memory_size()
    logging.info("Update memory capacity (GB) from %s to %s" %
                 (mem_gb_current, mem_gb_target))
    shrinking = mem_gb_target < mem_gb_current
    if shrinking and self.is_started():
        self.stop(wait=True)
    res = self.prism.update_memory(self.data.get('uuid'), mem_gb_target)
    if shrinking:
        self.wait_for_status(
            res['task_uuid'], 60,
            "Timed out waiting for VM to complete memory capacity (GB) updating."
        )
        if self.is_stopped():
            self.start(wait=True)
    # Verify the new size is actually visible on the VM.
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for verify memory capacity (GB) updating."):
        if self.exists() and self.get_memory_size() == mem_gb_target:
            break
def wait_for_status(self, task_uuid, timeout, error_message):
    """Poll a Prism task until it succeeds, fails, or the timeout expires."""
    for _ in utils_lib.iterate_timeout(timeout, error_message):
        progress = self.prism.list_tasks(task_uuid)['progress_status']
        if progress == 'Succeeded':
            break
        elif progress == 'Failed':
            # Log and stop polling; the caller decides how to proceed.
            logging.error("progress status of task is Failed")
            break
def unpause(self, wait=False):
    """Resume the paused libvirt domain; optionally wait until it is running."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    domain.resume()
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get unpaused."):
        if self.is_started():
            break
def stop(self, wait=False):
    """Request a guest shutdown of the libvirt domain; optionally wait for it."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    domain.shutdown()
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get stopped."):
        if self.is_stopped():
            break
def unpause(self, wait=False):
    """Unpause the VM via virtctl; optionally wait until it is running."""
    command = 'virtctl unpause vm %s' % self.vm_name
    subprocess.Popen(command, shell=True, stdout=FNULL).communicate()
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get unpaused."):
        if self.is_started():
            break
def delete(self, wait=False):
    """Destroy (if running) and undefine the libvirt domain, including NVRAM."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    if not self.is_stopped():
        domain.destroy()
    domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get deleted."):
        if not self.exists():
            break
def delete(self, wait=False):
    """Delete the VM and its ssh service via oc; optionally wait until gone."""
    for command in ('oc delete vm %s' % self.vm_name, 'oc delete svc ssh'):
        subprocess.Popen(command, shell=True, stdout=FNULL).communicate()
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get deleted."):
        if not self.exists():
            break
def stop(self, wait=False):
    """Stop the VM through Prism; optionally wait until it is powered off."""
    logging.info("Stop VM")
    res = self.prism.stop_vm(self.data.get('uuid'))
    if not wait:
        return
    # Wait for the Prism task first, then confirm the power state.
    self.wait_for_status(res['task_uuid'], 60,
                         "Timed out waiting for server to get stopped.")
    for _ in utils_lib.iterate_timeout(
            30, "Timed out waiting for server to get stopped."):
        if self.is_stopped():
            break
def reboot(self, wait=False):
    """Restart the VM through Prism; optionally wait until it has an IP."""
    logging.info("Restart VM")
    res = self.prism.restart_vm(self.data.get('uuid'))
    if not wait:
        return
    # Wait for the Prism task first, then for the guest to get an address.
    self.wait_for_status(res['task_uuid'], 60,
                         "Timed out waiting for server to get rebooted.")
    for _ in utils_lib.iterate_timeout(
            120, "Timed out waiting for getting IP address."):
        if self.exists() and self.floating_ip:
            break
def test_check_boot_cmdline_parameters(self):
    """
    case_name: test_check_boot_cmdline_parameters
    component: rhel-guest-image
    bugzilla_id: 1144155
    is_customer_case: False
    maintainer: [email protected]
    description: check cmdline parameters
    key_steps:
        1. cat /proc/cmdline
    expect_result:
        no_timer_check console=tty0 console=ttyS0,115200n8 net.ifnames=0
        crashkernel=
    """
    # Wait for the kdump service to be active before querying its
    # crashkernel default. (Fixed: the old timeout message wrongly said
    # "getting IP address" — a copy-paste from another wait loop.)
    for count in utils_lib.iterate_timeout(
            120, "Timed out waiting for kdump service to be active."):
        cmd = 'sudo systemctl is-active kdump'
        ret = utils_lib.run_cmd(self,
                                cmd,
                                ret_status=True,
                                msg='check kdump is active')
        if ret == 0:
            break
    src_dir = self.data_dir + "/guest-images/"
    data_file = "cmdline_params.lst"
    # Expected parameters, one per line; filter() drops blank lines.
    lines = filter(None, (line.rstrip()
                          for line in open(os.path.join(src_dir,
                                                        data_file))))
    cmd = "cat /proc/cmdline"
    output = utils_lib.run_cmd(self,
                               cmd,
                               expect_ret=0,
                               msg="cat /proc/cmdline")
    for line in lines:
        self.assertIn(line, output, "%s is not in boot parameters" % line)
    # crashkernel: RHEL >= 9 reports a computed default via kdumpctl,
    # earlier releases use the literal "crashkernel=auto".
    product_id = utils_lib.get_product_id(self)
    if float(product_id) >= 9.0:
        cmd = "sudo kdumpctl get-default-crashkernel"
        tmp_output = utils_lib.run_cmd(
            self, cmd, expect_ret=0, msg="kdumpctl get-default-crashkernel")
        # Fixed: the old code used tmp_output.rstrip('.')[0], which kept
        # only the FIRST CHARACTER of the value, making the assertion
        # below almost meaningless. Compare the full first line instead.
        line = "crashkernel=" + tmp_output.strip().splitlines()[0]
    else:
        line = "crashkernel=auto"
    self.assertIn(line, output, "%s is not in boot parameters" % line)
def pause(self, wait=False):
    """Suspend the GCP instance; optionally wait until it reports paused."""
    operation = self.service_beta.instances().suspend(
        project=self.project,
        zone=self.zone,
        instance=self.vm_name).execute()
    wait_for_operation(self.service_v1, self.project, self.zone,
                       operation['name'])
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get paused."):
        if self.is_paused():
            break
def start(self, wait=False):
    """Start the GCP instance; optionally wait until it reports running."""
    operation = self.service_v1.instances().start(
        project=self.project,
        zone=self.zone,
        instance=self.vm_name).execute()
    wait_for_operation(self.service_v1, self.project, self.zone,
                       operation['name'])
    if wait:
        for _ in utils_lib.iterate_timeout(
                60, "Timed out waiting for server to get started."):
            if self.is_started():
                break
    # Drop the cached instance data so the next access refetches it.
    self._data = None
def delete(self, wait=False):
    """Release the server's floating IP (best effort) and delete the server."""
    f_ip = self.floating_ip
    try:
        if f_ip and self.floating_network_id != '':
            f_ip_id = self.conn.network.find_ip(f_ip)
            self.conn.network.delete_ip(f_ip_id)
    except InvalidRequest as err:
        # Best effort: log and continue with server deletion.
        LOG.info(err)
    self.conn.compute.delete_server(self.data.id)
    if not wait:
        return
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for server to get deleted."):
        if not self.exists():
            break
def create(self, wait=True):
    """Define and start a libvirt domain rendered from the dom_xml template.

    Patches the template per-architecture (machine type; on aarch64 also
    host-passthrough CPU, OVMF secure-boot loader/NVRAM and an urandom RNG
    backend), fills in name/vcpu/memory/disk paths, then defines and
    creates the domain. If wait is True, blocks until the domain exists
    and has a floating IP.
    """
    root = ET.fromstring(dom_xml)
    if self.arch == "x86_64":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pc")
    elif self.arch == "ppc64le":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pseries")
    elif self.arch == "s390x":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "s390-ccw-virtio")
    elif self.arch == "aarch64":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "virt")
        # aarch64 needs host-passthrough CPU; inserted at a fixed index
        # (3) in the template's element order.
        sub_cpu = ET.fromstring(
            '<cpu mode="host-passthrough"><model fallback="allow" /></cpu>'
        )
        root.insert(3, sub_cpu)
        # OVMF secure-boot firmware; nvram is inserted after loader at
        # index 0, so it ends up FIRST in <os> (order matters to libvirt).
        sub_loader = ET.fromstring('<loader readonly="yes" type="pflash">'
                                   '/usr/share/OVMF/OVMF_CODE.secboot.fd'
                                   '</loader>')
        root.find("os").insert(0, sub_loader)
        sub_nvram = ET.fromstring(
            "<nvram template='/usr/share/OVMF/OVMF_VARS.fd'>"
            "%s/OVMF_VARS.fd</nvram>" % self.image_dir)
        root.find("os").insert(0, sub_nvram)
        root.find("devices").find("rng").find(
            "backend").text = "/dev/urandom"
    else:
        # Unknown arch: fall back to a generic x86-style machine type.
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pc")
    root.find("name").text = self.vm_name
    root.find("vcpu").text = str(self.vcpus)
    # libvirt memory units here are KiB; self.memory is in GB.
    root.find("memory").text = str(self.memory * 1024 * 1024)
    root.find("currentMemory").text = str(self.memory * 1024 * 1024)
    root.find("devices").find("disk[@device='disk']").find("source").set(
        "file", os.path.join(self.image_dir, self.image_name))
    root.find("devices").find("disk[@device='cdrom']").find("source").set(
        "file", os.path.join(self.image_dir, self.nocloud_iso_name))
    xmlconfig = ET.tostring(root).decode()
    dom = self.conn.defineXML(xmlconfig)
    dom.create()
    if wait:
        for count in utils_lib.iterate_timeout(
                60, "Timed out waiting for server to get Created."):
            if self.exists() and self.floating_ip:
                break
    # Invalidate cached domain data so the next access refetches it.
    self._data = None
def wait_for_status(self, status, timeout=300):
    """Poll the server until it reaches *status*.

    :return: True once the target status is observed; False as soon as a
             status is seen that cannot lead to the target (fail fast
             instead of burning the whole timeout).
    """
    error_message = "Timed out waiting for server to get %s." % status
    for _ in utils_lib.iterate_timeout(timeout, error_message, wait=20):
        current = self._get_status()
        logging.debug('Target: {0}, Current: {1}'.format(status, current))
        if current == status:
            return True
        # Exceptions (detect wrong status to save time)
        if status == 'Running' and current not in ('Stopping', 'Starting'):
            logging.error('While waiting for the server to get Running, \
its status cannot be {0} rather than Stopping or Starting.'.format(current))
            return False
def create(self, wait=False):
    """Render the openshift VM template, apply it with oc, and record access info."""
    self.pwd = os.path.abspath(os.path.dirname(__file__))
    data_dir = os.path.join(os.path.dirname(self.pwd), 'data')
    template_path = os.path.join(data_dir, 'guest-images/openshift.templ')
    with open(template_path, 'r') as f:
        try:
            vm_spec = yaml.load(f, Loader=yaml.FullLoader)
        except yaml.YAMLError as e:
            print(e)
    # Replace the template's ssh key with the local public key inside the
    # cloud-init user data.
    ssh_pubkey = utils_lib.get_public_key()
    template_spec = vm_spec['spec']['template']
    volumes = template_spec['spec']['volumes']
    volumes[0]['cloudInitNoCloud']['userData'] = re.sub(
        'ssh-rsa.*\n', ssh_pubkey,
        volumes[0]['cloudInitNoCloud']['userData'])
    volumes[1]['containerDisk']['image'] = self.image_name
    vm_spec['metadata']['name'] = self.vm_name
    template_spec['metadata']['labels'][
        'kubevirt.io/domain'] = self.vm_name
    domain = template_spec['spec']['domain']
    domain['cpu']['cores'] = self.vcpus
    domain['resources']['requests']['memory'] = str(self.memory) + 'Gi'
    rendered_path = os.path.join(data_dir, 'guest-images/openshift.yaml')
    with open(rendered_path, 'w') as out:
        yaml.dump(vm_spec, out)
    subprocess.Popen('oc apply -f %s' % rendered_path,
                     shell=True,
                     stdout=FNULL).communicate()
    if wait:
        for _ in utils_lib.iterate_timeout(
                900, "Timed out waiting for server to get Created."):
            if self.is_started():
                break
    # ssh access goes through a service named after the VM; drop cached data.
    self.port = self.vm_name
    self._data = None
def test_stop_start_vm(self):
    """
    case_tag: Lifecycle
    case_name: test_stop_start_vm
    case_file: os_tests.tests.test_lifecycle.TestLifeCycle.test_stop_start_vm
    component: lifecycle
    bugzilla_id: N/A
    is_customer_case: False
    testplan: N/A
    maintainer: [email protected]
    description: Check user name after stop/start VM.
    key_steps:
        1. Check user name after stop/start VM.
    expect_result:
        1. Check user name is right after stop/start VM.
    debug_want: N/A
    """
    if not self.vm:
        self.skipTest('no vm provider found')
    # Round 1: stop/start through the provider API.
    self.vm.stop(wait=True)
    self.assertTrue(self.vm.is_stopped(),
                    "Stop VM error: VM status is not SHUTOFF")
    self._start_vm_and_check()
    # Round 2: shut down from inside the guest, then start again.
    utils_lib.run_cmd(self, 'sudo shutdown now')
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for getting server stopped."):
        if self.vm.is_stopped():
            break
    self._start_vm_and_check()
def set_memory_vnuma(self, vnuma_num_target):
    '''Set the VM's vNUMA node count.

    Key steps:
    1. Power off VM
    2. Set memory vnuma
    3. Power on VM
    '''
    current_nodes = self.get_memory_vnuma()
    if self.is_started():
        self.stop(wait=True)
    logging.info("Set VM vnuma nodes number from %s to %s" %
                 (current_nodes, vnuma_num_target))
    reply = self.cvm_cmd("acli vm.update %s num_vnuma_nodes=%s" %
                         (self.data.get('uuid'), vnuma_num_target))
    # acli reports a pending task on success; "cannot" indicates rejection.
    if "pending" in reply.lower() and "cannot" not in reply.lower():
        logging.info("VM vnuma nodes number has changed successfully.")
    self.start(wait=True)
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for verify vnuma nodes number changing."
            ):
        if self.get_memory_vnuma() == vnuma_num_target:
            break
def update_core_num(self, core_num_target):
    '''Update the cores-per-vCPU number of the VM.

    Key steps:
    1. Power off VM
    2. Update vCPU core number
    3. Power on VM
    '''
    current_cores = self.get_core_num()
    logging.info("Update core number per vCPU from %s to %s" %
                 (current_cores, core_num_target))
    # Changing cores-per-vCPU always requires a power cycle.
    if self.is_started():
        self.stop(wait=True)
    res = self.prism.update_core(self.data.get('uuid'), core_num_target)
    self.wait_for_status(
        res['task_uuid'], 60,
        "Timed out waiting for VM to complete core number per vCPU updating."
    )
    self.start(wait=True)
    # Verify the new core count is actually visible on the VM.
    for _ in utils_lib.iterate_timeout(
            60, "Timed out waiting for verify core number per vCPU updating."):
        if self.exists() and self.get_core_num() == core_num_target:
            break