def reboot(self, wait=False):
    """Soft-reboot the server; optionally poll until it is running again."""
    self.conn.compute.reboot_server(self.data.id, 'SOFT')
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get rebooted."):
        if self.is_started():
            break
def create_nic(self, wait=False):
    """Create a NIC; optionally wait until no NIC is left in Creating state.

    :param wait: when True, poll up to 300s (every 5s) until creation done.

    NOTE: per-NIC status cannot be checked via describe_nics(nic_ids=...)
    because of
    https://github.com/aliyun/aliyun-openapi-python-sdk/issues/78,
    so as a workaround we wait until no NIC at all is in "Creating" status.
    """
    logging.debug("Create NIC")
    # The returned NetworkInterfaceId is unusable for status polling
    # (see the SDK bug above), so it is deliberately not kept.
    self.ecs.create_nic()
    if not wait:
        return
    for count in utils_misc.iterate_timeout(
            300, "Timed out waiting for nics to be created.", wait=5):
        available_count = creating_count = other_count = 0
        for nic in self.list_nics():
            status = nic.get("Status")
            if status == "Available":
                available_count += 1
            elif status == "Creating":
                creating_count += 1
            else:
                other_count += 1
        logging.debug(
            'Status: Available/Creating/Other: "{0}/{1}/{2}"'.format(
                available_count, creating_count, other_count))
        if creating_count == 0:
            break
def unpause(self, wait=False):
    """Resume a paused server; optionally poll until it is active again."""
    self.conn.compute.unpause_server(self.data.id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get unpaused."):
        if self.is_started():
            break
def detach_nics(self, nic_ids, wait=False):
    """Detach the given NIC(s) from this ECS instance.

    :param nic_ids: a single NIC id or a list of NIC ids; no-op when
        empty or None.
    :param wait: when True, poll up to 300s (every 20s) until the count
        of attached NICs has dropped by len(nic_ids).
    """
    logging.info("Detach NICs from ECS")
    if nic_ids is None or nic_ids == []:
        return
    if not isinstance(nic_ids, list):
        nic_ids = [nic_ids]
    origin_count = len(self.query_nics())
    forks = 10
    if len(nic_ids) > forks:
        # Detaching too many NICs in one batch makes some requests fail;
        # as a workaround, detach the first `forks` NICs and wait for
        # them to finish before recursing on the remainder.
        logging.debug("Detaching first {0} from {1} NIC(s)...".format(
            forks, len(nic_ids)))
        self.detach_nics(nic_ids[:forks], True)
        self.detach_nics(nic_ids[forks:], True)
    else:
        for nic_id in nic_ids:
            self.ecs.detach_nic(self.id, nic_id)
        if wait:
            for count in utils_misc.iterate_timeout(
                    300, "Timed out waiting for nics to be detached",
                    wait=20):
                detached_count = origin_count - len(self.query_nics())
                logging.debug("Detached: {0} / Wanted: {1}".format(
                    detached_count, len(nic_ids)))
                if detached_count >= len(nic_ids):
                    break
def stop(self, wait=False):
    """Stop the server; optionally poll until it reports stopped."""
    self.conn.compute.stop_server(self.data.id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get stopped."):
        if self.is_stopped():
            break
def wait_for_status(self, status, timeout=300):
    """Block until the server reaches *status* or *timeout* seconds pass."""
    message = "Timed out waiting for server to get %s." % status
    for _ in utils_misc.iterate_timeout(timeout, message, wait=10):
        if self._get_status() == status:
            break
def stop(self, wait=False):
    """Gracefully shut down the libvirt domain; optionally wait for it."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    domain.shutdown()
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get stopped."):
        if self.is_stopped():
            break
def unpause(self, wait=False):
    """Resume the suspended libvirt domain; optionally wait until started."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    domain.resume()
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get unpaused."):
        if self.is_started():
            break
def delete_cloud_disk(self, disk_id, wait=False):
    """Delete an EVS disk; optionally wait until it no longer exists."""
    logging.info("Delete an EVS disk")
    self.ecs.delete_evs(disk_id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for cloud disk to be deleted"):
        # NOTE(review): .encode("ascii") looks like a py2-era leftover —
        # confirm the query helper accepts bytes on py3.
        if len(self.query_cloud_disks(disk_id=disk_id.encode("ascii"))) == 0:
            break
def delete_cloud_disk(self, disk_id, wait=False):
    """Delete a cloud disk; optionally wait until queries return None."""
    logging.info("Delete a cloud disk")
    self.ecs.delete_disk(disk_id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            180, "Timed out waiting for cloud disk to be deleted", wait=5):
        if self.query_cloud_disks(disk_id=disk_id) is None:
            break
def delete(self, wait=False):
    """Destroy (if running) and undefine the domain; optionally wait."""
    domain = self.conn.lookupByUUIDString(self.data.get("uuid"))
    if not self.is_stopped():
        domain.destroy()
    domain.undefine()
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get deleted."):
        if not self.exists():
            break
def detach_cloud_disks(self, disk_id, wait=False, scsi=False):
    """Detach an EVS disk; optionally wait until it reports 'available'."""
    logging.info("Detach an EVS disk from ECS")
    self.ecs.detach_volume(self.data.get('id'), disk_id)
    if not wait:
        return
    # Give the backend a moment before the first status query.
    time.sleep(5)
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for cloud disk to be detached"):
        disks = self.query_cloud_disks(disk_id=disk_id, scsi=scsi)
        if disks[0].get("status") == u"available":
            break
def delete_cloud_disk(self, disk_id, wait=False):
    """Delete specified cloud disk."""
    logging.info("Delete a cloud disk")
    disk_id = disk_id.encode('ascii')
    self.ecs.delete_disk(disk_id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            300, "Timed out waiting for cloud disk to be deleted", wait=5):
        # An empty list (not None) means the disk is fully gone.
        if self.query_cloud_disks(disk_id=disk_id) == []:
            break
def create_cloud_disk(self, wait=False, **args):
    """Create a cloud disk and return the raw API response."""
    logging.info("Create cloud disk")
    output = self.ecs.create_disk()
    disk_id = output.get("DiskId").encode("ascii")
    if wait:
        for _ in utils_misc.iterate_timeout(
                300, "Timed out waiting for cloud disk to be created.",
                wait=5):
            status = self.query_cloud_disks(
                disk_id=disk_id)[0].get("Status")
            if status == u'Available':
                break
    return output
def detach_cloud_disks(self, disk_id=None, wait=False, **args):
    """Detach a cloud disk from the VM and return the raw API response."""
    logging.info("Detach cloud disk to VM")
    disk_id = disk_id.encode("ascii")
    output = self.ecs.detach_disk(self.id, disk_id)
    if wait:
        for _ in utils_misc.iterate_timeout(
                300, "Timed out waiting for cloud disk to be detached.",
                wait=5):
            status = self.query_cloud_disks(
                disk_id=disk_id)[0].get("Status")
            if status == u"Available":
                break
    return output
def delete(self, wait=False):
    """Delete the server, releasing its floating IP first if one exists."""
    floating = self.floating_ip
    if floating and self.floating_network_id != '':
        floating_id = self.conn.network.find_ip(floating)
        self.conn.network.delete_ip(floating_id)
    self.conn.compute.delete_server(self.data.id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            60, "Timed out waiting for server to get deleted."):
        if not self.exists():
            break
def delete(self, wait=True):
    """Delete the VM via the Azure CLI; optionally wait until it is gone."""
    cmd = 'az vm delete --name "{}" --resource-group "{}" --yes'.format(
        self.vm_name, self.resource_group)
    if not wait:
        cmd += " --no-wait"
    command(cmd)
    if wait:
        # The VM can linger for a while after the CLI returns.
        for _ in utils_misc.iterate_timeout(
                100, "Timed out waiting for server to get deleted.",
                wait=10):
            if not self.exists():
                break
def stop(self, wait=True):
    """Stop the VM via CLI; return True once stopped, else the VM info."""
    ret = command(self.basecli + '-stop "{}"'.format(self.vm_name))
    if wait:
        # Poll until the VM reports stopped.
        for _ in utils_misc.iterate_timeout(
                100, "Timed out waiting for server to be stopped.",
                wait=10):
            self.show()
            if self.is_stopped():
                return True
    # Not stopped (or wait disabled): return the VM info if any output.
    if len(ret.stdout):
        return self.show()
def delete(self, wait=False):
    """
    This helps to delete a VM
    The VM can be deleted only if the status is stopped(sdk/cli only)
    """
    logging.info("Delete VM")
    # The instance must be stopped before deletion is accepted.
    if not self.is_stopped():
        self.stop(wait=True)
    self.ecs.delete_instance(self.id)
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            300, "Timed out waiting for server to get deleted.", wait=10):
        if not self.exists():
            break
def delete_nics(self, nic_name='default', wait=False):
    """Delete the specified NICs by the name."""
    logging.debug("Delete NICs (Name: {0})".format(nic_name))
    matched = self.ecs.describe_nics(nic_name=nic_name).get(
        "NetworkInterfaceSets").get("NetworkInterfaceSet")
    for nic in matched:
        self.delete_nic(nic['NetworkInterfaceId'])
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            300, "Timed out waiting for nics to be deleted.", wait=1):
        remaining = len(
            self.ecs.describe_nics(nic_name=nic_name).get(
                "NetworkInterfaceSets").get("NetworkInterfaceSet"))
        logging.debug(
            'Remaining {0} NIC(s) to be deleted.'.format(remaining))
        if remaining == 0:
            break
def wait_for_status(self, status, timeout=300):
    """Poll the instance until it reaches *status*.

    Polls every 20s up to *timeout* seconds. As a fail-fast shortcut,
    when waiting for 'Running', any current status other than 'Stopping'
    or 'Starting' (or 'Running' itself) is treated as unrecoverable and
    raises immediately instead of burning the whole timeout.

    :param status: target status string (e.g. 'Running', 'Stopped').
    :param timeout: seconds before iterate_timeout raises.
    :raises Exception: on an unexpected status while waiting for 'Running'.
    """
    error_message = "Timed out waiting for server to get %s." % status
    for count in utils_misc.iterate_timeout(timeout,
                                            error_message,
                                            wait=20):
        current_status = self._get_status()
        logging.debug('Target: {0}, Current: {1}'.format(
            status, current_status))
        if current_status == status:
            break
        # Fail fast: detect a wrong status to save time.
        if status == 'Running' and current_status not in ('Stopping',
                                                          'Starting'):
            # The original message was a '\'-continued string literal,
            # which embedded raw source indentation into the text and
            # read "cannot be {0} rather than ..."; fixed wording here.
            raise Exception(
                'While waiting for the server to get Running, its status '
                'cannot be {0}; expected Stopping or Starting.'.format(
                    current_status))
def delete(self, wait=True):
    """Delete the VM via CLI (best effort).

    :param wait: when True, poll up to 100s until the VM no longer exists.
    :return: False when the CLI command fails; otherwise None.
    """
    name = self.id if self.id is not None else self.vm_name
    cmd = self.basecli + '-delete "{}"'.format(name)
    try:
        command(cmd)
    except Exception:
        # NOTE(review): was a bare 'except:', which also swallows
        # SystemExit/KeyboardInterrupt; narrowed to Exception while
        # keeping the best-effort "return False on failure" contract.
        return False
    # Sometimes VM still exists for a while after cli finished
    time.sleep(30)
    if wait:
        error_message = "Timed out waiting for server to get deleted."
        for count in utils_misc.iterate_timeout(100, error_message,
                                                wait=10):
            if not self.exists():
                time.sleep(
                    60)  # waiting for other resource (network) release
                break
def create(self, wait=False):
    """Define and start a libvirt domain from the dom_xml template.

    Patches the template's os/type arch+machine per self.arch, sets the
    name, vcpus, memory and backing image, then defines and starts the
    domain via the libvirt connection.

    :param wait: when True, poll up to 60s until the domain exists and
        has a floating IP; the cached domain data is then invalidated.
    """
    root = ET.fromstring(dom_xml)
    if self.arch == "x86_64":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pc")
    elif self.arch == "ppc64le":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pseries")
    elif self.arch == "s390x":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "s390-ccw-virtio")
    elif self.arch == "aarch64":
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "virt")
        # aarch64 guests additionally get a host-passthrough CPU,
        # AAVMF (UEFI) firmware loader + nvram, and /dev/urandom as
        # the RNG backend.
        sub_cpu = ET.fromstring(
            '<cpu mode="host-passthrough"><model fallback="allow" /></cpu>'
        )
        root.insert(3, sub_cpu)
        sub_loader = ET.fromstring('<loader readonly="yes" type="pflash">\
/usr/share/AAVMF/AAVMF_CODE.verbose.fd</loader>')
        root.find("os").insert(0, sub_loader)
        sub_nvram = ET.fromstring(
            '<nvram>/usr/share/AAVMF/AAVMF_VARS.fd</nvram>')
        root.find("os").insert(0, sub_nvram)
        root.find("devices").find("rng").find(
            "backend").text = "/dev/urandom"
    else:
        root.find("os").find("type").set("arch", self.arch)
        root.find("os").find("type").set("machine", "pc")
    root.find("name").text = self.vm_name
    root.find("vcpu").text = str(self.vcpus)
    # libvirt memory elements are in KiB; the *1024*1024 factor implies
    # self.memory is in GiB — TODO confirm against the caller.
    root.find("memory").text = str(self.memory * 1024 * 1024)
    root.find("currentMemory").text = str(self.memory * 1024 * 1024)
    root.find("devices").find("disk").find("source").set(
        "file", "/var/lib/libvirt/images/" + self.image_name)
    xmlconfig = ET.tostring(root).decode()
    dom = self.conn.defineXML(xmlconfig)
    dom.create()
    if wait:
        for count in utils_misc.iterate_timeout(
                60, "Timed out waiting for server to get Created."):
            if self.exists() and self.floating_ip:
                break
    # Drop the cached domain data so it is re-fetched on next access.
    self._data = None
def create(self, wait=True):
    """Create the VM via CLI.

    :param wait: when True, poll up to 100s (after a 60s grace period)
        until the VM is started.
    :return: True when started, the VM info when not waiting, or False
        when the CLI command fails or produces no output.
    """
    cmd = self.basecli + '-create "{}" --image "{}" --json'\
        .format(self.vm_name, self.image)
    if self.memory:
        cmd += ' --memory {}'.format(self.memory)
    if self.network:
        cmd += ' --network {}'.format(self.network)
    if self.ssh_key_name:
        cmd += ' --key-name {}'.format(self.ssh_key_name)
    # if self.processors:
    #     cmd += ' --processors "{}"'.format(self.processors)
    if self.processor_type:
        cmd += ' --processor-type "{}"'.format(self.processor_type)
    # if self.volumes:
    #     cmd += ' --volumes {}'.format(self.volumes)
    # if self.sys_type:
    #     cmd += ' --sys-type "{}"'.format(self.sys_type)
    # if self.storage_type:
    #     cmd += ' --storage-type {}'.format(self.storage_type)
    try:
        ret = command(cmd)
    except Exception:
        # NOTE(review): was a bare 'except:', which also swallows
        # SystemExit/KeyboardInterrupt; narrowed to Exception while
        # keeping the best-effort "return False on failure" contract.
        return False
    # waiting for VM is active
    if len(ret.stdout):
        info = json.loads(ret.stdout)
        self.properties = info[0]
        self.id = self.properties.get("pvmInstanceID")
        if wait:
            error_message = "Timed out waiting for server to be active."
            time.sleep(60)
            for count in utils_misc.iterate_timeout(100, error_message,
                                                    wait=10):
                if self.show():
                    if self.is_started():
                        return True
        else:
            return self.show()
    # not active or not wait, just return the VM info
    return False
def attach_nics(self, nic_count, wait=False):
    """Attach *nic_count* pre-created NICs to this ECS instance."""
    logging.debug("Attach %s NICs to ECS" % nic_count)
    origin_count = len(self.query_nics())
    candidates = self.list_nics()
    if len(candidates) < nic_count:
        raise Exception("No enough NICs. Need: %s; Exists: %s" %
                        (nic_count, len(candidates)))
    for nic in candidates[0:nic_count]:
        self.ecs.attach_nic(self.id, nic.get("NetworkInterfaceId"))
    if not wait:
        return
    for _ in utils_misc.iterate_timeout(
            300, "Timed out waiting for nics to be attached.", wait=20):
        attached_count = len(self.query_nics()) - origin_count
        logging.debug("Attached: {0} / Wanted: {1}".format(
            attached_count, nic_count))
        if attached_count >= nic_count:
            break
def capture(self, wait=True):
    """Capture the VM into an image; return True when the image is active,
    otherwise the captured image info."""
    capture_image_name = self.vm_name + "_capture_image"
    cmd = self.basecli + '-capture "{}" --destination image-catalog --name "{}" --json'\
        .format(self.vm_name, capture_image_name)
    # if self.volumes:
    #     cmd += ' --volumes {}'.format(self.volumes)
    ret = command(cmd)
    capture_image = BootImage(self.params, name=capture_image_name)
    if wait:
        # Poll until the captured image becomes active.
        for _ in utils_misc.iterate_timeout(
                100, "Timed out waiting for image to be active.",
                wait=10):
            capture_image.show()
            if capture_image.is_active():
                return True
    # not active or not wait, just return the captured image info
    if len(ret.stdout):
        return capture_image.show()
def wait_for_status(self, job_id, timeout, error_message, endpoint='ecs'):
    """Poll a task until its status becomes SUCCESS or *timeout* expires."""
    for _ in utils_misc.iterate_timeout(timeout, error_message):
        result = self.ecs.query_task_status(job_id, endpoint=endpoint)
        if result['status'] == 'SUCCESS':
            break