def attach_vif_to_vswitch(self, inst_name, eth_index, vswitch=None, domain=None, update_flag=True):
    """
    Attach the guest interface to a vSwitch.

    inst_name: Guest VM name
    eth_index: Network interface number in guest, e.g. eth<eth_index>
    vswitch: name of the target vSwitch (required)
    domain, update_flag: kept for interface compatibility; unused in this block
    :return: False on validation/lookup failure
    """
    # Reconfiguring the interface of a live domain is not supported here.
    if self.is_instance_running(inst_name):
        raise StandardError('VM %s cannot be configured when running' % inst_name)
    if vswitch is None:
        log.error("vSwitch must be specified")
        return False
    if not isinstance(eth_index, int):
        log.error("Param <eth_index> must be an int type")
        return False
    dom_hdl = self._get_domain_handler(inst_name)
    try:
        dom_xml = dom_hdl.XMLDesc()
    except libvirtError as err:  # fix: Py2-only 'except X, e' form; file elsewhere uses 'as'
        log.debug(str(err))
        log.error("Failed to fetch the defined XML for %s", inst_name)
        return False
    # NOTE(review): dom_xml is fetched but never used in this visible portion —
    # the actual attach logic appears to be truncated; confirm against the repo.
def __init__(self, user="******", passwd="admin"):
    """
    Initialise the DB driver and log in to the web service.

    A requests session is created and a login POST is issued; on any
    failure self.session is reset to None so callers can detect it.

    :param user: login user name
    :param passwd: login password
    """
    super(HostDbDriver, self).__init__(user, passwd)
    self.db_host = DB_HOST
    self.login_url = LOGIN_URL
    self.logout_url = LOGOUT_URL
    self.url = None
    credentials = {'username': self.user, 'password': self.passwd}
    try:
        self.session = requests.Session()
        response = self.session.post(self.login_url, data=credentials)
        body = json.loads(response.content)
        # the success check depend on the login html
        if body['status'] == 1:
            log.debug("Login url [%s] check with username [%s] success.", self.db_host, self.user)
        else:
            log.error("Login url [%s] check with username [%s] failed.", self.db_host, self.user)
            self.session = None
    except requests.exceptions.ConnectionError as conn_err:
        log.exception("Connection exception: %s", conn_err)
        self.session = None
    except Exception as err:
        log.exception("Exception when init session: %s", err)
        self.session = None
def power_on_vm(self, inst_name):
    """
    Power on a VM with name label inst_name.

    Resumes a suspended VM, unpauses a paused one, starts a halted one.

    :param inst_name: VM name label
    :return: True on success (or already running), False on failure
    """
    log.debug("Start power on VM [%s].", inst_name)
    if self.is_instance_running(inst_name):
        log.info("VM [%s] is already running.", inst_name)
        return True
    handler = self.get_handler()
    if handler is None:
        # Bug fix: original silently fell through and returned None here.
        log.error("Can not get handler when try to power on VM [%s].", inst_name)
        return False
    vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]
    vm_state = handler.xenapi.VM.get_record(vm_ref)['power_state']
    try:
        if vm_state == "Suspended":
            handler.xenapi.VM.resume(vm_ref, False, True)  # start_paused = False; force = True
        elif vm_state == "Paused":
            handler.xenapi.VM.unpause(vm_ref)
        else:  # vm_state == "Halted"
            handler.xenapi.VM.start(vm_ref, False, True)
        # give the hypervisor a moment to update power state
        time.sleep(1)
    except Exception as error:  # fix: Py2-only 'except X, e' form
        log.error("Raise exception:'%s' while power on vm:%s", error, inst_name)
        return False
    # Bug fix: success path previously returned None implicitly.
    return True
def get_all_disk(self, inst_name):
    """
    Collect information about every virtual disk attached to a domain.

    Example return value:
    {0: {'disk_size': 20.0, 'disk_free': 19.5, 'device_name': 'xvda'}, ...}
    (NOTE(review): the original docstring showed string keys, but the code
    keys by the int enumeration index — confirm against callers.)

    :param inst_name: domain name
    :return: dict keyed by disk number with 'disk_size' (GB), 'disk_free' (GB)
             and 'device_name' (guest device, e.g. vda)
    """
    disk_list = self.__get_disk_elements_list(inst_name=inst_name)
    all_disk_info = {}
    for disk_num, disk_element in enumerate(disk_list):
        device_name = disk_element.find('target').get('dev')
        source = disk_element.find('source')
        if source is None:
            # Bug fix: e.g. an empty CD-ROM has no <source>; the original
            # crashed with AttributeError here.
            log.debug("Disk [%s] on domain [%s] has no source file, skipped.", device_name, inst_name)
            continue
        file_path = source.get('file')
        log.debug("Disks on domain [%s]: disk path: %s", inst_name, file_path)
        try:
            volume_info = self._hypervisor_handler.storageVolLookupByPath(file_path).info()
        except libvirtError as error:
            log.warn("Exception raise in get all disk when look up storage vol by path: %s", error)
            continue
        # volume_info is (type, capacity-in-bytes, allocation-in-bytes)
        disk_size = volume_info[1] / 1024.0 / 1024.0 / 1024.0
        disk_free = (volume_info[1] - volume_info[2]) / 1024.0 / 1024.0 / 1024.0
        disk_free = float("%.3f" % disk_free)
        all_disk_info[disk_num] = {'disk_size': disk_size, 'device_name': device_name, 'disk_free': disk_free}
    return all_disk_info
def config_max_memory(self, inst_name, static_max=None, dynamic_max=None):
    """
    Configure the static and/or dynamic maximum memory of a VM.

    Memory limits must satisfy:
    static_min <= dynamic_min <= dynamic_max <= static_max

    :param inst_name: VM name
    :param static_max: new static maximum, or None to leave unchanged
    :param dynamic_max: new dynamic maximum, or None to leave unchanged
    :return: True when every requested change succeeded, otherwise False
    """
    log.debug("config max memory in VM [%s]: static max:%s, dynamic max:%s", inst_name, static_max, dynamic_max)
    if static_max:
        log.info("Start to config the static max memory to VM [%s]", inst_name)
        if not self.virt_driver.set_vm_static_memory(inst_name, memory_max=static_max):
            return False
    if dynamic_max:
        log.info("Start to config the dynamic max memory to VM [%s]", inst_name)
        if not self.virt_driver.set_vm_dynamic_memory(inst_name, memory_max=dynamic_max):
            return False
    return True
def delete_instance(self, inst_name):
    """
    Undefine a domain and remove its backing disk file.

    undefine(): if the domain is running it is converted to a transient
    domain without stopping it; if it is inactive, the configuration is removed.

    :param inst_name: domain name
    :return: True on success (or when the domain does not exist), False otherwise
    """
    domain = self._get_domain_handler(inst_name)
    if not domain:
        return True
    try:
        if domain.isActive():
            # Force shutdown; destroy() raises libvirtError if already shut down.
            # Robustness fix: moved inside try so a destroy() failure returns
            # False instead of propagating uncaught.
            domain.destroy()
        ret = domain.undefine()
        if ret == 0:
            target_disk = VM_HOUSE + inst_name + ".qcow2"
            cmd = "rm -f %s" % target_disk
            log.debug("remove the disk file for %s: %s", inst_name, cmd)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            _, perr = p.communicate()
            if perr:
                log.error("Deleting the disk for vm %s meet an error:%s", inst_name, perr)
                return False
        return ret == 0
    except Exception as error:  # fix: Py2-only 'except X, e' form
        log.exception(error)
        return False
def _get_root_handler(self):
    """
    Return the root handler of libvirt.

    Tries qemu+tls first, then falls back to qemu+tcp.  Each attempt is
    bounded to 4 seconds via a SIGALRM watchdog because libvirt.openAuth
    has no timeout parameter.  The handler is cached on the instance.

    :return: the root libvirt connection, or None when both attempts fail
    """
    if self._hypervisor_root_handler:
        return self._hypervisor_root_handler
    if self.hostname is None:
        hostname = "localhost"
    else:
        hostname = self.hostname
    url = "{0}{1}{2}".format('qemu+tls://', hostname, '/system')
    # Install the watchdog; the previous handler is restored in 'finally'.
    old = signal.signal(signal.SIGALRM, self.timeout_handler)
    signal.alarm(4)  # connection timeout set to 4 secs
    try:
        self._hypervisor_root_handler = libvirt.openAuth(url, self._auth, 0)
    except Exception as error:
        log.debug("Can not connect to %s, error: %s. Retrying...", url, error)
        url = "{0}{1}{2}".format('qemu+tcp://', hostname, '/system')
        signal.alarm(4)  # re-arm the watchdog for the fallback attempt
        try:
            self._hypervisor_root_handler = libvirt.openAuth(url, self._auth, 0)
        except Exception as error:
            log.error("Can not connect to url:%s, error: %s", url, error)
            return None
    finally:
        # Always cancel the alarm and restore the original SIGALRM handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old)
    if self._hypervisor_root_handler:
        return self._hypervisor_root_handler
    return None
def get_host_plat_info(self):
    """
    Return HV platform info.  This needs root permission to do.

    :return: dict possibly containing 'vendor_name', 'product_name' and
             'serial_number'; empty dict when the sysinfo query fails
    """
    ret_plat_dict = {}
    hv_root_hdl = self._get_root_handler()
    try:
        sys_info_xml = hv_root_hdl.getSysinfo(0)
    except libvirtError as e:
        log.debug(str(e))
        log.warn("Could not get platform/system info")
        return ret_plat_dict
    finally:
        self._delete_root_handler()
    # getSysinfo() returns SMBIOS data as XML; map the <system> entries we
    # care about onto the result-dict keys.
    field_map = {'manufacturer': 'vendor_name',
                 'product': 'product_name',
                 'serial': 'serial_number'}
    system_node = xmlEtree.ElementTree(xmlEtree.fromstring(sys_info_xml)).find('system')
    for entry in system_node:
        result_key = field_map.get(entry.attrib.get('name'))
        if result_key:
            ret_plat_dict[result_key] = entry.text
    return ret_plat_dict
def get_vif_ip(self, inst_name, vif_index):
    """
    Note: in Xenserver the device index is not same as the eth index in guest
    internal. eg: 0,3 will be eth0, eth1

    :param inst_name: VM name label
    :param vif_index: device index, not the eth index in guest internal
    :return: None or IP
    """
    if self._hypervisor_handler is None:
        self._hypervisor_handler = self.get_handler()
    try:
        vm_ref = self._hypervisor_handler.xenapi.VM.get_by_name_label(inst_name)[0]
    except IndexError:
        log.error("No VM with name [%s].", inst_name)
        return None
    try:
        all_vifs = self.get_all_vifs_indexes(inst_name)
        guest_metrics_ref = self._hypervisor_handler.xenapi.VM.get_guest_metrics(vm_ref)
        network_dict = self._hypervisor_handler.xenapi.VM_guest_metrics.get_networks(guest_metrics_ref)
    except Exception as error:
        log.debug("Except in get_vif_ip: %s", error)
        return None
    if str(vif_index) not in all_vifs:
        log.error("Vif index does not exist.")
        return None
    # Observed behaviour (from testing on XenServer):
    # - with vif devices 0,1,3 each holding an IP, the networks dict looks like
    #   "2/ip: 192.168.1.122; 1/ip: 192.168.1.200; 0/ip: 10.143.248.253"
    # - with devices 0,1,2,3 and IPs on 0,1,3 it looks like
    #   "3/ip: 192.168.1.122; 1/ip: 192.168.1.200; 0/ip: 10.143.248.253"
    # - after removing device 2, before reboot the IPs stay; after reboot the
    #   device indexes remain 0,1,3 but the keys shift to "2/ip:, 1/ip:, 0/ip:".
    # Assume vif devices are added in sequence and the device index matches the
    # eth index inside the VM, so look the IP up by device index.
    return network_dict.get(str(vif_index)+"/ip", None)
def get_handler(self):
    '''
    Return the handler of the virt_driver.

    Opens a XenAPI session (local when no hostname is set, otherwise over
    http with an https retry) and caches it on the instance.  Each login
    attempt is bounded to 4 seconds via a SIGALRM watchdog.

    :return: the authenticated XenAPI session, or None when login fails twice
    '''
    if self._hypervisor_handler is not None:
        return self._hypervisor_handler
    if self.hostname is None:
        self._hypervisor_handler = XenAPI.xapi_local()  # no __nonzero__, can not use if/not for bool test
    else:
        log.debug("connecting to %s with user:%s,passwd:%s", "http://" + str(self.hostname), self.user, self.passwd)
        self._hypervisor_handler = XenAPI.Session("http://" + str(self.hostname))
    old = signal.signal(signal.SIGALRM, self.timeout_handler)
    signal.alarm(4)  # connection timeout set to 4 secs
    try:
        self._hypervisor_handler.xenapi.login_with_password(self.user, self.passwd, API_VERSION_1_1, 'XenVirtDriver')
    except Exception as error:
        log.warn("Exception raised: %s when get handler.", error)
        log.info("Retry connecting to :%s", "https://" + str(self.hostname))
        # Fall back to https with a fresh session and re-armed watchdog.
        self._hypervisor_handler = XenAPI.Session("https://" + str(self.hostname))
        signal.alarm(4)
        try:
            self._hypervisor_handler.xenapi.login_with_password(self.user, self.passwd, API_VERSION_1_1, 'XenVirtDriver')
        except Exception as errors:
            log.exception("Exception errors:%s when get handler", errors)
            return None
    finally:
        # Always cancel the alarm and restore the previous SIGALRM handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old)
    log.debug("Get handler ID in vnet driver: %s", id(self._hypervisor_handler))
    return self._hypervisor_handler
def get_host_cpu_info(self):
    """
    Return HV CPU info.

    :return: dict with model, core count, speed (MHz) and topology fields;
             empty dict when the libvirt query fails
    """
    ret_cpu_dict = {}
    hv_handler = self.get_handler()
    try:
        # https://libvirt.org/docs/libvirt-appdev-guide-python/en-US/html/ch03s04s03.html
        hv_info = hv_handler.getInfo()
    except libvirtError as e:
        log.debug(str(e))
        log.warn("Could not get CPU info")
        return ret_cpu_dict
    # getInfo() -> [model, memory, cpus, mhz, numa_nodes, sockets_per_node,
    #               cores_per_socket, threads_per_core]
    model, _memory, total_cores, mhz, numa_nodes, sockets_per_node, cores_per_socket, threads_per_core = hv_info
    ret_cpu_dict['cpu_model'] = str(model)
    ret_cpu_dict['cpu_cores'] = total_cores
    ret_cpu_dict['cpu_speed'] = int(mhz)  # MHz
    # number of NUMA nodes * number of sockets per node
    ret_cpu_dict['cpu_sockets'] = int(numa_nodes) * int(sockets_per_node)
    ret_cpu_dict['cores_per_socket'] = int(cores_per_socket)
    ret_cpu_dict['thread_per_core'] = int(threads_per_core)
    return ret_cpu_dict
def find_default_server(hosts, role, config_dict):
    """
    Pick a default server for a new VM of the given role.

    :param hosts: server's ip list
    :param role: role key into config_dict (supplies the VM's memory size)
    :param config_dict: per-role resource configuration
    :return: a default server's IP, or None when no server qualifies
    """
    server_infors = get_server_infors(hosts)
    # each item is a tuple (server_ip, [logicFreeNoOverCommit, physic-free-mem,
    # logic-free-mem, physic-free-disk, logic-free-disk])
    # Py3-compatibility fix: dict.iteritems() and tuple-unpacking lambda
    # parameters are Python-2-only; sort descending on the first three fields,
    # exactly as operator.itemgetter(0, 1, 2) did.
    sorted_servers = sorted(server_infors.items(),
                            key=lambda item: (item[1][0], item[1][1], item[1][2]),
                            reverse=True)
    for item in sorted_servers:
        log.debug("%s", item)
    for ip, info in sorted_servers:
        # find a server whose physical memory has at least 10GB free to start a new vm,
        # whose logic free memory (with over commit) fits the new vm's memory size,
        # and whose disk pool has at least 100G free
        if (info[1] > 10) and (info[2] - config_dict[role]['memory'] > 0) and (info[3] > 100):
            default_server = ip
            break
    else:
        log.error("No server is available for the new vm, please confirm it.")
        return None
    log.info("Schedual to server: %s", default_server)
    return default_server
def power_off_vm(self, inst_name):
    """
    Power off a VM by its name label.

    @see: void shutdown (session ref session_id, VM ref vm); it attempts a
    clean shutdown first and performs a hard shutdown if that fails.

    :param inst_name: VM name label
    :return: True on success (or already halted), False on failure
    """
    log.debug("Start power off vm [%s].", inst_name)
    if self.is_instance_halted(inst_name):
        log.info("VM [%s] is already not running.", inst_name)
        return True
    handler = self.get_handler()
    if handler is None:
        log.error("Can not get handler when try to power off VM [%s].", inst_name)
        return False
    vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]
    try:
        handler.xenapi.VM.shutdown(vm_ref)
        # brief pause to let the power state settle
        time.sleep(0.5)
    except Exception as error:  # fix: Py2-only 'except X, e' form
        log.exception("Exception raised: %s when shutdown VM [%s].", error, inst_name)
        return False
    # Bug fix: success path previously returned None implicitly.
    return True
def get_handler(self):
    '''
    Return the handler of the virt_driver.

    Opens a libvirt connection (local default when no hostname is set,
    otherwise qemu+tls with a qemu+tcp fallback) and caches it on the
    instance.  Each attempt is bounded to 4 seconds via a SIGALRM
    watchdog because libvirt.open*/openAuth have no timeout parameter.

    :return: the libvirt connection, or None when both attempts fail
    '''
    if self._hypervisor_handler:
        return self._hypervisor_handler
    old = signal.signal(signal.SIGALRM, self.timeout_handler)
    signal.alarm(4)  # connection timeout set to 4 secs
    try:
        if self.hostname is None:
            url = DEFAULT_HV
            self._hypervisor_handler = libvirt.open(url)
        else:
            url = "{0}{1}{2}".format('qemu+tls://', self.hostname, '/system')
            self._hypervisor_handler = libvirt.openAuth(url, self._auth, 0)
    except Exception as error:
        log.debug("Can not connect to url: %s, error: %s. Retrying...", url, error)
        signal.alarm(4)  # re-arm the watchdog for the fallback attempt
        try:
            url = "{0}{1}{2}".format('qemu+tcp://', self.hostname, '/system')
            self._hypervisor_handler = libvirt.openAuth(url, self._auth, 0)
        except Exception as error:
            log.error("Can not connect to url: %s, error: %s ", url, error)
            return None
    finally:
        # Always cancel the alarm and restore the previous SIGALRM handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old)
    if not self._hypervisor_handler:
        return None
    return self._hypervisor_handler
def create_new_vif(self, inst_name, vif_index, device_name=None, network=None, bridge=None, MAC=None):
    """
    Create a virtual interface record on a guest VM.

    @param inst_name: name of the guest VM
    @param vif_index: index of interface in guest VM
    @param device_name: device name on the host which the network belong to
    @param network: network name defined by libvirt
    @param bridge: bridge name, may be linux bridge or openvswitch bridge
    @param MAC: optional MAC address; autogenerated by XenAPI when omitted
    @return: a reference to virtual interface in guest VM, or None on failure

    to change the MTU, please set it in other-config:mtu=9000
    """
    # Template VIF record; unset fields let XenAPI fill in its defaults.
    record = {
        'MAC': '',
        'MAC_autogenerated': True,
        'MTU': '0',
        'other_config': {},
        'qos_algorithm_params': {},
        'qos_algorithm_type': ''
    }
    handler = self.get_handler()
    vm_ref_list = handler.xenapi.VM.get_by_name_label(inst_name)
    if not vm_ref_list:
        log.error("No instance with name [%s].", inst_name)
        return None
    # Resolve the network reference with priority: device_name > network > bridge.
    network_ref = None
    if device_name is not None:
        network_ref = self._get_network_ref_by_device(device_name)
    elif network is not None:
        # NOTE(review): 'network' is passed as bridge_name here — confirm that
        # libvirt network names and bridge names are interchangeable in this setup.
        network_ref = self._get_network_ref_by_bridge(bridge_name=network)
    else:
        network_ref = self._get_network_ref_by_bridge(bridge_name=bridge)
    if network_ref is None:
        log.error("No valid network found with params: NIC:%s, bridge:%s.", device_name, network)
        return None
    # XenAPI restricts which device slots a VIF may occupy.
    allows_index = handler.xenapi.VM.get_allowed_VIF_devices(vm_ref_list[0])
    if str(vif_index) not in allows_index:
        log.error("Virtual interface device [%s] is not allowed, allowed:%s", vif_index, allows_index)
        return None
    record['VM'] = vm_ref_list[0]
    record['network'] = network_ref
    record['device'] = str(vif_index)
    if MAC is not None:
        # if MAC given, MAC_autogenerated will be False
        record['MAC'] = MAC
    log.debug("create new vif with record:%s", str(record))
    new_vif = handler.xenapi.VIF.create(record)
    return new_vif
def __del__(self):
    """Log out the cached XenAPI session when the driver is garbage-collected."""
    try:
        handler = self._hypervisor_handler
        if handler is not None:
            log.debug("Release handler in vnet driver, ID:%s", id(handler))
            handler.xenapi.session.logout()
            self._hypervisor_handler = None
    except Exception as error:
        # Never raise from __del__; just record the problem at debug level.
        log.debug(error)
def create_new_vif(self, inst_name, vif_index, device_name=None, network=None, ip=None):
    """
    create a new virtual interface on the target VM

    @param inst_name: Vm name
    @param device_name: vswitch (with the host-device attached) which the vif attach to
    @param vif_index: vif index
    @param network: bridge name
    @param ip: optional IP used to derive a deterministic MAC address
    @return: True on success, False otherwise
    """
    log.info("Start to add a new virtual interface device with index:[%s] to VM [%s]", vif_index, inst_name)
    mac_addr = None
    if ip:
        # Derive the MAC from the IP's four octets so it is reproducible.
        octets = tuple('%02x' % int(part) for part in ip.split("."))
        mac_addr = VM_MAC_PREFIX + ":%s:%s:%s:%s" % octets
    log.debug("Create VIF [%s] with IP: %s, MAC: %s.", vif_index, ip, mac_addr)
    new_vif = self.vnet_driver.create_new_vif(inst_name, vif_index, device_name, network, MAC=mac_addr)
    if new_vif is None:
        log.error("Can not create new virtual interface device [%s].", vif_index)
        return False
    self.update_ip_infor_to_database(inst_name, vif_index=vif_index, ip=ip)
    if not self.virt_driver.is_instance_running(inst_name):
        log.info("New virtual interface device created successfully, but didn't plugin as VM is power off.")
        return True
    if self.vnet_driver.plug_vif_to_vm(inst_name, vif_index):
        log.info("New virtual interface device [%s] attached to VM [%s] successfully.", vif_index, inst_name)
        return True
    log.error("New virtual interface device attached failed to VM [%s].", inst_name)
    return False
def create_instance(self, vm_name, reference_vm):
    """
    Create a new domain by cloning a reference VM's disk and XML definition.

    :param vm_name: name for the new domain
    :param reference_vm: name of the template domain to clone
    :return: True on success, False otherwise
    """
    log.debug("enter create_instance %s", vm_name)
    # copy the disk first; qcow2 is recommended
    target_disk = ''.join((VM_HOUSE, vm_name, ".qcow2"))
    reference_disk = ''.join((VM_HOUSE, reference_vm, ".qcow2"))
    # Bug fix: "\c" is an invalid escape sequence (SyntaxWarning on modern
    # Pythons); "\\cp" yields the same literal "\cp" (bypasses any cp alias).
    cmd = "\\cp -f %s %s" % (reference_disk, target_disk)
    log.debug("%s", cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    _, perr = p.communicate()
    if perr:
        log.error("Create domain %s meet an error when copy disk: %s", vm_name, str(perr))
        return False
    # change the xml: copy the template and substitute the VM name in place
    target_xml = TEMPLATE_CFG_POOL + vm_name + ".xml"
    reference_xml = "".join((VM_HOUSE, reference_vm, ".xml"))
    cmd = "cp %s %s && sed -i 's/%s/%s/g' %s" % (reference_xml, target_xml, reference_vm, vm_name, target_xml)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    _, perr = p.communicate()
    if perr:
        log.error("Create domain %s meet an error when change xml:%s", vm_name, str(perr))
        return False
    hv_handler = self.get_handler()
    if not hv_handler:
        log.error("Can not connect to host: %s when create domain %s.", self.hostname, vm_name)
        return False
    if vm_name in [dom.name() for dom in hv_handler.listAllDomains()]:
        log.info("Vm %s is registered.", vm_name)
        return True
    with open(target_xml) as xml_file:
        xml = xml_file.read()
    try:
        # if failed it will raise libvirtError, return value is always a Domain object
        new_dom = hv_handler.defineXML(xml)
    except libvirtError:
        log.error("Create domain %s failed when define by xml.", vm_name)
        return False
    new_dom.setAutostart(1)
    return True
def is_IP_available(self, vif_ip=None, vif_netmask=None, device=None, network=None, bridge=None):
    """
    Check whether an IP and netmask pair is usable on the target network.

    :param vif_ip: candidate IP; None/empty skips all checks
    :param vif_netmask: candidate netmask; defaults to the device's netmask
    :param device: host device whose network info supplies netmask/gateway
    :param network: currently unused (see TODO below)
    :param bridge: currently unused (see TODO below)
    :return: True if the IP is usable, False otherwise
    """
    # No ip, don't need to check
    if not vif_ip:
        return True
    dest_netmask = ""  # typo fix: was 'dest_metmask' (local only)
    dest_gateway = None
    if device is not None:
        try:
            device_info = self.vnet_driver.get_device_infor(device_name=device)
            dest_netmask = device_info["netmask"]
            dest_gateway = device_info['gateway']
        except KeyError as error:
            log.exception(str(error))
    elif network is not None or bridge is not None:
        # TODO: need to add API to get network infor accroding to network or bridge
        pass
    if vif_netmask:
        if dest_netmask and dest_netmask != vif_netmask:
            log.error("Netmask [%s] is not corresponding with the target network.", vif_netmask)
            return False
    else:
        # get the netmask on device as the default one
        vif_netmask = dest_netmask
    log.debug("VIF IP is: %s, netmask is: %s", vif_ip, vif_netmask)
    if not vif_netmask:
        # No default netmask and no given; the IPv4 validation below is skipped.
        log.warn("No netmask given, the default one is '255.255.255.0'.")
    else:
        vif_gateway = dest_gateway if dest_gateway else None
        if not IpCheck.is_valid_ipv4_parameter(vif_ip, vif_netmask, gateway=vif_gateway):
            return False
    # First check it from database
    if self.check_ip_used(vif_ip):
        log.error("Ip address [%s] already in used.(Check from database).", vif_ip)
        return False
    # This ping test take a second, put it at last.
    if is_IP_pingable(vif_ip):
        log.error("Ipaddress [%s] is already be used(Ping test).", vif_ip)
        return False
    return True
def __is_kvm_available(self, xmlfile):
    """
    Determine from host capabilities XML whether KVM guests are supported.

    :param xmlfile: capabilities XML string
    :return: return True is kvm is supported, else False
    """
    caps_root = xmlEtree.fromstring(xmlfile)
    if caps_root.find("guest/arch/domain/[@type='kvm']") is None:
        return False
    log.debug("host capabilities: type=kvm found, host support KVM")
    return True
def set_mac_address(self, inst_name, eth_index, new_mac):
    """
    Set the MAC of the eth_index-th interface by rewriting and redefining
    the domain XML, e.g. producing: <mac address='52:54:00:68:43:c2'/>

    :param inst_name: domain name
    :param eth_index: interface position in the domain's device list
    :param new_mac: new MAC address string
    :return: True on success, False otherwise
    """
    domain = self._get_domain_handler(domain_name=inst_name)
    if not domain:
        log.error("Domain %s doesn't exist, set mac failed.", inst_name)
        return False
    if domain.isActive():
        # Redefining only changes the persistent config, not the live one.
        log.warn("New MAC will take effect after domain reboot.")
    # Locate the current MAC of the target interface by index.
    vif_list = self._get_dom_interfaces_elements_list(inst_name)
    try:
        interface = vif_list[eth_index]
        mac_element = interface.find("mac")
        old_mac = mac_element.get("address")
    except IndexError:
        log.exception("No interfaces at index [%s] find in domain [%s]", eth_index, inst_name)
        return False
    # Rewrite every <mac> carrying the old address in a fresh copy of the XML.
    tree = xmlEtree.fromstring(domain.XMLDesc())
    mac_list = tree.findall('devices/interface/mac')
    try:
        for mac_element in mac_list:
            if mac_element.get("address") == old_mac:
                log.debug("Change old mac [%s] to new [%s] on interface index %s", old_mac, new_mac, eth_index)
                mac_element.set("address", new_mac)
    except ValueError as error:
        log.exception("Exception when set mac: %s on domain: [%s]", error, inst_name)
        return False
    domain_xml = xmlEtree.tostring(tree)
    # after change the xml, redefine it
    hv_handler = self.get_handler()
    if not hv_handler:
        log.error("Can not connect to host: %s when create domain %s.", self.hostname, inst_name)
        return False
    try:
        # if failed it will raise libvirtError, return value is always a Domain object
        _ = hv_handler.defineXML(domain_xml)
    except libvirtError:
        log.error("Create domain %s failed when define by xml after set MAC.", inst_name)
        return False
    return True
def delete_handler(self):
    '''
    Release the XenAPI session explicitly.

    Safe to call when no handler exists; any logout failure is swallowed
    and only recorded at debug level.
    '''
    try:
        session_handler = self._hypervisor_handler
        if session_handler is None:
            return
        log.debug("Release handler manually in vnet driver, ID:%s", id(session_handler))
        session_handler.xenapi.session.logout()
        self._hypervisor_handler = None
    except Exception as error:
        log.debug(error)
def __init__(self, hostname=None, user=None, passwd=None):
    """
    Initialise the libvirt-based driver and open a hypervisor connection.

    :param hostname: remote host to connect to; None means the local hypervisor
    :param user: user name supplied to libvirt's auth callback
    :param passwd: password supplied to libvirt's auth callback
    """
    VirtDriver.__init__(self, hostname, user, passwd)
    # Credential descriptor for libvirt.openAuth(); self._request_cred fills
    # in the username/passphrase when libvirt asks for them.
    self._auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], self._request_cred, None]
    # conn = libvirt.open(name) need root username
    # conn = libvirt.openReadOnly(name) has not write promission
    # self._hypervisor_root_handler = libvirt.openAuth("{0}{1}{2}".format('qemu+tcp://', self.hostname, '/system'), self._auth, 0)
    # The root handler is opened lazily (see _get_root_handler); the regular
    # handler is opened eagerly below.
    self._hypervisor_root_handler = None
    log.debug("Try to connect to libvirt in host: %s", self.hostname)
    self._hypervisor_handler = self.get_handler()
def get_host_sw_ver(self, short_name=True):
    """
    Return the HV SW version.

    :param short_name: kept for interface compatibility (unused in this block)
    :return: None when the libvirt version query fails
    """
    hv_handler = self.get_handler()
    try:
        hv_ver = hv_handler.getVersion()
        lib_ver = hv_handler.getLibVersion()
    except libvirtError as e:  # fix: Py2-only 'except X, e' form
        log.debug(str(e))
        log.warn("Could not get HV version")
        return None
    # NOTE(review): hv_ver/lib_ver are fetched but never formatted or
    # returned here — the success path appears truncated; confirm against
    # the full source before relying on the return value.
def detach_disk_from_domain(self, inst_name, target_volume=None, force=False):
    """
    deactivate volume from pool, if force is True, physically remove the volume

    :param inst_name: domain name
    :param target_volume: the full path name of disk file; None detaches every disk
    :param force: True to physically remove volume
    :return: True or False
    """
    dom = self._get_domain_handler(domain_name=inst_name)
    if dom is None:
        log.error("No domain named %s.", inst_name)
        return False
    xmlstr = dom.XMLDesc()
    tree = xmlEtree.fromstring(xmlstr)
    device_elment = tree.find("devices")
    disk_list = device_elment.findall("disk[@device='disk']")
    # 'ret' stays None when no matching <disk> with a <source> was processed.
    ret = None
    for disk_element in disk_list:
        source = disk_element.find("source")
        if source is None:
            continue
        else:
            file_path = source.get("file")
        # When a target volume is given, skip every other disk.
        if target_volume and file_path != target_volume:
            continue
        log.info("Detach device: %s", file_path)
        if dom.isActive():
            # Live domain: update both the running state and the persistent config.
            ret = dom.detachDeviceFlags(xmlEtree.tostring(disk_element), libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
        else:
            ret = dom.detachDeviceFlags(xmlEtree.tostring(disk_element))
        # remove disk from host
        if force and ret == 0:
            log.debug("Physically remove disk: %s", file_path)
            self._delete_volume_from_pool(file_path)
        if ret != 0:
            log.error("Detach disk [%s] from domain [%s] return %s", file_path, inst_name, ret)
            return False
    if ret is None:
        # no disk found in xml config
        log.error("No volume named %s in domain.", target_volume)
        return False
    else:
        return ret == 0
def update(self, id=None, sn=None, hostname=None, data=None, json_data=None):
    """
    update the data to a record with its primary key is id, id/sn/hostname should not be changed

    :param id: PK
    :param sn: UUID for VM, or Serial No. of Physical host
    :param hostname: VM name or host name for physical host
    :param data: Dict data, sent as a form-encoded PATCH
    :param json_data: Dict data, sent as a json PATCH (None values clear DB fields)
    :return: True or False
    """
    if id is None and sn is None and hostname is None:
        log.error("Update to DB need an ID, a hostname or a SN to identify which record will be updated.")
        return False
    if data:
        if not isinstance(data, dict):
            log.error("Data should be a dict.")
            return False
    elif json_data:
        if not isinstance(json_data, dict):
            log.error("Json data should be a dict")
            return False
    else:
        # Nothing to update is treated as success.
        return True
    query_list = self.query(id=id, sn=sn, hostname=hostname)
    if not query_list:
        log.error("No record found with ID:%s, hostname:%s, sn:%s", id, hostname, sn)
        return False
    # 'modified' is echoed back to the server with the patch.
    modified = query_list[0]['modified']
    record_id = query_list[0]['id']
    url = self.url + str(record_id) + "/"  # update url should be endwith "/"
    if data:
        data['modified'] = str(modified)
        log.debug("Patch url:%s, data: %s", url, data.get('comment', data))
        self.resp = self.session.patch(url, data=data)  # When dict value is None, pass in data will not set db null
    elif json_data:
        json_data['modified'] = str(modified)
        # NOTE(review): json_data is dumped to a string and then passed via
        # 'json=' — requests will serialise it again, double-encoding the
        # payload; confirm the server expects this.
        json_data = json.dumps(json_data)
        log.debug("Patch url:%s, json data: %s", url, json_data)
        # when dict value is none, json value is null, pass in json=null will set db null
        self.resp = self.session.patch(url, json=json_data)
    if self.resp.status_code == requests.codes.ok:
        log.info("Update to database successfully.")
        return True
    elif self.is_respond_error:
        # NOTE(review): is_respond_error is referenced without parentheses —
        # confirm it is a property; if it is a method, this branch always fires.
        log.error("Update failed. Return code: %s, content: %s", self.resp.status_code, self.resp.content)
        return False
    return True
def is_instance_halted(self, inst_name):
    '''
    Check whether a domain is halted (powered off).

    @param inst_name: instance name
    @return: True if the domain exists and is halted, else False
    '''
    domain = self._get_domain_handler(inst_name)
    if not domain:
        log.debug("%s does not exist", inst_name)
        return False
    stats = domain.info()
    # Bug fix: VIR_DOMAIN_SHUTDOWN means the domain is still *being* shut
    # down; VIR_DOMAIN_SHUTOFF is the fully powered-off state.  The original
    # matched only SHUTDOWN, so a powered-off domain was reported as not
    # halted.  Accept both states.
    if stats[DOMAIN_INFO_STATE] in (libvirt.VIR_DOMAIN_SHUTDOWN, libvirt.VIR_DOMAIN_SHUTOFF):
        return True
    return False
def get_host_plat_info(self):
    """
    Return HV platform info.  This needs root permission to do.

    :return: empty dict when the sysinfo query fails
    """
    ret_plat_dict = {}
    hv_root_hdl = self._get_root_handler()
    try:
        sys_info_xml = hv_root_hdl.getSysinfo(0)
    except libvirtError as e:  # fix: Py2-only 'except X, e' form
        log.debug(str(e))
        log.warn("Could not get platform/system info")
        return ret_plat_dict
    # NOTE(review): sys_info_xml is fetched but the parsing/return of the
    # success path is not present in this block — it appears truncated;
    # compare with the complete sibling implementation elsewhere in the file.
def is_instance_running(self, inst_name):
    '''
    Whether the instance is currently in the running state.

    :param inst_name: instance name
    :return: True if the domain exists and is running, else False
    '''
    domain = self._get_domain_handler(inst_name)
    if not domain:
        log.debug("%s does not exist", inst_name)
        return False
    state = domain.info()[DOMAIN_INFO_STATE]
    return state == libvirt.VIR_DOMAIN_RUNNING
def _delete_volume_from_pool(self, volume_path):
    """
    Wipe and delete a storage volume looked up by its path.

    :param volume_path: full path of the volume file
    :return: True on success, False if any libvirt call failed
    """
    try:
        volobj = self._hypervisor_handler.storageVolLookupByPath(volume_path)
        volobj.wipe(0)    # zero the data before removing the volume
        volobj.delete(0)
    except libvirtError as error:
        # Bug fix: 'error' was caught but never logged, hiding the reason.
        log.debug("Error when delete volume %s: %s", volume_path, error)
        return False
    return True