def get_handler(self):
    """Return a logged-in XenAPI session handler, creating it on first use.

    A local connection (hostname is None) uses XenAPI.xapi_local(); a remote
    one tries HTTP first and falls back to HTTPS.  A SIGALRM-based timeout
    guards each login attempt.

    :return: the cached/new XenAPI session object, or None when login failed.
    """
    if self._hypervisor_handler is not None:
        return self._hypervisor_handler

    if self.hostname is None:
        # session object has no __nonzero__, can not use if/not for bool test
        self._hypervisor_handler = XenAPI.xapi_local()
    else:
        log.debug("connecting to %s with user:%s,passwd:%s",
                  "http://" + str(self.hostname), self.user, self.passwd)
        self._hypervisor_handler = XenAPI.Session("http://" + str(self.hostname))

    old = signal.signal(signal.SIGALRM, self.timeout_handler)
    signal.alarm(4)  # connection timeout set to 4 secs
    try:
        try:
            self._hypervisor_handler.xenapi.login_with_password(
                self.user, self.passwd, API_VERSION_1_1, 'XenVirtDriver')
        except Exception as error:
            log.warn("Exception raised: %s when get handler.", error)
            log.info("Retry connecting to :%s", "https://" + str(self.hostname))
            self._hypervisor_handler = XenAPI.Session("https://" + str(self.hostname))
            signal.alarm(4)
            try:
                self._hypervisor_handler.xenapi.login_with_password(
                    self.user, self.passwd, API_VERSION_1_1, 'XenVirtDriver')
            except Exception as errors:
                log.exception("Exception errors:%s when get handler", errors)
                return None
    finally:
        # Always cancel the pending alarm and restore the previous SIGALRM
        # handler.  The original code only did this on the HTTPS retry path,
        # leaving the alarm armed after a successful first login.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old)

    log.debug("Get handler ID in vnet driver: %s", id(self._hypervisor_handler))
    return self._hypervisor_handler
def config_memory(self, inst_name, static_max=None, static_min=None, dynamic_max=None, dynamic_min=None):
    """Configure static and/or dynamic memory limits of a VM, then sync CMDB.

    Memory limits must satisfy:
    static_min <= dynamic_min <= dynamic_max <= static_max
    :param inst_name: VM name
    :param static_max: static memory upper bound
    :param static_min: static memory lower bound
    :param dynamic_max: dynamic memory upper bound
    :param dynamic_min: dynamic memory lower bound
    :return: True on success, False when any driver call fails
    """
    if static_max or static_min:
        log.info("Start to config the static memory in VM [%s]", inst_name)
        ret = self.virt_driver.set_vm_static_memory(inst_name, memory_max=static_max, memory_min=static_min)
        if not ret:
            return False

    if dynamic_max or dynamic_min:
        log.info("Start to config the dynamic memory in VM [%s]", inst_name)
        ret = self.virt_driver.set_vm_dynamic_memory(inst_name, memory_max=dynamic_max, memory_min=dynamic_min)
        if not ret:
            return False

    # update memory size to cmdb.  The old duplicate unbound call
    # ServerDomain.update_memory_to_database(self) (missing inst_name, same
    # effect as the line below) has been removed.
    self.update_memory_to_database(inst_name)
    return True
def add_vdisk_to_vm(self, inst_name, storage_name='Local storage', size=2):
    """Prepare a new virtual disk (VDI) for a VM on the named SR.

    @param inst_name: the name of VM
    @param storage_name: which storage repository the virtual disk put
    @param size: the disk size
    """
    handler = self.get_handler()
    # next free device slot on the VM; a falsy value means no slot available
    userdevice = self._get_available_device_num(inst_name)
    if not userdevice:
        return False

    log.info("Start to add virtual disk [%s] to VM: [%s]", userdevice, inst_name)

    name_description = "VDI created by API, on VM: %s, SR: %s" % (
        inst_name, storage_name)
    record = {
        "name_label": inst_name + " data " + userdevice,
        "name_description": name_description
    }
    try:
        # first SR matching the label; [0] raises IndexError when none exists,
        # which the broad except also catches
        sr_ref = handler.xenapi.SR.get_by_name_label(storage_name)[0]
    except Exception, error:
        log.exception("No storage named [%s], exception: %s", storage_name, error)
        return False
    # NOTE(review): the visible chunk ends here -- `record`/`sr_ref`/`size`
    # presumably feed a VDI.create call outside this view; confirm in full file.
def delete_ip_info_from_database(inst_name, vif_index):
    """Delete the IP recorded for a VM's vif from the database.

    Only the first two vifs are stored ("0" -> first_ip, "1" -> second_ip);
    any other index is a no-op that returns True.

    :param inst_name: VM name (used as the database hostname key)
    :param vif_index: vif index, as a string
    :return: True on success / no-op, False on failure
    """
    log.info("Delete vif [%s] IP information from database.", vif_index)
    sync_data = {}
    if vif_index == "0":
        sync_data["first_ip"] = None
    elif vif_index == "1":
        sync_data["second_ip"] = None
    else:
        log.info("No IP with vif index [%s] in database, return.", vif_index)
        return True

    db_driver = DbFactory.get_db_driver("VirtHost")
    try:
        # removed the unused `json_data = json.dumps(sync_data)`: the driver
        # takes the dict directly
        ret = db_driver.update(hostname=inst_name, data=sync_data)
    except Exception as error:
        log.warn("Delete ip information raise error: %s", error)
        ret = False

    if not ret:
        log.warn("Delete IP information from database with ret: [%s], data: %s", ret, sync_data)
    return ret
def power_on_vm(self, inst_name):
    """
    @summary: power on vm with name label inst_name
    """
    log.debug("Start power on VM [%s].", inst_name)
    if self.is_instance_running(inst_name):
        log.info("VM [%s] is already running.", inst_name)
        return True

    handler = self.get_handler()
    if handler is not None:
        # first VM matching the label; [0] raises IndexError if absent
        vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]
        vm_state = handler.xenapi.VM.get_record(vm_ref)['power_state']
        try:
            # pick the resume/unpause/start call matching the current state
            if vm_state == "Suspended":
                handler.xenapi.VM.resume(
                    vm_ref, False, True)  # start_paused = False; force = True
            elif vm_state == "Paused":
                handler.xenapi.VM.unpause(vm_ref)
            else:  # vm_state == "Halted"
                handler.xenapi.VM.start(vm_ref, False, True)
            # brief pause to let the state transition settle
            time.sleep(1)
        except Exception, error:
            log.error("Raise exception:'%s' while power on vm:%s", error, inst_name)
            return False
    # NOTE(review): chunk appears truncated -- no success-path return (and no
    # handler-is-None branch result) is visible; confirm against the full file.
def update_ip_infor_to_database(inst_name, vif_index=None, ip=None, host_ip=None):
    """Record a VM's vif IP (and optionally its host's IP) in the database.

    As the IP for xenserver's VM is not accessible when it is down, update it
    with the user's input.  Only the first two vifs are stored
    ("0" -> first_ip, "1" -> second_ip).

    :param inst_name: VM name (database hostname key)
    :param vif_index: vif index, as a string
    :param ip: the IP on vif
    :param host_ip: Host server IP
    :return: True on success / no-op, False on failure
    """
    log.info("Update [%s] IP information [%s, %s] to database.", inst_name, vif_index, ip)
    sync_data = {}
    if host_ip:
        sync_data['vm_host_ip'] = host_ip

    if vif_index == "0":
        sync_data["first_ip"] = ip
    elif vif_index == "1":
        sync_data["second_ip"] = ip
    else:
        log.warn("Database only record the first and second IP for VM.")

    # nothing to write (unknown vif index and no host_ip) is a successful no-op
    if not sync_data:
        return True

    db_driver = DbFactory.get_db_driver("VirtHost")
    try:
        # removed commented-out dead code (json.dumps of sync_data);
        # the driver takes the dict directly
        ret = db_driver.update(hostname=inst_name, data=sync_data)
    except Exception as error:
        log.exception("update IP information raise error: %s", error)
        ret = False

    if not ret:
        log.warn("Update IP information to database with ret: [%s], data: %s", ret, sync_data)
    return ret
def destroy_old_vif(self, inst_name, vif_index):
    """Remove an existing virtual interface from a VM.

    Unplugs the vif first when the VM is running, then destroys it and
    drops its IP record from the database.

    @param inst_name: Vm name
    @param vif_index: vif index
    @return: True on success (or when the vif does not exist), False otherwise
    """
    log.info("Start to delete the old interface device [%s] from VM [%s].", vif_index, inst_name)

    if not self.vnet_driver.is_vif_exist(inst_name, vif_index):
        log.info("No old vif with index [%s], don't need to destroy.", vif_index)
        return True

    if self.virt_driver.is_instance_running(inst_name):
        if not self.vnet_driver.unplug_vif_from_vm(inst_name, vif_index):
            log.error(
                "Failed to unplug the virtual interface device [%s] from VM.", vif_index)
            return False

    if not self.vnet_driver.destroy_vif(inst_name, vif_index):
        log.error("Failed to destroy the virtual interface device [%s].", vif_index)
        return False

    # best-effort DB cleanup; its result does not affect the outcome
    self.delete_ip_info_from_database(inst_name, vif_index)
    return True
def unplug_vif_from_vm(self, inst_name, vif_index):
    """Hot-unplug the specified VIF, dynamically detaching it from the running VM.

    @param inst_name: VM name
    @param vif_index: virtual interface index
    @note It should check the power_state before use this API
    @return: True when the vif ends up unplugged (or already was), else False
    """
    if self._hypervisor_handler is None:
        self._hypervisor_handler = self.get_handler()
    handler = self._hypervisor_handler

    vif_ref = self._get_vif_by_index(inst_name, vif_index)
    if vif_ref is None:
        log.error("No vif found with index [%s] when try to detach vif.", vif_index)
        return False

    vm_ref = handler.xenapi.VIF.get_VM(vif_ref)
    power_status = handler.xenapi.VM.get_record(vm_ref)['power_state']
    allowed_opera = handler.xenapi.VIF.get_record(vif_ref)['allowed_operations']
    # a running VM whose vif no longer allows 'unplug' is treated as done
    if power_status == 'Running' and 'unplug' not in allowed_opera:
        log.info("VIF [%s] is already unpluged.", vif_index)
        return True

    try:
        handler.xenapi.VIF.unplug(vif_ref)
    except Exception as error:
        log.exception("Exceptions raised when unplug a VIF:%s", error)
        return False
    return True
def plug_vif_to_vm(self, inst_name, vif_index):
    """Hotplug the specified VIF, dynamically attaching it to the running VM.

    @param inst_name: VM name
    @param vif_index: virtual interface index
    @return: True when the vif ends up plugged (or already was), else False
    """
    if self._hypervisor_handler is None:
        self._hypervisor_handler = self.get_handler()
    handler = self._hypervisor_handler

    vif_ref = self._get_vif_by_index(inst_name, vif_index)
    if vif_ref is None:
        log.error("No vif found with index [%s] when try to attach vif.", vif_index)
        return False

    vm_ref = handler.xenapi.VIF.get_VM(vif_ref)
    power_status = handler.xenapi.VM.get_record(vm_ref)['power_state']
    allowed_opera = handler.xenapi.VIF.get_record(vif_ref)['allowed_operations']
    # a running VM whose vif no longer allows 'plug' is treated as done
    if power_status == 'Running' and 'plug' not in allowed_opera:
        log.info("VIF [%s] is already pluged.", vif_index)
        return True

    try:
        handler.xenapi.VIF.plug(vif_ref)
    except Exception as error:
        log.error("Exception raised when hot-plug a VIF:%s.", error)
        return False
    return True
def config_max_memory(self, inst_name, static_max=None, dynamic_max=None):
    """Set the static and/or dynamic maximum memory of a VM.

    Memory limits must satisfy:
    static_min <= dynamic_min <= dynamic_max <= static_max
    :param inst_name: VM name
    :param static_max: new static maximum
    :param dynamic_max: new dynamic maximum
    :return: True when every requested change succeeded
    """
    log.debug(
        "config max memory in VM [%s]: static max:%s, dynamic max:%s",
        inst_name, static_max, dynamic_max)

    if static_max:
        log.info("Start to config the static max memory to VM [%s]", inst_name)
        if not self.virt_driver.set_vm_static_memory(inst_name, memory_max=static_max):
            return False

    if dynamic_max:
        log.info("Start to config the dynamic max memory to VM [%s]", inst_name)
        if not self.virt_driver.set_vm_dynamic_memory(inst_name, memory_max=dynamic_max):
            return False

    return True
def set_vm_dynamic_memory(self, inst_name, memory_max=None, memory_min=None):
    """Set the current memory of a domain.

    An active domain is changed both live and in its config file; an
    inactive one only in the config file.  Minimum memory is not supported.

    :param inst_name: domain name
    :param memory_max: size in GB
    :param memory_min: size in GB (ignored; accepted for interface parity)
    :return: True on success
    """
    dom = self._get_domain_handler(domain_name=inst_name)
    if dom is None:
        return False

    kb_per_gb = 1024 * 1024  # libvirt memory sizes are in KiB
    if memory_max:
        new_size_kb = int(memory_max) * kb_per_gb
    elif memory_min:
        log.info("Don't support min memory set.")
        return True
    else:
        log.error("Neither maxMemory nor minMemory is supplied.")
        return False

    try:
        if dom.isActive():
            both = libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG
            ret = dom.setMemoryFlags(new_size_kb, both)
        else:
            # dom.setMemory needs the domain to be active, hence setMemoryFlags
            ret = dom.setMemoryFlags(new_size_kb)
    except libvirtError as error:
        log.exception("Exception: %s", error)
        return False
    return ret == 0
def clone_disk(self, source_file_path, target_disk_name):
    """Clone an existing storage volume into a new volume in the same pool.

    :param source_file_path: path of the source volume to clone
    :param target_disk_name: name of the new volume
    :return: the newly created libvirt storage volume object
    """
    vol = self._hypervisor_handler.storageVolLookupByPath(source_file_path)
    # reuse the source volume's format type (e.g. qcow2/raw) for the clone
    format_element = xmlEtree.fromstring(
        vol.XMLDesc(0)).find("target/format")
    vol_format = format_element.get('type')

    vol_clone_xml = """ <volume> <name>%s</name> <capacity>0</capacity> <allocation>0</allocation> <target> <format type='%s'/> <permissions> <owner>107</owner> <group>107</group> <mode>0644</mode> <label>virt_image_t</label> </permissions> </target> </volume>""" % (target_disk_name, vol_format)

    log.info("Clone from %s to %s", source_file_path, target_disk_name)
    pool = vol.storagePoolLookupByVolume()
    new_vol = pool.createXMLFrom(
        vol_clone_xml, vol,
        0)  # only (name, perms) are passed for a new volume
    return new_vol
def find_default_server(hosts, role, config_dict):
    """Pick the best host on which to place a new VM of the given role.

    Hosts are ranked (descending) by the first three metrics reported by
    get_server_infors().

    :param hosts: server's ip list
    :param role: key into config_dict describing the VM flavor
    :param config_dict: per-role resource requirements (reads ['memory'])
    :return: a default server's Ip, or None when no host qualifies
    """
    server_infors = get_server_infors(hosts)
    # each value: [logicFreeNoOverCommit, physic-free-mem, logic-free-mem,
    #              physic-free-disk, logic-free-disk]
    # .items() + a plain lambda replace the Py2-only iteritems() and
    # tuple-parameter unpacking, so this also runs on Python 3
    sorted_servers = sorted(server_infors.items(),
                            key=lambda item: operator.itemgetter(0, 1, 2)(item[1]),
                            reverse=True)
    for item in sorted_servers:
        log.debug("%s", item)

    for ip, info in sorted_servers:
        # find a server's physical memory at least has 10GB free memory to start a new vm,
        # and logic free memory with over commit fit the logic memory size in new vm,
        # and the disk pool has at least 100G free
        if (info[1] > 10) and (info[2] - config_dict[role]['memory'] > 0) and (info[3] > 100):
            default_server = ip
            break
    else:
        log.error("No server is available for the new vm, please confirm it.")
        return None

    log.info("Schedual to server: %s", default_server)
    return default_server
def config_vcpus(self, inst_name, vcpu_nums=None, vcpu_max=None):
    """Configure a VM's vcpus: live count on a running VM, max on a halted one.

    :param inst_name: VM name
    :param vcpu_nums: the current vcpu number
    :param vcpu_max: the max vcpu number
    :return: True on success (or nothing requested), False otherwise
    """
    if not (vcpu_nums or vcpu_max):
        return True

    log.info("Start to configure the VCPU in VM [%s].", inst_name)

    if vcpu_nums and self.virt_driver.is_instance_running(inst_name=inst_name):
        ret = self.virt_driver.set_vm_vcpu_live(inst_name=inst_name, vcpu_num=vcpu_nums)
    elif vcpu_max and self.virt_driver.is_instance_halted(inst_name=inst_name):
        ret = self.virt_driver.set_vm_vcpu_max(inst_name=inst_name, vcpu_num=vcpu_max)
    else:
        log.error(
            "Only support set live cpu on a running VM or set max cpu number on a halted VM."
        )
        return False

    # setting vcpu max can change the startup vcpu count when max < live count
    if ret:
        # db sync result is deliberately unchecked: a crontab re-syncs it
        self.update_database_info(inst_name=inst_name)
    return ret
def set_vm_static_memory(self, inst_name, memory_max=None, memory_min=None):
    """Set the maximum memory of an inactive domain.

    :param inst_name: domain name
    :param memory_max: size of GB
    :param memory_min: size of GB (ignored; accepted for interface parity)
    :return: True on success
    """
    # dom.setMaxMemory() requires the domain to be inactive
    dom = self._get_domain_handler(domain_name=inst_name)
    if dom is None:
        return False
    if dom.isActive():
        log.error("Set domain max memory need it to be stopped.")
        return False

    kb_per_gb = 1024 * 1024  # libvirt memory sizes are in KiB
    if memory_max:
        new_size_kb = int(memory_max) * kb_per_gb
    elif memory_min:
        log.info("Don't support min memory set.")
        return True
    else:
        log.error("Neither maxMemory nor minMemory is supplied.")
        return False

    # setMemoryFlags(size, AFFECT_CURRENT | MEM_MAXIMUM) would also work
    try:
        ret = dom.setMaxMemory(new_size_kb)
    except libvirtError as error:
        log.exception("Exception: %s", error)
        return False
    return ret == 0
def config_vif(self, inst_name, vif_index, device_name=None, network=None, bridge=None, ip=None):
    """Reconfigure a vif: destroy the old one, then create a replacement.

    @param inst_name: Vm name
    @param vif_index: vif index
    @param device_name: the host-device name with a bridge/switch bonded which the vif attach to
    @param network: bridge name which the vif will connect to
    @param bridge: bridge the new vif attaches to
    @param ip: IP for the new vif
    @return: True when both destroy and create succeed
    """
    log.info("Start to configure the interface device [%s] in VM [%s].", vif_index, inst_name)

    if not self.destroy_old_vif(inst_name, vif_index):
        return False

    return self.create_new_vif(inst_name, vif_index, device_name, network, bridge, ip)
def test_d_delete(self):
    """Delete the fixture record and verify every query path sees it gone."""
    log.info("Test test_d_delete")
    # restore the original hostname first (test_c_update renamed the record)
    self.assertTrue(
        self.virthost.update(sn=self.testdata['sn'],
                             data={'hostname': self.testdata['hostname']}))
    data = self.virthost.query(sn=self.testdata['sn'],
                               hostname=self.testdata['hostname'])
    self.assertNotEqual(data, [], "should not be []")
    # primary key of the record to delete
    pk = data[0]['id']
    self.assertFalse(self.virthost.delete(),
                     "Delete without params should not successfully")
    self.assertTrue(self.virthost.delete(id=pk), "should delete successfully")
    self.assertListEqual(self.virthost.query(id=pk), [],
                         "query with new pk after delete should be []")
    self.assertEqual(self.virthost.respond_data_count, 0,
                     "after delete record should be 0")
    self.assertListEqual(
        self.virthost.query(sn=self.testdata['sn'],
                            hostname=self.testdata['hostname']), [],
        "query should be []")
    self.assertEqual(self.virthost.query(sn=self.testdata['sn']), [],
                     "query with sn should be []")
    self.assertEqual(
        self.virthost.query(hostname=self.testdata['hostname']), [],
        "query with hostname should be []")
def delete_instance(self, inst_name, delete_disk=False):
    """Undefine a libvirt domain, powering it off first when needed.

    undefine: If the domain is running, it's converted to a transient domain
    without stopping it.  If the domain is inactive, the domain configuration
    is removed.

    :param inst_name: domain name
    :param delete_disk: also force-detach the domain's disks
    :return: True on success or when the domain does not exist
    """
    domain = self._get_domain_handler(inst_name)
    if not domain:
        return True

    if domain.isActive():
        log.info("Try to power off vm [%s] gracefully.", inst_name)
        if domain.destroyFlags(flags=libvirt.VIR_DOMAIN_DESTROY_GRACEFUL) != 0:
            log.info("Power off failed, try to poweroff forcely.")
            # force shutdown; raises libvirtError if the domain is already down
            domain.destroy()

    try:
        self.detach_disk_from_domain(inst_name, force=delete_disk)
    except libvirtError:
        pass

    try:
        # undefineFlags would be needed to handle managed save images/snapshots
        ret = domain.undefine()
    except libvirtError as error:
        log.exception("Exception raise when undefine domain [%s]: %s.", inst_name, error)
        return False
    return ret == 0
def power_off_vm(self, inst_name):
    """
    @see: void shutdown (session ref session_id, VM ref vm), it will attempts to first
    clean shutdown a VM and if it should fail then perform a hard shutdown on it.
    """
    log.debug("Start power off vm [%s].", inst_name)
    if self.is_instance_halted(inst_name):
        log.info("VM [%s] is already not running.", inst_name)
        return True

    handler = self.get_handler()
    if handler is None:
        log.error("Can not get handler when try to power off VM [%s].", inst_name)
        return False

    # first VM matching the label; [0] raises IndexError if the VM is absent
    vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]
    try:
        handler.xenapi.VM.shutdown(vm_ref)
        # brief pause to let the state transition settle
        time.sleep(0.5)
    except Exception, error:
        log.exception("Exception raised: %s when shutdown VM [%s].", error, inst_name)
        return False
    # NOTE(review): chunk appears truncated -- no success-path return is
    # visible after the try block; confirm against the full file.
def print_all_vms(self):
    """Log every VM name known to the underlying virt driver.

    :return: always True
    """
    log.info("All VMs in server: %s", self.server_name)
    log.info(self.virt_driver.get_vm_list())
    return True
def create_instance(self, vm_name, reference_vm, storage_pool=None):
    """Clone a new libvirt domain from a template domain.

    The template XML is copied; its name is replaced, the uuid and interface
    MACs are dropped so libvirt regenerates them, and each disk is cloned
    into either the given storage pool or the pool owning the source file.

    :param vm_name: new vm name
    :param reference_vm: template name
    :param storage_pool: pool name in which the new vm disk will be put
    :return: True when the new domain was defined successfully
    """
    log.info("enter create_instance %s", vm_name)
    if self.is_instance_exists(vm_name):
        log.error("Already exist domain: %s", vm_name)
        return False

    hv_handler = self.get_handler()
    template_dom = self._get_domain_handler(domain_name=reference_vm)
    # SECURE flag includes security-sensitive data (e.g. passwords) in the dump
    template_xml = template_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)

    tree = xmlEtree.fromstring(template_xml)
    name = tree.find("name")
    name.text = vm_name
    # drop the uuid so libvirt assigns a fresh one to the clone
    uuid = tree.find('uuid')
    tree.remove(uuid)

    pool_path = None
    if storage_pool:
        pool_path = self.get_target_path_via_pool(storage_pool)

    # remove MAC for interface, so each clone gets a newly generated MAC
    for interface in tree.findall("devices/interface"):
        elm = interface.find("mac")
        interface.remove(elm)

    # clone disk for new domain
    for disk in tree.findall("devices/disk[@device='disk']"):
        source_elm = disk.find('source')
        source_file = source_elm.get('file')
        if source_file:
            # keep the source file's extension (e.g. qcow2) on the clone
            suffix = str(os.path.basename(source_file)).split(".")[-1]
            target_file = ".".join([self._get_available_vdisk_name(vm_name), suffix])
            # set clone path in base path of storage_pool, so that clone disk locate that path
            if pool_path is not None:
                clone_path = os.path.join(pool_path, target_file)
                self.clone_disk_in_pool(source_file, target_file, storage_pool)
            else:
                # no pool given: clone next to the source file, in its own pool
                base_path, new_pool_name = self.get_target_path_via_file(source_file)
                clone_path = os.path.join(base_path, target_file)
                self.clone_disk_in_pool(source_file, target_file, new_pool_name)
            log.info("Cloned disk from %s to %s", source_file, clone_path)
            source_elm.set('file', clone_path)

    try:
        # if failed it will raise libvirtError, return value is always a Domain object
        new_dom = hv_handler.defineXML(xmlEtree.tostring(tree))
    except libvirtError:
        log.error("Create domain %s failed when define by xml.", vm_name)
        return False

    new_dom.setAutostart(1)
    return True
def delete_database_info(self, inst_name):
    """Delete the database record whose hostname equals inst_name.

    :param inst_name: VM name used as the database hostname key
    :return: result of the delete operation
    """
    # log message typo fixed ("databse" -> "database"), matching the
    # server-level variant of this method
    log.info("Start to delete [%s] information from database.", inst_name)
    return self.db_driver.delete(hostname=inst_name)
def delete_database_info(self):
    """Remove this server's record from the database.

    :return: result of the delete operation
    """
    server = self.server_name
    log.info("Start to delete [%s] information from database.", server)
    return self.db_driver.delete(hostname=server)
def create_instance(self, vm_name, reference_vm):
    '''Create a new domain by copying a template's qcow2 disk and XML.

    The reference disk and XML live under VM_HOUSE; the XML is copied and
    every occurrence of the template name is replaced with the new vm name
    before the domain is defined.
    '''
    log.debug("enter create_instance %s", vm_name)
    # copy the disk first
    target_disk = ''.join(
        (VM_HOUSE, vm_name, ".qcow2"))  # qcow2 is recommand
    reference_disk = ''.join((VM_HOUSE, reference_vm, ".qcow2"))
    # leading backslash bypasses any shell alias of cp (e.g. `cp -i`)
    cmd = "\cp -f %s %s" % (reference_disk, target_disk)
    log.debug("%s", cmd)
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
    _, perr = p.communicate()
    # any stderr output is treated as failure
    if perr:
        log.error("Create domain %s meet an error when copy disk: %s",
                  vm_name, str(perr))
        return False

    # change the xml
    target_xml = TEMPLATE_CFG_POOL + vm_name + ".xml"
    reference_xml = "".join((VM_HOUSE, reference_vm, ".xml"))
    cmd = "cp %s %s && sed -i 's/%s/%s/g' %s" % (
        reference_xml, target_xml, reference_vm, vm_name, target_xml)
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
    _, perr = p.communicate()
    if perr:
        log.error("Create domain %s meet an error when change xml:%s",
                  vm_name, str(perr))
        return False

    hv_handler = self.get_handler()
    if not hv_handler:
        log.error("Can not connect to host: %s when create domain %s.",
                  self.hostname, vm_name)
        return False

    # already defined on the hypervisor: nothing more to do
    if vm_name in [dom.name() for dom in hv_handler.listAllDomains()]:
        log.info("Vm %s is registered.", vm_name)
        return True

    with open(target_xml) as xml_file:
        xml = xml_file.read()
    try:
        # if failed it will raise libvirtError, return value is always a Domain object
        new_dom = hv_handler.defineXML(xml)
    except libvirtError:
        log.error("Create domain %s failed when define by xml.", vm_name)
        return False

    new_dom.setAutostart(1)
    return True
def config_memory_lively(self, inst_name, target_memory):
    """Adjust the memory of a running VM on the fly.

    :param inst_name: VM name
    :param target_memory: memory size to apply
    :return: result of the underlying driver call
    """
    log.info("Start to config the memory for a running VM.")
    return self.virt_driver.set_vm_memory_live(inst_name, target_memory)
def delete_vm_database_info(inst_name):
    """Delete the database record whose hostname equals inst_name.

    :param inst_name: VM name used as the database hostname key
    :return: result of the delete operation
    """
    # log message typo fixed ("databse" -> "database")
    log.info("Start to delete [%s] information from database.", inst_name)
    db_driver = DbFactory.get_db_driver("VirtHost")
    return db_driver.delete(hostname=inst_name)
def reset_vm(self, vm_name):
    """Reboot the named VM via the virt driver.

    :param vm_name: VM to reboot
    :return: result of the reboot call
    """
    log.info("Start to reset [%s]", vm_name)
    return self.virt_driver.reboot(vm_name)
def setUp(self):
    """Create the HostDriver under test and the canonical record fixture."""
    log.info("Setup in HostTestCase")
    self.virthost = HostDriver()
    # fixture row inserted/queried/updated/deleted by the ordered test_* methods
    self.testdata = {
        "sn": "hostDriverTestCase",
        "cpu_cores": 4,
        "memory_size": 4,
        "disk_size": 20,
        "disk_num": 2,
        "hostname": "Physical Host Test Case",
        "first_ip": "192.168.11.11"
    }
def test_c_update(self):
    """Exercise update(): invalid id, missing data, and a valid change."""
    log.info("Test test_c_update")
    self.assertFalse(self.virthost.update(id=-1, data={}),
                     "update id=-1 record should return False")
    self.assertFalse(self.virthost.update(id=1),
                     "update data is None should return False")
    # valid update keyed by the fixture hostname; renames the record
    # (test_d_delete renames it back)
    self.assertTrue(
        self.virthost.update(hostname=self.testdata['hostname'],
                             data={
                                 "hostname": "test_c_update",
                                 "first_ip": "10.101.10.10"
                             }),
        "Update should return True")
def get_templates_list(self):
    """Return the names of all domains usable as templates.

    Any powered-off (halted) VM counts as a template.
    :return: a list of template names
    """
    log.info("All powered-off VM can be used as a template. ")
    templates = []
    for dom_name in self.get_vm_list():
        if self.is_instance_halted(dom_name):
            templates.append(dom_name)
    return templates