def add_host_CPU_live(self, cpu_ref):
    """ Add cpu to pool, if it is currently not assigned to a pool.
        @param cpu_ref: reference of host_cpu instance to add
        @type cpu_ref: str
        @raise PoolError: pool not activated, or cpu already in another pool
    """
    if not self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
    node = XendNode.instance()
    # translate the host_cpu ref into the physical cpu number
    number = node.get_host_cpu_field(cpu_ref, 'number')
    self.pool_lock.acquire()
    try:
        pool_id = self.query_pool_id()
        # reject cpus that already belong to some pool
        other_pool_ref = self.get_cpu_pool_by_cpu_ref(cpu_ref)
        if len(other_pool_ref) != 0:
            raise PoolError(XEND_ERROR_INVALID_CPU,
                            'cpu already assigned to pool "%s"' % other_pool_ref[0])
        xc.cpupool_addcpu(pool_id, number)
    finally:
        self.pool_lock.release()
    # keep the proposed cpu list in sync with the live assignment
    if number not in self.proposed_cpus:
        self.proposed_cpus.append(number)
    self._update_ncpu(pool_id)
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_MTU(self, new_mtu):
    """Apply a new MTU to the underlying device; persist PIF config on success."""
    changed = linux_set_mtu(self.device, new_mtu)
    if changed:
        self.MTU = new_mtu
        import XendNode
        XendNode.instance().save_PIFs()
    return changed
def add_host_CPU_live(self, cpu_ref):
    """ Add cpu to pool, if it is currently not assigned to a pool.
        @param cpu_ref: reference of host_cpu instance to add
        @type cpu_ref: str
    """
    if not self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
    cpu_number = XendNode.instance().get_host_cpu_field(cpu_ref, 'number')
    self.pool_lock.acquire()
    try:
        pool_id = self.query_pool_id()
        owner = self.get_cpu_pool_by_cpu_ref(cpu_ref)
        if owner:
            raise PoolError(
                XEND_ERROR_INVALID_CPU,
                'cpu already assigned to pool "%s"' % owner[0])
        xc.cpupool_addcpu(pool_id, cpu_number)
    finally:
        self.pool_lock.release()
    if cpu_number not in self.proposed_cpus:
        self.proposed_cpus.append(cpu_number)
    self._update_ncpu(pool_id)
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_ncpu(self, ncpu):
    """ Set the desired number of cpus; must be a positive integer.
        @raise PoolError: ncpu is smaller than 1
    """
    count = int(ncpu)
    if count < 1:
        raise PoolError(XEND_ERROR_POOL_PARAM, 'ncpu')
    self.ncpu = count
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_MAC(self, new_mac):
    """Apply a new MAC to the underlying device; persist PIF config on success."""
    changed = linux_set_mac(self.device, new_mac)
    if changed:
        self.MAC = new_mac
        import XendNode
        XendNode.instance().save_PIFs()
    return changed
def remove_from_proposed_CPUs(self, cpu):
    """ Drop a cpu number from the proposed list; pool must be inactive.
        Config is persisted only if the list actually changed.
    """
    if self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
    cpu_number = int(cpu)
    if cpu_number in self.proposed_cpus:
        self.proposed_cpus.remove(cpu_number)
        if self._managed:
            XendNode.instance().save_cpu_pools()
def create_VLAN(self, device, network_uuid, host_ref, vlan):
    """Exposed via API - create a new VLAN from existing VIF

    @param device: name of an existing physical interface
    @param network_uuid: uuid of the network the new PIF joins
    @param host_ref: uuid of this host (validated below)
    @param vlan: VLAN tag (0..4095)
    @return: uuid of the newly created PIF
    """
    ifs = [name for name, _, _ in linux_get_phy_ifaces()]
    vlan = int(vlan)
    # Check VLAN tag is valid
    if vlan < 0 or vlan >= 4096:
        raise VLANTagInvalid(vlan)
    # Check device exists
    if device not in ifs:
        raise InvalidDeviceError(device)
    # Check VLAN doesn't already exist (Linux names VLAN ifaces "<dev>.<tag>")
    if "%s.%d" % (device, vlan) in ifs:
        raise DeviceExistsError("%s.%d" % (device, vlan))
    # Check network ref is valid
    from XendNetwork import XendNetwork
    if network_uuid not in XendNetwork.get_all():
        raise InvalidHandleError("Network", network_uuid)
    # Check host_ref is this host
    import XendNode
    if host_ref != XendNode.instance().get_uuid():
        raise InvalidHandleError("Host", host_ref)
    # Create the VLAN
    _create_VLAN(device, vlan)
    # Create new uuids
    pif_uuid = genuuid.createString()
    metrics_uuid = genuuid.createString()
    # Create the record; MAC/MTU are read back from the just-created iface
    record = {
        "device": device,
        "MAC": linux_get_mac("%s.%d" % (device, vlan)),
        "MTU": linux_get_mtu("%s.%d" % (device, vlan)),
        "network": network_uuid,
        "VLAN": vlan,
    }
    # Create instances (construction registers them — TODO confirm)
    metrics = XendPIFMetrics(metrics_uuid, pif_uuid)
    pif = XendPIF(record, pif_uuid, metrics_uuid)
    # Not sure if they should be created plugged or not...
    pif.plug()
    XendNode.instance().save_PIFs()
    return pif_uuid
def destroy(self):
    """ In order to destroy a cpu pool, it must be deactivated """
    self.pool_lock.acquire()
    try:
        if self.get_activated():
            raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
        XendBase.destroy(self)
    finally:
        self.pool_lock.release()
    # persist the updated pool list (unconditional, unlike the setters
    # which check self._managed first)
    XendNode.instance().save_cpu_pools()
def add_to_proposed_CPUs(self, cpu):
    """ Add a cpu number to the proposed list; pool must be inactive.
        The list stays sorted; config is persisted only on change.
    """
    if self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
    cpu_number = int(cpu)
    if cpu_number not in self.proposed_cpus:
        self.proposed_cpus.append(cpu_number)
        self.proposed_cpus.sort()
        if self._managed:
            XendNode.instance().save_cpu_pools()
def create(cls, record):
    """ Create a new managed pool instance.
        @param record: attributes of pool
        @type record: dict
        @return: uuid of created pool
        @rtype: str
    """
    pool_uuid = genuuid.createString()
    # construction registers the pool; the instance itself is not needed here
    XendCPUPool(record, pool_uuid)
    XendNode.instance().save_cpu_pools()
    return pool_uuid
def create_VLAN(self, device, network_uuid, host_ref, vlan):
    """Exposed via API - create a new VLAN from existing VIF

    @param device: name of an existing physical interface
    @param network_uuid: uuid of the network the new PIF joins
    @param host_ref: uuid of this host (validated below)
    @param vlan: VLAN tag (0..4095)
    @return: uuid of the newly created PIF
    """
    ifs = [name for name, _, _ in linux_get_phy_ifaces()]
    vlan = int(vlan)
    # Check VLAN tag is valid
    if vlan < 0 or vlan >= 4096:
        raise VLANTagInvalid(vlan)
    # Check device exists
    if device not in ifs:
        raise InvalidDeviceError(device)
    # Check VLAN doesn't already exist (Linux names VLAN ifaces "<dev>.<tag>")
    if "%s.%d" % (device, vlan) in ifs:
        raise DeviceExistsError("%s.%d" % (device, vlan))
    # Check network ref is valid
    from XendNetwork import XendNetwork
    if network_uuid not in XendNetwork.get_all():
        raise InvalidHandleError("Network", network_uuid)
    # Check host_ref is this host
    import XendNode
    if host_ref != XendNode.instance().get_uuid():
        raise InvalidHandleError("Host", host_ref)
    # Create the VLAN
    _create_VLAN(device, vlan)
    # Create new uuids
    pif_uuid = genuuid.createString()
    metrics_uuid = genuuid.createString()
    # Create the record
    # NOTE(review): MAC/MTU start empty here, unlike the sibling
    # implementation that reads them from the new iface — presumably
    # they get filled in later; verify against callers.
    record = {
        "device": device,
        "MAC": '',
        "MTU": '',
        "network": network_uuid,
        "VLAN": vlan
    }
    # Create instances
    metrics = XendPIFMetrics(metrics_uuid, pif_uuid)
    pif = XendPIF(record, pif_uuid, metrics_uuid)
    # Not sure if they should be created plugged or not...
    pif.plug()
    XendNode.instance().save_PIFs()
    return pif_uuid
def destroy(self):
    """Remove this network and its bridge; rejected while VIFs/PIFs are attached."""
    # refuse while anything is still connected
    if self.get_VIFs():
        raise NetworkError("Cannot destroy network with VIFs attached",
                           self.get_name_label())
    if self.get_PIFs():
        raise NetworkError("Cannot destroy network with PIFs attached",
                           self.get_name_label())
    XendBase.destroy(self)
    Brctl.bridge_del(self.get_name_label())
    XendNode.instance().save_networks()
def destroy(self):
    """Delete this VLAN PIF; purely physical interfaces cannot be destroyed."""
    # interface name equal to the device name means no VLAN suffix,
    # i.e. this PIF maps straight onto a physical device
    if self.get_interface_name() == self.get_device():
        raise PIFIsPhysical()
    self.unplug()
    if not _destroy_VLAN(self.get_device(), self.get_VLAN()):
        raise NetworkError("Unable to delete VLAN", self.get_uuid())
    XendBase.destroy(self)
    import XendNode
    XendNode.instance().save_PIFs()
def get_host_CPUs(self):
    """ Query all cpu refs of this pool currently assigned.
        - Read pool id of this pool from xenstore
        - Read cpu configuration from hypervisor
        - lookup cpu number -> cpu ref
        @return: host_cpu refs
        @rtype: list of str
        @raise PoolError: pool is active but has no pool id
    """
    if not self.get_activated():
        # pool not active, so it couldn't have any assigned cpus
        return []
    node = XendNode.instance()
    pool_id = self.query_pool_id()
    if pool_id is None:   # was '== None'; identity test is the correct idiom
        raise PoolError(XEND_ERROR_INTERNAL,
                        [self.getClass(), 'get_host_CPUs'])
    cpus = []
    for pool_rec in xc.cpupool_getinfo():
        if pool_rec['cpupool'] == pool_id:
            cpus = pool_rec['cpulist']
            break   # pool ids are unique; no need to scan further
    # query host_cpu ref for any cpu of the pool
    return [cpu_ref for cpu_ref in node.get_host_cpu_refs()
            if node.get_host_cpu_field(cpu_ref, 'number') in cpus]
def pool_list(cls, names): sxprs = [] try: node = XendNode.instance() xd = XendDomain.instance() pools = cls.get_all_records() for (pool_uuid, pool_vals) in pools.items(): if pool_vals['name_label'] in names or len(names) == 0: # conv host_cpu refs to cpu number cpus = [ node.get_host_cpu_field(cpu_ref, 'number') for cpu_ref in pool_vals['host_CPUs'] ] cpus.sort() pool_vals['host_CPU_numbers'] = cpus # query VMs names. Take in account, that a VM # returned by get_all_records could be destroy, now vm_names = [ vm.getName() for vm in map( xd.get_vm_by_uuid, pool_vals['started_VMs']) if vm ] pool_vals['started_VM_names'] = vm_names pool_vals['auto_power_on'] = int( pool_vals['auto_power_on']) sxprs += [[pool_uuid] + map2sxp(pool_vals)] except XendAPIError, ex: raise VmError(ex.get_api_error())
def get_by_name_label_pool_func(self, host_ref, name):
    """Look up a network by name, dispatching to the owning host if remote."""
    poolapi = _get_BNPoolAPI()
    if not poolapi._isMaster:
        # slaves only ever answer for themselves
        return self.get_by_name_label(name)
    if cmp(host_ref, XendNode.instance().uuid) == 0:
        return self.get_by_name_label(name)
    remote_ip = poolapi.get_host_ip(host_ref)
    return xen_rpc_call(remote_ip, 'network_get_by_name_label_pool_func',
                        host_ref, name)
def get_all_records_by_host(self, host_ref):
    """Fetch all network records, forwarding to the owning host if remote."""
    poolapi = _get_BNPoolAPI()
    if not poolapi._isMaster:
        # slaves only ever answer for themselves
        return self.get_all_records()
    if cmp(host_ref, XendNode.instance().uuid) == 0:
        return self.get_all_records()
    remote_ip = poolapi.get_host_ip(host_ref)
    return xen_rpc_call(remote_ip, 'network_get_all_records_by_host',
                        host_ref)
def create(self, record):
    """ Called from API, to create a new network """
    net_uuid = genuuid.createString()
    # Instantiate first so the record gets validated
    network = XendNetwork(record, net_uuid)
    label = network.name_label
    # Reject duplicate bridge names
    if bridge_exists(label):
        del network
        raise UniqueNameError(label, "network")
    Brctl.bridge_create(network.name_label)
    XendNode.instance().save_networks()
    return net_uuid
def get_cpu_pool_by_cpu_ref(cls, host_cpu):
    """ Query cpu_pool ref the given cpu belongs to.
        @param host_cpu: ref of host_cpu to lookup
        @type host_cpu: str
        @return: list cpu_pool refs (list contains not more than one element)
        @rtype: list of str
    """
    cpu_nr = XendNode.instance().get_host_cpu_field(host_cpu, 'number')
    owners = [rec['cpupool'] for rec in xc.cpupool_getinfo()
              if cpu_nr in rec['cpulist']]
    if owners:
        # pool found; return the ref
        return cls.query_pool_ref(owners[0])
    return []
def remove_host_CPU_live(self, cpu_ref):
    """ Remove cpu from pool. After successful call, the cpu is free.
        Remove of the last cpu of the pool is rejected.
        @param cpu_ref: reference of host_cpu instance to remove
        @type cpu_ref: str
        @raise PoolError: pool deactivated, cpu not in pool, or last cpu
    """
    if not self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated')
    node = XendNode.instance()
    number = node.get_host_cpu_field(cpu_ref, 'number')
    self.pool_lock.acquire()
    try:
        pool_id = self.query_pool_id()
        pool_rec = {}
        for pool in xc.cpupool_getinfo():
            if pool['cpupool'] == pool_id:
                pool_rec = pool
                break
        # BUGFIX: use .get() so a missing pool record falls through to
        # the PoolError below instead of raising a bare KeyError
        if number in pool_rec.get('cpulist', []):
            # don't strand running domains: refuse to remove the last cpu
            if len(pool_rec['cpulist']) < 2 and pool_rec['n_dom'] > 0:
                raise PoolError(XEND_ERROR_LAST_CPU_NOT_REM,
                                'could not remove last cpu')
            xc.cpupool_removecpu(pool_id, number)
        else:
            raise PoolError(XEND_ERROR_INVALID_CPU,
                            'CPU not assigned to pool')
    finally:
        self.pool_lock.release()
    # keep the proposed cpu list in sync with the live assignment
    if number in self.proposed_cpus:
        self.proposed_cpus.remove(number)
    self._update_ncpu(pool_id)
    if self._managed:
        XendNode.instance().save_cpu_pools()
def create(self, dscsi_struct): # Check if VM is valid xendom = XendDomain.instance() if not xendom.is_valid_vm(dscsi_struct['VM']): raise InvalidHandleError('VM', dscsi_struct['VM']) dom = xendom.get_vm_by_uuid(dscsi_struct['VM']) # Check if PSCSI is valid xennode = XendNode.instance() pscsi_uuid = xennode.get_pscsi_by_uuid(dscsi_struct['PSCSI']) if not pscsi_uuid: raise InvalidHandleError('PSCSI', dscsi_struct['PSCSI']) # Assign PSCSI to VM try: dscsi_ref = XendTask.log_progress(0, 100, \ dom.create_dscsi, \ dscsi_struct) except XendError, e: log.exception("Error in create_dscsi") raise
def create(self, dpci_struct): # Check if VM is valid xendom = XendDomain.instance() if not xendom.is_valid_vm(dpci_struct['VM']): raise InvalidHandleError('VM', dpci_struct['VM']) dom = xendom.get_vm_by_uuid(dpci_struct['VM']) # Check if PPCI is valid xennode = XendNode.instance() ppci_uuid = xennode.get_ppci_by_uuid(dpci_struct['PPCI']) if not ppci_uuid: raise InvalidHandleError('PPCI', dpci_struct['PPCI']) for existing_dpci in XendAPIStore.get_all('DPCI'): if ppci_uuid == existing_dpci.get_PPCI(): raise DirectPCIError("Device is in use") # Assign PPCI to VM try: dpci_ref = XendTask.log_progress(0, 100, dom.create_dpci, dpci_struct) except XendError, e: raise DirectPCIError("Failed to assign device")
def pool_list(cls, names): sxprs = [] try: node = XendNode.instance() xd = XendDomain.instance() pools = cls.get_all_records() for (pool_uuid, pool_vals) in pools.items(): if pool_vals['name_label'] in names or len(names) == 0: # conv host_cpu refs to cpu number cpus = [ node.get_host_cpu_field(cpu_ref, 'number') for cpu_ref in pool_vals['host_CPUs'] ] cpus.sort() pool_vals['host_CPU_numbers'] = cpus # query VMs names. Take in account, that a VM # returned by get_all_records could be destroy, now vm_names = [ vm.getName() for vm in map(xd.get_vm_by_uuid, pool_vals['started_VMs']) if vm ] pool_vals['started_VM_names'] = vm_names pool_vals['auto_power_on'] = int(pool_vals['auto_power_on']) sxprs += [[pool_uuid] + map2sxp(pool_vals)] except XendAPIError, ex: raise VmError(ex.get_api_error())
def add_to_other_config(self, key, value):
    """Store key/value in this network's other_config and persist it."""
    self.other_config[key] = value
    XendNode.instance().save_networks()
def set_other_config(self, value):
    """Replace this network's other_config dict wholesale and persist it."""
    self.other_config = value
    XendNode.instance().save_networks()
def set_name_description(self, new_desc):
    """Set this network's description and persist it."""
    self.name_description = new_desc
    XendNode.instance().save_networks()
def set_proposed_CPUs(self, proposed_cpus):
    """ Replace the proposed cpu list; pool must be inactive.
        Entries are normalised to int.
    """
    if self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
    self.proposed_cpus = [int(entry) for entry in proposed_cpus]
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_proposed_CPUs(self, proposed_cpus):
    """ Replace the proposed cpu list; pool must be inactive.
        Entries are normalised to int.
    """
    if self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
    self.proposed_cpus = [int(entry) for entry in proposed_cpus]
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_other_config(self, other_config):
    """Replace the pool's other_config dict; persist when managed."""
    self.other_config = other_config
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_default_netmask(self, netmask):
    """Set this network's default netmask and persist it."""
    self.default_netmask = netmask
    XendNode.instance().save_networks()
def remove_from_other_config(self, key):
    """Delete key from the pool's other_config if present; persist when managed."""
    # pop with a default is a no-op for missing keys, matching the
    # original membership-test-then-del
    self.other_config.pop(key, None)
    if self._managed:
        XendNode.instance().save_cpu_pools()
def add_to_other_config(self, key, value):
    """Store key/value in the pool's other_config; persist when managed."""
    self.other_config[key] = value
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_name_label(self, name_label):
    """Rename the pool after validating the name; persist when managed."""
    self._checkName(name_label)   # raises on invalid / duplicate names
    self.name_label = name_label
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_name_description(self, name_descr):
    """Set the pool's description; persist when managed."""
    self.name_description = name_descr
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_auto_power_on(self, auto_power_on):
    """Set the auto-power-on flag; accepts int-like input, stores a bool."""
    self.auto_power_on = bool(int(auto_power_on))
    if self._managed:
        XendNode.instance().save_cpu_pools()
def set_sched_policy(self, sched_policy):
    """Set the pool's scheduler; only allowed while the pool is inactive."""
    if self.get_activated():
        raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated')
    self.sched_policy = sched_policy
    if self._managed:
        XendNode.instance().save_cpu_pools()
def remove_from_other_config(self, key):
    """Delete key from this network's other_config if present, then persist."""
    if key in self.other_config:
        del self.other_config[key]
    # NOTE(review): config is saved even when the key was absent —
    # presumably harmless; confirm this matches the pool-side variant
    XendNode.instance().save_networks()
def set_default_gateway(self, gateway):
    """Set this network's default gateway and persist it."""
    self.default_gateway = gateway
    XendNode.instance().save_networks()
def _cpu_number_to_ref(cls, number):
    """Map a physical cpu number back to its host_cpu ref, or None if unknown."""
    node = XendNode.instance()
    matches = [ref for ref in node.get_host_cpu_refs()
               if node.get_host_cpu_field(ref, 'number') == number]
    if matches:
        return matches[0]
    return None
def get_host(self):
    """Return the uuid of the local host."""
    from xen.xend import XendNode
    node = XendNode.instance()
    return node.get_uuid()
def get_sched_policy(self):
    """Return this pool's scheduler name, or the host default when unset."""
    if self.sched_policy:
        return self.sched_policy
    # default scheduler selected
    return XendNode.instance().get_vcpus_policy()