def networks():
    """
    Get dict of networks from libvirt.

    :returns: dict of networkname={properties}
    :rtype: dict of dict
            { 'ovirtmgmt': { 'bridge': 'ovirtmgmt', 'bridged': True },
              'red': { 'iface': 'red', 'bridged': False } }
    """
    nets = {}
    conn = libvirtconnection.get()
    for net in conn.listAllNetworks(0):
        netname = net.name()
        # Only report networks owned by vdsm (identified by the name prefix).
        if not netname.startswith(LIBVIRT_NET_PREFIX):
            continue
        netname = netname[len(LIBVIRT_NET_PREFIX):]
        xml = minidom.parseString(net.XMLDesc(0))
        interfaces = xml.getElementsByTagName('interface')
        if interfaces:  # truthiness instead of len() > 0
            # An <interface> element means a non-bridged (passthrough) net.
            nets[netname] = {'iface': interfaces[0].getAttribute('dev'),
                             'bridged': False}
        else:
            bridge = xml.getElementsByTagName('bridge')[0]
            nets[netname] = {'bridge': bridge.getAttribute('name'),
                             'bridged': True}
    return nets
def networks():
    """
    Get dict of networks from libvirt, including bandwidth QoS settings.

    :returns: dict of networkname={properties}
    :rtype: dict of dict
            { 'ovirtmgmt': { 'bridge': 'ovirtmgmt', 'bridged': True,
                'qosInbound': {'average': '1024', 'burst': '', 'peak': ''},
                'qosOutbound': {'average': '1024', 'burst': '2048',
                                'peak': ''}},
              'red': { 'iface': 'red', 'bridged': False,
                'qosInbound': '', 'qosOutbound': ''} }
    """
    nets = {}
    conn = libvirtconnection.get()
    for net in conn.listAllNetworks(0):
        netname = net.name()
        # Only report networks owned by vdsm (identified by the name prefix).
        if not netname.startswith(LIBVIRT_NET_PREFIX):
            continue
        netname = netname[len(LIBVIRT_NET_PREFIX):]
        xml = minidom.parseString(net.XMLDesc(0))
        qos = _parseBandwidthQos(xml)
        # Build the per-network dict once instead of re-indexing nets[netname].
        netProps = {'qosInbound': qos.inbound, 'qosOutbound': qos.outbound}
        interfaces = xml.getElementsByTagName('interface')
        if interfaces:  # truthiness instead of len() > 0
            # An <interface> element means a non-bridged (passthrough) net.
            netProps['iface'] = interfaces[0].getAttribute('dev')
            netProps['bridged'] = False
        else:
            bridge = xml.getElementsByTagName('bridge')[0]
            netProps['bridge'] = bridge.getAttribute('name')
            netProps['bridged'] = True
        nets[netname] = netProps
    return nets
def removeLibvirtNetwork(network):
    """Destroy and undefine the vdsm-owned libvirt network *network*.

    Best effort: a libvirtError (e.g. network not found) is logged at
    debug level and swallowed, matching the original behavior.
    """
    netName = LIBVIRT_NET_PREFIX + network
    conn = libvirtconnection.get()
    try:
        net = conn.networkLookupByName(netName)
        net.destroy()
        net.undefine()
    except libvirt.libvirtError:
        # BUG FIX: the logging keyword is exc_info, not exec_info --
        # the misspelled keyword raised TypeError inside this handler.
        # Also use lazy %-style args instead of eager concatenation.
        logging.debug('failed to remove libvirt network %s', netName,
                      exc_info=True)
def detachDevice(addr):
    '''Detach a pci device from the host so it can be attached to a VM.

    :param addr: libvirt node-device name of the pci device.
    '''
    connection = libvirtconnection.get(None)
    nodeDevice = connection.nodeDeviceLookupByName(addr)
    if nodeDevice is not None:  # idiom fix: identity check, was '!= None'
        sys.stderr.write('sriov: detaching pci device: %s\n' % addr)
        # NOTE: 'dettach' is the (misspelled) official libvirt API name.
        nodeDevice.dettach()
    else:
        sys.stderr.write('sriov: cannot dettach device: %s\n' % addr)
def _getEmulatedMachines():
    """Return the machine types the host can emulate, as a list of strings.

    An empty list is returned when the capabilities XML carries no <guest>
    element (e.g. the kvm modules are not loaded).
    """
    conn = libvirtconnection.get()
    caps = minidom.parseString(conn.getCapabilities())
    guests = caps.getElementsByTagName('guest')
    if not guests:
        return []
    machines = guests[0].getElementsByTagName('machine')
    return [machine.firstChild.toxml() for machine in machines]
def createLibvirtNetwork(network, bridged=True, iface=None):
    # Define, start and mark-autostart a vdsm-owned libvirt network.
    # bridged=True -> <forward mode='bridge'> over bridge named *network*;
    # bridged=False -> passthrough over *iface* (must not be None then --
    # escape(None) would raise; callers are expected to pass it).
    conn = libvirtconnection.get()
    netName = LIBVIRT_NET_PREFIX + network
    if bridged:
        netXml = '''<network><name>%s</name><forward mode='bridge'/> <bridge name='%s'/></network>''' % (escape(netName), escape(network))
    else:
        netXml = '''<network><name>%s</name><forward mode='passthrough'> <interface dev='%s'/></forward></network>''' % (escape(netName), escape(iface))
    # Define persistently, start it now, and have libvirt autostart it on boot.
    net = conn.networkDefineXML(netXml)
    net.create()
    net.setAutostart(1)
def _getCompatibleCpuModels():
    """Return 'model_<name>' for each cpu model in libvirt's cpu map that
    the host cpu is compatible with (superset or identical)."""
    c = libvirtconnection.get()
    # FIX: use open() in a with-block -- the original py2-only file() call
    # leaked the file handle.
    with open('/usr/share/libvirt/cpu_map.xml') as f:
        cpu_map = minidom.parseString(f.read())
    allModels = [
        m.getAttribute('name')
        for m in cpu_map.getElementsByTagName('arch')[0].childNodes
        if m.nodeName == 'model'
    ]

    def compatible(model):
        # Ask libvirt whether the host cpu is at least this model.
        xml = '<cpu match="minimum"><model>%s</model></cpu>' % model
        return c.compareCPU(xml, 0) in (
            libvirt.VIR_CPU_COMPARE_SUPERSET,
            libvirt.VIR_CPU_COMPARE_IDENTICAL)

    return ['model_' + model for model in allModels if compatible(model)]
def getDeviceDetails(addr):
    '''Investigate a pci device by its libvirt node-device name and return
    its (bus, slot, function) as hex strings.'''
    connection = libvirtconnection.get(None)
    nodeDevice = connection.nodeDeviceLookupByName(addr)
    devXml = minidom.parseString(nodeDevice.XMLDesc(0))

    def _hexField(tag):
        # hex string of the named element's integer text content
        node = devXml.getElementsByTagName(tag)[0]
        return hex(int(node.firstChild.nodeValue))

    bus = _hexField('bus')
    slot = _hexField('slot')
    function = _hexField('function')
    sys.stderr.write('sriov: bus=%s slot=%s function=%s\n'
                     % (bus, slot, function))
    return (bus, slot, function)
def _getCompatibleCpuModels():
    """Return 'model_<name>' for each cpu model in libvirt's cpu map that
    the host cpu is compatible with (superset or identical)."""
    c = libvirtconnection.get()
    # FIX: open the cpu map via a with-block so the handle is closed;
    # the original used py2-only file() and never closed it.
    with open('/usr/share/libvirt/cpu_map.xml') as f:
        cpu_map = minidom.parseString(f.read())
    allModels = [
        m.getAttribute('name')
        for m in cpu_map.getElementsByTagName('arch')[0].childNodes
        if m.nodeName == 'model'
    ]

    def compatible(model):
        # Ask libvirt whether the host cpu is at least this model.
        xml = '<cpu match="minimum"><model>%s</model></cpu>' % model
        return c.compareCPU(xml, 0) in (libvirt.VIR_CPU_COMPARE_SUPERSET,
                                        libvirt.VIR_CPU_COMPARE_IDENTICAL)

    return ['model_' + model for model in allModels if compatible(model)]
def __init__ (self, log):
    """
    Initialize the (single) clientIF instance

    :param log: a log object to be used for this object's logging.
    :type log: :class:`logging.Logger`
    """
    self.vmContainerLock = threading.Lock()
    self._networkSemaphore = threading.Semaphore()
    self._shutdownSemaphore = threading.Semaphore()
    self.log = log
    # recovery mode until existing VMs are rediscovered (see thread below)
    self._recovery = True
    self._libvirt = libvirtconnection.get()
    self._syncLibvirtNetworks()
    self.channelListener = Listener(self.log)
    # unique id for this clientIF incarnation
    self._generationID = str(uuid.uuid4())
    # storage dispatcher must exist before the guarded setup below,
    # since the except-branch shuts it down on failure
    self._initIRS()
    try:
        self.vmContainer = {}
        ifids = netinfo.nics() + netinfo.bondings()
        ifrates = map(netinfo.speed, ifids)
        self._hostStats = utils.HostStatsThread(cif=self, log=log, ifids=ifids, ifrates=ifrates)
        self._hostStats.start()
        # cap concurrent outgoing migrations by config and core count
        mog = min(config.getint('vars', 'max_outgoing_migrations'), caps.CpuInfo().cores())
        vm.MigrationSourceThread.setMaxOutgoingMigrations(mog)
        self.lastRemoteAccess = 0
        self._memLock = threading.Lock()
        self._enabled = True
        self.ksmMonitor = ksm.KsmMonitorThread(self)
        self._netConfigDirty = False
        # rediscover existing VMs asynchronously; clears _recovery when done
        threading.Thread(target=self._recoverExistingVms, name='clientIFinit').start()
        self.channelListener.settimeout(config.getint('vars', 'guest_agent_timeout'))
        self.channelListener.start()
        self.threadLocal = threading.local()
        self.threadLocal.client = ''
    except:
        # deliberate catch-all: log, tear down storage dispatcher, re-raise
        self.log.error('failed to init clientIF, shutting down storage dispatcher')
        if self.irs:
            self.irs.prepareForShutdown()
        raise
    self._prepareBindings()
def returnDeviceToHost(addr, devpath):
    """Reattach a pci device to the host and restore ownership of its
    device files.

    :param addr: libvirt node-device name of the pci device.
    :param devpath: sysfs directory of the device.
    """
    # attach device back to host
    connection = libvirtconnection.get(None)
    nodeDevice = connection.nodeDeviceLookupByName(addr)
    if nodeDevice is not None:  # idiom fix: identity check, was '!= None'
        sys.stderr.write('sriov after_vm_destroy: attaching pci device %s back to host\n' % addr)
        nodeDevice.reAttach()
    # return device permissions
    owner = 'root:root'
    for f in os.listdir(devpath):
        if f.startswith('resource') or f in ('rom', 'reset'):
            dev = os.path.join(devpath, f)
            command = ['/bin/chown', owner, dev]
            retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
            if retcode != 0:
                # hook convention: report and abort with exit code 2
                sys.stderr.write('sriov after_vm_destroy: error chown %s to %s, err = %s\n' % (dev, owner, err))
                sys.exit(2)
def get(self):
    """Collect hypervisor details from libvirt and write them out as JSON."""
    conn = libvirtconnection.get()
    if conn is None:  # idiom fix: identity check, was '== None'
        self.write('Failed to open connection to the hypervisor')
        return
    # FIX: use a fresh local name instead of shadowing the builtins
    # 'dict' and 'list'; call order is preserved.
    info = {}
    info["hostname"] = conn.getHostname()
    info["freememory"] = conn.getFreeMemory()
    info["version"] = conn.getVersion()
    info["domains"] = conn.listDefinedDomains()
    info["url"] = conn.getURI()
    info["type"] = conn.getType()
    info["info"] = conn.getInfo()
    info["isAlive"] = conn.isAlive()
    info["numOfDefinedDomains"] = conn.numOfDefinedDomains()
    info["LibVersion"] = conn.getLibVersion()
    info["interfaces"] = conn.listDefinedInterfaces()
    info["networks"] = conn.listDefinedNetworks()
    info["storagepools"] = conn.listDefinedStoragePools()
    ret = json.dumps(info)
    self.write(ret)
    # parenthesized form works identically under py2 and py3
    print(ret)
def _getCompatibleCpuModels():
    """Return 'model_<name>' for every cpu model in libvirt's cpu map that
    has a known vendor and that the host cpu supports."""
    c = libvirtconnection.get()
    cpu_map = minidom.parseString(file("/usr/share/libvirt/cpu_map.xml").read())

    def vendor(modelElem):
        # name of the model's <vendor> element, or None when absent
        tags = modelElem.getElementsByTagName("vendor")
        return tags[0].getAttribute("name") if tags else None

    arch = cpu_map.getElementsByTagName("arch")[0]
    allModels = [(node.getAttribute("name"), vendor(node))
                 for node in arch.childNodes
                 if node.nodeName == "model"]

    def compatible(model, vendor):
        # models without a vendor are never reported
        if not vendor:
            return False
        xml = '<cpu match="minimum"><model>%s</model>' "<vendor>%s</vendor></cpu>" % (model, vendor)
        return c.compareCPU(xml, 0) in (libvirt.VIR_CPU_COMPARE_SUPERSET,
                                        libvirt.VIR_CPU_COMPARE_IDENTICAL)

    return ["model_" + model
            for (model, vendor) in allModels
            if compatible(model, vendor)]
fcntl.flock(f.fileno(), fcntl.LOCK_EX) try: if 'direct-pool' not in conn.listNetworks(): createDirectPool(conn) elif not qbhInUse(conn) and not validateDPool(conn): createDirectPool(conn) finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) if 'vmfex' in os.environ: try: sys.stderr.write('vmfex: starting to edit VM \n') # connect to libvirtd and handle the direct-pool network conn = libvirtconnection.get() handleDirectPool(conn) # Get the vmfex line vmfex = os.environ['vmfex'] sys.stderr.write('vmfex: customProperty: ' + str(vmfex) + '\n') # convert to dictionary vmfexd = ast.literal_eval(vmfex) # make sure the keys are lowercase vmfexd = dict((k.lower(), v) for k, v in vmfexd.iteritems()) # Get the VM's xml definition domxml = hooking.read_domxml() for iface in domxml.getElementsByTagName('interface'): mac = iface.getElementsByTagName('mac')[0] macaddr = mac.getAttribute('address').lower() if macaddr in vmfexd:
for iface in dpoolxml.getElementsByTagName('interface'): definedNics.append(iface.getAttribute('dev')) if set(definedNics) == set(getUsableNics()): return True else: return False def handleDirectPool(conn): with open('/var/run/vdsm/hook-vmfex.lock', 'w') as f: fcntl.flock(f.fileno(), fcntl.LOCK_EX) try: if 'direct-pool' not in conn.listNetworks(): createDirectPool(conn) elif not qbhInUse(conn) and not validateDPool(conn): createDirectPool(conn) finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) if 'vmfex' in os.environ: try: # connect to libvirtd conn = libvirtconnection.get() handleDirectPool(conn) except: sys.stderr.write('vmfex: ERROR: %s\n' % traceback.format_exc()) sys.exit(2)