def start(cif):
    global _operations

    _scheduler.start()
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(
            cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage, thus can block.
        per_vm_operation(
            UpdateVolumes,
            config.getint("irs", "vol_size_sample_interval")),

        # Needs dispatching because it accesses FS and libvirt data.
        per_vm_operation(
            NumaInfoMonitor,
            config.getint("vars", "vm_sample_numa_interval")),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(
            BlockjobMonitor,
            config.getint("vars", "vm_sample_jobs_interval")),

        # libvirt sampling using bulk stats can block, but unresponsive
        # domains are handled inside VMBulkSampler for performance reasons;
        # thus, it does not need dispatching.
        Operation(
            sampling.VMBulkSampler(
                libvirtconnection.get(cif), cif.getVMs, sampling.stats_cache),
            config.getint("vars", "vm_sample_interval"),
        ),

        # We do this only until we get high water mark notifications
        # from QEMU. This accesses storage and/or the QEMU monitor, so it
        # can block, thus we need dispatching.
        per_vm_operation(
            DriveWatermarkMonitor,
            config.getint("vars", "vm_watermark_interval")),
    ]

    for op in _operations:
        op.start()
def attach(queries, reverse=False, callback="attached"):
    def _getDeviceXML(device_xml):
        devXML = xml.etree.ElementTree.fromstring(device_xml)
        caps = devXML.find("capability")
        bus = caps.find("bus").text
        device = caps.find("device").text

        doc = getDOMImplementation().createDocument(None, "hostdev", None)
        hostdev = doc.documentElement
        hostdev.setAttribute("mode", "subsystem")
        hostdev.setAttribute("type", "usb")

        source = doc.createElement("source")
        hostdev.appendChild(source)

        address = doc.createElement("address")
        address.setAttribute("bus", bus)
        address.setAttribute("device", device)
        source.appendChild(address)

        return doc.toxml()

    vm_id = queries["vmId"][0]
    dev_name = queries["devname"][0]

    c = libvirtconnection.get()
    domain = c.lookupByUUIDString(vm_id)
    device_xml = _getDeviceXML(c.nodeDeviceLookupByName(dev_name).XMLDesc())

    if reverse:
        domain.detachDevice(device_xml)
    else:
        domain.attachDevice(device_xml)

    return "%s(\"%s\", \"%s\");" % (callback, vm_id, dev_name)
def testParseNetDeviceParams(self):
    deviceXML = hostdev._parse_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _NET_DEVICE).XMLDesc()
    )

    self.assertEquals(_NET_DEVICE_PARSED, deviceXML)
def testParseSRIOV_VFDeviceParams(self):
    deviceXML = hostdev._parse_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _SRIOV_VF).XMLDesc()
    )

    self.assertEquals(_SRIOV_VF_PARSED, deviceXML)
def main():
    """
    Defines network filters on libvirt
    """
    conn = libvirtconnection.get()
    NoMacSpoofingFilter().defineNwFilter(conn)
    conn.close()
def testListByCaps(self, caps):
    devices = hostdev.list_by_caps(
        libvirtconnection.get().vmContainer, caps)

    for cap in caps:
        self.assertTrue(set(DEVICES_BY_CAPS[cap].keys()).
                        issubset(devices.keys()))
def testCallSucceeded(self):
    """Positive test - libvirtMock does not raise any errors"""
    with run_libvirt_event_loop():
        LibvirtMock.virConnect.failGetLibVersion = False
        LibvirtMock.virConnect.failNodeDeviceLookupByName = False
        connection = libvirtconnection.get()
        connection.nodeDeviceLookupByName()
def testProcessNetDeviceParams(self):
    deviceXML = hostdev._process_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _NET_DEVICE).XMLDesc()
    )

    self.assertEqual(_NET_DEVICE_PROCESSED, deviceXML)
def testProcessDeviceParamsInvalidEncoding(self):
    deviceXML = hostdev._process_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _COMPUTER_DEVICE).XMLDesc()
    )

    self.assertEqual(_COMPUTER_DEVICE_PROCESSED, deviceXML)
def testProcessSRIOV_VFDeviceParams(self):
    deviceXML = hostdev._process_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _SRIOV_VF).XMLDesc()
    )

    self.assertEqual(_SRIOV_VF_PROCESSED, deviceXML)
def networks():
    """
    Get dict of networks from libvirt

    :returns: dict of networkname={properties}
    :rtype: dict of dict
            { 'ovirtmgmt': { 'bridge': 'ovirtmgmt', 'bridged': True}
              'red': { 'iface': 'red', 'bridged': False}}
    """
    nets = {}
    conn = libvirtconnection.get()
    allNets = ((net, net.name()) for net in conn.listAllNetworks(0))
    for net, netname in allNets:
        if netname.startswith(LIBVIRT_NET_PREFIX):
            netname = netname[len(LIBVIRT_NET_PREFIX):]
            nets[netname] = {}
            xml = etree.fromstring(net.XMLDesc(0))
            interface = xml.find('.//interface')
            if interface is not None:
                nets[netname]['iface'] = interface.get('dev')
                nets[netname]['bridged'] = False
            else:
                nets[netname]['bridge'] = xml.find('.//bridge').get('name')
                nets[netname]['bridged'] = True
    return nets
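# A minimal usage sketch, not part of the original code: it only shows how the
# dict returned by networks() above might be consumed. The helper name
# _print_networks is hypothetical.
def _print_networks():
    for name, props in networks().items():
        if props['bridged']:
            # bridged networks carry the bridge name
            print('%s -> bridge %s' % (name, props['bridge']))
        else:
            # non-bridged networks carry the underlying interface
            print('%s -> iface %s' % (name, props['iface']))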
def unregister(uuids):
    try:
        uuids = [str(uuid.UUID(s)) for s in uuids]
    except ValueError as e:
        logging.warning("Attempt to unregister invalid uuid %s: %s"
                        % (uuids, e))
        return response.error("secretBadRequestErr")

    con = libvirtconnection.get()
    try:
        for sec_uuid in uuids:
            logging.info("Unregistering secret %r", sec_uuid)
            try:
                virsecret = con.secretLookupByUUIDString(sec_uuid)
            except libvirt.libvirtError as e:
                if e.get_error_code() != libvirt.VIR_ERR_NO_SECRET:
                    raise
                logging.debug("No such secret %r", sec_uuid)
            else:
                virsecret.undefine()
    except libvirt.libvirtError as e:
        logging.error("Could not unregister secrets: %s", e)
        return response.error("secretUnregisterErr")
    return response.success()
def main():
    """
    Defines network filters on libvirt
    """
    conn = libvirtconnection.get(None, False)
    NoMacSpoofingFilter().defineNwFilter(conn)
    conn.close()
def testParseDeviceParamsInvalidEncoding(self):
    deviceXML = hostdev._parse_device_params(
        libvirtconnection.get().nodeDeviceLookupByName(
            _COMPUTER_DEVICE).XMLDesc()
    )

    self.assertEquals(_COMPUTER_DEVICE_PARSED, deviceXML)
def start(cif, scheduler):
    global _operations
    global _executor

    _executor = executor.Executor(name="periodic",
                                  workers_count=_WORKERS,
                                  max_tasks=_TASKS,
                                  scheduler=scheduler,
                                  max_workers=_MAX_WORKERS)
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(
            cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage, thus can block.
        per_vm_operation(
            UpdateVolumes,
            config.getint('irs', 'vol_size_sample_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(
            BlockjobMonitor,
            config.getint('vars', 'vm_sample_jobs_interval')),

        # We do this only until we get high water mark notifications
        # from QEMU. It accesses storage and/or the QEMU monitor, so it
        # can block, thus we need dispatching.
        per_vm_operation(
            DriveWatermarkMonitor,
            config.getint('vars', 'vm_watermark_interval')),

        Operation(
            lambda: recovery.lookup_external_vms(cif),
            config.getint('sampling', 'external_vm_lookup_interval'),
            scheduler,
            exclusive=True,
            discard=False),

        Operation(
            containersconnection.monitor,
            config.getint('vars', 'vm_sample_interval'),
            scheduler),
    ]

    if config.getboolean('sampling', 'enable'):
        _operations.extend([
            # libvirt sampling using bulk stats can block, but unresponsive
            # domains are handled inside VMBulkstatsMonitor for performance
            # reasons; thus, it does not need dispatching.
            Operation(
                sampling.VMBulkstatsMonitor(
                    libvirtconnection.get(cif),
                    cif.getVMs,
                    sampling.stats_cache),
                config.getint('vars', 'vm_sample_interval'),
                scheduler),

            Operation(
                sampling.HostMonitor(cif=cif),
                config.getint('vars', 'host_sample_stats_interval'),
                scheduler,
                timeout=config.getint('vars', 'host_sample_stats_interval'),
                exclusive=True,
                discard=False),
        ])

    host.stats.start()

    for op in _operations:
        op.start()
def main():
    portProfile = os.environ.get('vmfex')
    if portProfile is not None:
        handleDirectPool(libvirtconnection.get())
        doc = hooking.read_domxml()
        interface, = doc.getElementsByTagName('interface')
        attachProfileToInterfaceXml(interface, portProfile)
        hooking.write_domxml(doc)
def removeNetwork(network):
    netName = netinfo.LIBVIRT_NET_PREFIX + network
    conn = libvirtconnection.get()

    net = conn.networkLookupByName(netName)
    if net.isActive():
        net.destroy()
    if net.isPersistent():
        net.undefine()
def flush():
    conn = libvirtconnection.get()
    allNets = ((net, net.name()) for net in conn.listAllNetworks(0))
    for net, netname in allNets:
        if netname.startswith(netinfo.LIBVIRT_NET_PREFIX):
            if net.isActive():
                net.destroy()
            if net.isPersistent():
                net.undefine()
def removeNetwork(network):
    netName = LIBVIRT_NET_PREFIX + network
    conn = libvirtconnection.get()

    net = _netlookup_by_name(conn, netName)
    if net:
        if net.isActive():
            net.destroy()
        if net.isPersistent():
            net.undefine()
def getNetworkDef(network):
    netName = netinfo.LIBVIRT_NET_PREFIX + network
    conn = libvirtconnection.get()
    try:
        net = conn.networkLookupByName(netName)
        return net.XMLDesc(0)
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_NETWORK:
            return
        raise
def start(cif, scheduler):
    global _operations
    global _executor

    _executor = executor.Executor(name="periodic",
                                  workers_count=_WORKERS,
                                  max_tasks=_TASKS,
                                  scheduler=scheduler,
                                  max_workers=_MAX_WORKERS)
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(
            cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage, thus can block.
        per_vm_operation(
            UpdateVolumes,
            config.getint('irs', 'vol_size_sample_interval')),

        # Needs dispatching because it accesses FS and libvirt data.
        # Ignored by new engine, has to be kept for BC sake.
        per_vm_operation(
            NumaInfoMonitor,
            config.getint('vars', 'vm_sample_numa_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(
            BlockjobMonitor,
            config.getint('vars', 'vm_sample_jobs_interval')),

        # libvirt sampling using bulk stats can block, but unresponsive
        # domains are handled inside VMBulkSampler for performance reasons;
        # thus, it does not need dispatching.
        Operation(
            sampling.VMBulkSampler(
                libvirtconnection.get(cif), cif.getVMs, sampling.stats_cache),
            config.getint('vars', 'vm_sample_interval'),
            scheduler),

        # We do this only until we get high water mark notifications
        # from QEMU. It accesses storage and/or the QEMU monitor, so it
        # can block, thus we need dispatching.
        per_vm_operation(
            DriveWatermarkMonitor,
            config.getint('vars', 'vm_watermark_interval')),

        Operation(
            sampling.HostMonitor(cif=cif),
            config.getint('vars', 'host_sample_stats_interval'),
            scheduler),

        Operation(
            containersconnection.monitor,
            config.getint('vars', 'vm_sample_interval'),
            scheduler),
    ]

    host.stats.start()

    for op in _operations:
        op.start()
def getNetworkDef(network):
    netName = netinfo.LIBVIRT_NET_PREFIX + network
    conn = libvirtconnection.get()
    try:
        net = conn.networkLookupByName(netName)
        return net.XMLDesc(0)
    except libvirtError as e:
        if e.get_error_code() == VIR_ERR_NO_NETWORK:
            return
        raise
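# A minimal usage sketch, not part of the original code: getNetworkDef() above
# returns None when the libvirt network does not exist, so existence can be
# checked by testing the return value. The helper name is hypothetical.
def _network_exists(network):
    return getNetworkDef(network) is not None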
def getMemoryStatsByNumaCell(cell):
    """
    Get the memory stats of a specified numa node, the unit is MiB.

    :param cell: the index of numa node
    :type cell: int

    :return: dict like {'total': '49141', 'free': '46783'}
    """
    cellMemInfo = libvirtconnection.get().getMemoryStats(cell, 0)
    cellMemInfo['total'] = str(cellMemInfo['total'] / 1024)
    cellMemInfo['free'] = str(cellMemInfo['free'] / 1024)
    return cellMemInfo
def memory_by_cell(index):
    '''
    Get the memory stats of a specified numa node, the unit is MiB.

    :param index: the index of numa node
    :type index: int

    :return: dict like {'total': '49141', 'free': '46783'}
    '''
    conn = libvirtconnection.get()
    meminfo = conn.getMemoryStats(index, 0)
    meminfo['total'] = str(meminfo['total'] / 1024)
    meminfo['free'] = str(meminfo['free'] / 1024)
    return meminfo
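# A minimal usage sketch, not part of the original code: memory_by_cell()
# above returns MiB values as strings, so a caller computing utilization has
# to convert them back to numbers first. The helper name is hypothetical.
def _cell_usage_percent(index):
    meminfo = memory_by_cell(index)
    total = int(meminfo['total'])
    free = int(meminfo['free'])
    return 100.0 * (total - free) / total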
def testCallFailedConnectionUp(self):
    """
    libvirtMock will raise an error when nodeDeviceLookupByName is called.
    When getLibVersion is called (used by libvirtconnection to recognize
    disconnections) it will not raise an error -> in that case an error
    should be raised ('Unknown libvirterror').
    """
    connection = libvirtconnection.get(killOnFailure=True)
    LibvirtMock.virConnect.failNodeDeviceLookupByName = True
    LibvirtMock.virConnect.failGetLibVersion = False
    self.assertRaises(LibvirtMock.libvirtError,
                      connection.nodeDeviceLookupByName)
def _list_domains():
    conn = libvirtconnection.get()
    for dom_uuid in conn.listDomainsID():
        try:
            dom_obj = conn.lookupByID(dom_uuid)
            dom_xml = dom_obj.XMLDesc(0)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                logging.exception("domain %s is dead", dom_uuid)
            else:
                raise
        else:
            yield dom_obj, dom_xml
def testCallFailedConnectionDown(self):
    """
    libvirtMock will raise an error when nodeDeviceLookupByName is called.
    When getLibVersion is called (used by libvirtconnection to recognize
    disconnections) it will also raise an error -> in that case os.kill
    should be called ('connection to libvirt broken.').
    """
    connection = libvirtconnection.get(killOnFailure=True)
    LibvirtMock.virConnect.failNodeDeviceLookupByName = True
    LibvirtMock.virConnect.failGetLibVersion = True
    self.assertRaises(TerminationException,
                      connection.nodeDeviceLookupByName)
def testCallFailedConnectionDown(self):
    """
    libvirtMock will raise an error when nodeDeviceLookupByName is called.
    When getLibVersion is called (used by libvirtconnection to recognize
    disconnections) it will also raise an error -> in that case os.kill
    should be called ('connection to libvirt broken.').
    """
    with run_libvirt_event_loop():
        connection = libvirtconnection.get(killOnFailure=True)
        LibvirtMock.virConnect.failNodeDeviceLookupByName = True
        LibvirtMock.virConnect.failGetLibVersion = True
        self.assertRaises(TerminationException,
                          connection.nodeDeviceLookupByName)
def testCallFailedConnectionUp(self):
    """
    libvirtMock will raise an error when nodeDeviceLookupByName is called.
    When getLibVersion is called (used by libvirtconnection to recognize
    disconnections) it will not raise an error -> in that case an error
    should be raised ('Unknown libvirterror').
    """
    with run_libvirt_event_loop():
        connection = libvirtconnection.get(killOnFailure=True)
        LibvirtMock.virConnect.failNodeDeviceLookupByName = True
        LibvirtMock.virConnect.failGetLibVersion = False
        self.assertRaises(LibvirtMock.libvirtError,
                          connection.nodeDeviceLookupByName)
def clear():
    """
    Clear all registered ovirt secrets.

    Should be called during startup and shutdown to ensure that we don't
    leave around stale or unneeded secrets.
    """
    logging.info("Unregistering all secrets")
    con = libvirtconnection.get()
    for virsecret in con.listAllSecrets():
        try:
            if _is_ovirt_secret(virsecret):
                virsecret.undefine()
        except libvirt.libvirtError as e:
            logging.error("Could not unregister %s: %s", virsecret, e)
def _getCompatibleCpuModels():
    c = libvirtconnection.get()
    allModels = _getAllCpuModels()

    def compatible(model, vendor):
        if not vendor:
            return False
        xml = '<cpu match="minimum"><model>%s</model>' \
              "<vendor>%s</vendor></cpu>" % (model, vendor)
        try:
            return c.compareCPU(xml, 0) in (
                libvirt.VIR_CPU_COMPARE_SUPERSET,
                libvirt.VIR_CPU_COMPARE_IDENTICAL)
        except libvirt.libvirtError as e:
            # hack around libvirt BZ#795836
            if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
                return False
            raise

    return ["model_" + model for (model, vendor)
            in allModels.iteritems() if compatible(model, vendor)]
def listdev(queries):
    def _getHostDevOfDom(dom_xml):
        domXML = xml.etree.ElementTree.fromstring(dom_xml)
        uuid = domXML.find("uuid").text
        for device in domXML.find("devices").iter("hostdev"):
            if device.get("type") == "usb":
                address = device.find("source").find("address")
                key = "%s#%s" % (address.get("bus"), address.get("device"))
                yield key, uuid

    def _getDevInfo(dev_xml):
        d = dict()
        devXML = xml.etree.ElementTree.fromstring(dev_xml)
        d["name"] = devXML.find("name").text
        d["parent"] = devXML.find("parent").text
        caps = devXML.find("capability")
        d["bus"] = caps.find("bus").text
        d["device"] = caps.find("device").text
        for e in ("vendor", "product"):
            eXML = caps.find(e)
            if eXML is not None:
                if "id" in eXML.attrib:
                    d[e + "_id"] = eXML.get("id")
                if eXML.text:
                    d[e] = eXML.text
        return d

    used = dict()
    devices = list()

    c = libvirtconnection.get()
    for dom in c.listAllDomains():
        for devid, domid in _getHostDevOfDom(dom.XMLDesc()):
            used[devid] = domid

    for dev in c.listDevices("usb_device"):
        device = _getDevInfo(c.nodeDeviceLookupByName(dev).XMLDesc())
        k = "%s#%s" % (device["bus"], device["device"])
        if k in used:
            device["vm"] = used[k]
        devices.append(device)

    return "%s(%s);" % ("AllListed", json.dumps(devices))
def lookup_external_vms(cif):
    conn = libvirtconnection.get()
    for vm_id in cif.get_unknown_vm_ids():
        try:
            dom_obj = conn.lookupByUUIDString(vm_id)
            dom_xml = dom_obj.XMLDesc(0)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                logging.debug("External domain %s not found", vm_id)
                continue
            else:
                raise
        if _is_ignored_vm(vm_id, dom_obj, dom_xml):
            continue
        logging.debug("Recovering external domain: %s", vm_id)
        if _recover_domain(cif, vm_id, dom_xml, True):
            cif.log.info("Recovered new external domain: %s", vm_id)
        else:
            cif.log.info("Failed to recover new external domain: %s", vm_id)
def _list_domains():
    conn = libvirtconnection.get()
    domains = []
    for dom_obj in conn.listAllDomains():
        dom_uuid = 'unknown'
        try:
            dom_uuid = dom_obj.UUIDString()
            logging.debug("Found domain %s", dom_uuid)
            dom_xml = dom_obj.XMLDesc(0)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                logging.exception("domain %s is dead", dom_uuid)
            else:
                raise
        else:
            if _is_ignored_vm(dom_uuid, dom_obj, dom_xml):
                continue
            domains.append((dom_obj, dom_xml, _is_external_vm(dom_xml),))
    return domains
def _getVDSMVms(self):
    """
    Return a list of VDSM-created VMs.
    """
    libvirtCon = libvirtconnection.get()
    domIds = libvirtCon.listDomainsID()
    vms = []
    for domId in domIds:
        try:
            vm = libvirtCon.lookupByID(domId)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                self.log.error("domId: %s is dead", domId, exc_info=True)
            else:
                self.log.error("Can't look for domId: %s, code: %s",
                               domId, e.get_error_code(), exc_info=True)
                raise
        else:
            vms.append(vm)
    return [vm for vm in vms if self.isVDSMVm(vm)]
def _getCompatibleCpuModels():
    c = libvirtconnection.get()
    allModels = _getAllCpuModels()

    def compatible(model, vendor):
        if not vendor:
            return False
        xml = '<cpu match="minimum"><model>%s</model>' \
              '<vendor>%s</vendor></cpu>' % (model, vendor)
        try:
            return c.compareCPU(xml, 0) in (
                libvirt.VIR_CPU_COMPARE_SUPERSET,
                libvirt.VIR_CPU_COMPARE_IDENTICAL)
        except libvirt.libvirtError as e:
            # hack around libvirt BZ#795836
            if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID:
                return False
            raise

    return ['model_' + model for (model, vendor)
            in allModels.iteritems() if compatible(model, vendor)]
def getDeviceDetails(addr):
    '''
    investigate device by its address and return
    [bus, slot, function] list
    '''
    connection = libvirtconnection.get(None)
    nodeDevice = connection.nodeDeviceLookupByName(addr)
    devXml = minidom.parseString(nodeDevice.XMLDesc(0))

    bus = hex(int(devXml.getElementsByTagName('bus')[0].firstChild.nodeValue))
    slot = hex(int(
        devXml.getElementsByTagName('slot')[0].firstChild.nodeValue))
    function = hex(int(
        devXml.getElementsByTagName('function')[0].firstChild.nodeValue))

    sys.stderr.write('sriov: bus=%s slot=%s function=%s\n' %
                     (bus, slot, function))

    return (bus, slot, function)
def contEIOVms(self, sdUUID, isDomainStateValid):
    # This method is called every time the onDomainStateChange event is
    # emitted. This event is emitted even when a domain goes INVALID;
    # if that happens there is nothing to do.
    if not isDomainStateValid:
        return

    libvirtCon = libvirtconnection.get()
    libvirtVms = libvirtCon.listAllDomains(
        libvirt.VIR_CONNECT_LIST_DOMAINS_PAUSED)

    with self.vmContainerLock:
        self.log.info("vmContainerLock acquired")
        for libvirtVm in libvirtVms:
            state = libvirtVm.state(0)
            if state[1] == libvirt.VIR_DOMAIN_PAUSED_IOERROR:
                vmId = libvirtVm.UUIDString()
                vmObj = self.vmContainer[vmId]
                if sdUUID in vmObj.sdIds:
                    self.log.info("Cont vm %s in EIO", vmId)
                    vmObj.cont()
def register(secrets, clear=False):
    try:
        secrets = [Secret(params) for params in secrets]
    except ValueError as e:
        logging.warning("Attempt to register invalid secret: %s", e)
        return response.error("secretBadRequestErr")

    con = libvirtconnection.get()
    try:
        for secret in secrets:
            logging.info("Registering secret %s", secret)
            secret.register(con)
        if clear:
            uuids = frozenset(sec.uuid for sec in secrets)
            for virsecret in con.listAllSecrets():
                if virsecret.UUIDString() not in uuids and _is_ovirt_secret(virsecret):
                    virsecret.undefine()
    except libvirt.libvirtError as e:
        logging.error("Could not register secret %s: %s", secret, e)
        return response.error("secretRegisterErr")
    return response.success()
def getDeviceDetails(addr):
    '''
    investigate device by its address and return
    [bus, slot, function] list
    '''
    connection = libvirtconnection.get(None)
    nodeDevice = connection.nodeDeviceLookupByName(addr)
    devXml = minidom.parseString(nodeDevice.XMLDesc(0))

    bus = hex(int(devXml.getElementsByTagName('bus')[0].firstChild.nodeValue))
    slot = hex(int(
        devXml.getElementsByTagName('slot')[0]
        .firstChild.nodeValue))
    function = hex(int(
        devXml.getElementsByTagName('function')[0]
        .firstChild.nodeValue))

    sys.stderr.write('sriov: bus=%s slot=%s function=%s\n' %
                     (bus, slot, function))

    return (bus, slot, function)
def register(secrets, clear=False):
    try:
        secrets = [Secret(params) for params in secrets]
    except ValueError as e:
        logging.warning("Attempt to register invalid secret: %s", e)
        return response.error("secretBadRequestErr")

    con = libvirtconnection.get()
    try:
        for secret in secrets:
            logging.info("Registering secret %s", secret)
            secret.register(con)
        if clear:
            uuids = frozenset(sec.uuid for sec in secrets)
            for virsecret in con.listAllSecrets():
                if (virsecret.UUIDString() not in uuids and
                        _is_ovirt_secret(virsecret)):
                    virsecret.undefine()
    except libvirt.libvirtError as e:
        logging.error("Could not register secret %s: %s", secret, e)
        return response.error("secretRegisterErr")
    return response.success()
def start(cif, scheduler):
    global _operations
    global _executor

    _executor = executor.Executor(name="periodic",
                                  workers_count=_WORKERS,
                                  max_tasks=_TASKS,
                                  scheduler=scheduler)
    _executor.start()

    def per_vm_operation(func, period):
        disp = VmDispatcher(
            cif.getVMs, _executor, func, _timeout_from(period))
        return Operation(disp, period, scheduler)

    _operations = [
        # Needs dispatching because updating the volume stats needs
        # access to the storage, thus can block.
        per_vm_operation(
            UpdateVolumes,
            config.getint('irs', 'vol_size_sample_interval')),

        # Needs dispatching because it accesses FS and libvirt data.
        per_vm_operation(
            NumaInfoMonitor,
            config.getint('vars', 'vm_sample_numa_interval')),

        # Job monitoring needs QEMU monitor access.
        per_vm_operation(
            BlockjobMonitor,
            config.getint('vars', 'vm_sample_jobs_interval')),

        # libvirt sampling using bulk stats can block, but unresponsive
        # domains are handled inside VMBulkSampler for performance reasons;
        # thus, it does not need dispatching.
        Operation(
            sampling.VMBulkSampler(
                libvirtconnection.get(cif), cif.getVMs, sampling.stats_cache),
            config.getint('vars', 'vm_sample_interval'),
            scheduler),

        # We do this only until we get high water mark notifications
        # from QEMU. This accesses storage and/or the QEMU monitor, so it
        # can block, thus we need dispatching.
        per_vm_operation(
            DriveWatermarkMonitor,
            config.getint('vars', 'vm_watermark_interval')),

        Operation(
            sampling.HostMonitor(cif=cif),
            config.getint('vars', 'host_sample_stats_interval'),
            scheduler),
    ]

    hoststats.start()

    for op in _operations:
        op.start()