def test_updatedComponent_removeTrue(self):
    """Test updating a component with _remove or remove set to True."""
    for remove_key in ('_remove', 'remove'):
        # Datamap that requests removal of the "eth0" interface.
        DATA = {
            "id": "eth0",
            "compname": "os",
            "relname": "interfaces",
            remove_key: True,
        }
        # Removing a component that does not exist yet must be a no-op.
        # (Message fixed: the old "update is not idempotent" text did not
        # describe this assertion.)
        changed = self.service.applyDataMap(self.device, ObjectMap(DATA))
        self.assertFalse(
            changed,
            "{} = True on a missing component resulted in change".format(
                remove_key))
        # Create the interface so the removal has something to act on.
        self.service.applyDataMap(
            self.device,
            ObjectMap({
                "id": "eth0",
                "compname": "os",
                "relname": "interfaces",
                "modname": "Products.ZenModel.IpInterface",
                "speed": 10e9,
            }))
        self.assertEqual(1, self.device.os.interfaces.countObjects(),
                         'failed to add object')
        # Now the removal map must report a change and delete the component.
        changed = self.service.applyDataMap(self.device, ObjectMap(DATA))
        self.assertTrue(changed, "remove object failed")
        self.assertEqual(0, self.device.os.interfaces.countObjects(),
                         "failed to remove component")
def process(self, device, results, log):
    """Process collect's results. Return iterable of datamaps."""
    state, tables = results

    # Fold every collected table into the shared interface-table state.
    for tabledata in tables:
        state.update_iftable(tabledata)

    all_macs = set()
    maps = []
    for iface_id, iface in state.iftable.items():
        all_macs.update(iface['clientmacs'])
        maps.append(
            ObjectMap({
                'compname': 'os',
                'relname': 'interfaces',
                'id': iface_id,
                'clientmacs': list(set(iface['clientmacs'])),
                'baseport': iface['baseport'],
            }))

    # When MACs were never indexed, prepend a reindex map so it is
    # applied before the interface updates.
    if not state.macs_indexed and state.iftable:
        maps.insert(0, ObjectMap({'set_reindex_maps': all_macs}))

    return maps
def process(self, device, results, log):
    """Parse memory/swap command output into hw/os ObjectMaps.

    The first line of ``results`` is expected to look like
    ``<label>: <value> <unit>``; each following line describes a swap
    space with five whitespace-separated fields.
    """
    log.info('Collecting memory and swap for device %s' % device.id)
    maps = []

    # Fixed: split once (was split twice) and dropped the dead
    # memory_line/swap_lines pre-initializers.
    lines = results.split("\n")
    if not lines or ':' not in lines[0]:
        # Robustness fix: malformed/empty output previously raised
        # IndexError/ValueError; now we just skip modeling.
        log.warning('Unexpected memory output for device %s' % device.id)
        return maps

    # Memory line: "<label>: <value> <unit>"
    memory = lines[0].split(':')[1]
    mem_value, unit = memory.split()
    mem_size = int(mem_value) * MULTIPLIER.get(unit, 1)

    # Swap lines: swapfile dev swaplo blocks free.
    # blocks / 2 — presumably 512-byte blocks to KB; TODO confirm units.
    swap_size = 0
    for line in lines[1:]:
        vals = line.split()
        if len(vals) != 5:
            continue
        swapfile, dev, swaplo, blocks, free = vals
        swap_size += (int(blocks) / 2)

    maps.append(ObjectMap({"totalMemory": mem_size}, compname="hw"))
    maps.append(ObjectMap({"totalSwap": swap_size}, compname="os"))
    return maps
def process(self, device, results, log):
    """collect WMI information from this device"""
    log.info('processing %s for device %s', self.name(), device.id)
    try:
        cs = results.get('Win32_ComputerSystem', [None])[0]
        # Renamed from `os` to avoid shadowing the stdlib module name.
        os_rec = results.get('Win32_OperatingSystem', [None])[0]
        if not (cs and os_rec):
            # Without both WMI records there is nothing to model.
            return

        maps = []
        om = self.objectMap(cs)
        om.snmpLocation = ''
        om.snmpOid = ''
        # The OS name arrives as "name|extra"; keep only the leading part.
        om.setOSProductKey = MultiArgs(os_rec['_name'].split('|')[0],
                                       'Microsoft')
        om.setHWProductKey = MultiArgs(cs['_model'], cs['_manufacturer'])

        sn = str(
            results.get('Win32_SystemEnclosure', [{
                'sn': ''
            }])[0]['sn'] or '').strip()
        if sn:
            om.setHWSerialNumber = sn
        maps.append(om)

        # Values are multiplied by 1024 — presumably KB -> bytes.
        maps.append(
            ObjectMap({"totalMemory": (os_rec.get('totalMemory', 0) * 1024)},
                      compname="hw"))
        maps.append(
            ObjectMap({"totalSwap": (os_rec.get('totalSwap', 0) * 1024)},
                      compname="os"))
    except Exception:
        # Bug fix: was a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit and dropped the traceback.
        log.exception('processing error')
        return
    return maps
def process(self, device, results, log):
    """Build hw/os/ESXiHost datamaps from the collected host summaries."""
    #log.debug(' Start of process - results is %s \n' % (results))
    maps = []
    # Don't actually see there being more than one host.....
    for host in results:
        summary = host.summary
        hostDict = {
            'setOSProductKey': summary.config.product.fullName,
            'setHWProductKey': summary.hardware.model,
            'cpuMhz': long(summary.hardware.cpuMhz),
            'cpuModel': summary.hardware.cpuModel,
            'numCpuCores': int(summary.hardware.numCpuCores),
            'numCpuPkgs': int(summary.hardware.numCpuPkgs),
            'numCpuThreads': int(summary.hardware.numCpuThreads),
            'numNics': int(summary.hardware.numNics),
        }
        hostDict['numCpuCoresPerPkgs'] = (
            hostDict['numCpuCores'] / hostDict['numCpuPkgs'])
        # Same mapping as before: vmotionEnabled == 0 yields True.
        hostDict['vmotionState'] = (summary.config.vmotionEnabled == 0)
        log.debug(' hostDict is %s \n' % (hostDict))
        maps.append(ObjectMap({'totalMemory': summary.hardware.memorySize},
                              compname='hw'))
        maps.append(ObjectMap({'totalSwap': 0}, compname='os'))
        maps.append(ObjectMap(
            modname='ZenPacks.community.VMwareESXiMonitorPython.ESXiHost',
            data=hostDict))
    return maps
def process(self, device, results, log):
    """Derive the host FQDN from hostname/dnsdomainname output.

    ``results`` must unpack into (hostname, hostname -f output,
    dnsdomainname output).
    """
    log.info("Modeler %s processing data for device %s", self.name(),
             device.id)

    results = results_from_result(results)
    if len(results) != 3:
        # Consistency fix: use the supplied plugin logger throughout
        # (this method previously mixed `log` with the module-level LOG).
        log.error(
            "Unable to process results. Expected 3 results, but got %d (%s)",
            len(results), results)
        return []

    hostname, hostname_f, dnsdomainname = results

    # the FQDN could be either "hostname -f", or "hostname" + "dnsdomainname"
    fqdn = hostname_f
    if "." not in hostname and len(dnsdomainname) > 0:
        merged_fqdn = hostname + '.' + dnsdomainname
    else:
        merged_fqdn = ""

    # pick the longer of the two
    if len(merged_fqdn) > len(fqdn):
        fqdn = merged_fqdn

    hostfqdn_om = ObjectMap({'hostfqdn': fqdn, 'hostlocalname': hostname})
    log.info("Hostname: %s (%s)", hostname, fqdn)

    return [
        ObjectMap(
            {'setApplyDataMapToOpenStackInfrastructureHost': hostfqdn_om})
    ]
def process(self, device, results, log):
    """Parse semicolon-delimited ESXi host info into hw/os/host maps."""
    log.info('Processing VMware ESXi host info for device %s' % device.id)
    # Bug fix: `maps` was created only inside the data branch, so output
    # with no semicolon line raised NameError at `return maps`, and a
    # second data line silently discarded the first line's maps.
    maps = []
    rlines = results.split("\n")
    for line in rlines:
        if line.startswith("Warning:"):
            log.warning('%s' % line)
        elif re.search(';', line):
            osVendor, osProduct, hwVendor, hwProduct, memorySize, cpuMhz, \
                cpuModel, numCpuCores, numCpuPkgs, numCpuThreads, numNics, \
                esxiHostName, vmotionState = line.split(';')
            maps.append(
                ObjectMap({'totalMemory': memorySize}, compname='hw'))
            maps.append(ObjectMap({'totalSwap': 0}, compname='os'))
            om = self.objectMap()
            om.setOSProductKey = osProduct
            om.setHWProductKey = hwProduct
            om.cpuMhz = long(cpuMhz)
            om.cpuModel = cpuModel
            om.numCpuCores = int(numCpuCores)
            om.numCpuPkgs = int(numCpuPkgs)
            om.numCpuCoresPerPkgs = int(numCpuCores) / int(numCpuPkgs)
            om.numCpuThreads = int(numCpuThreads)
            om.numNics = int(numNics)
            om.esxiHostName = esxiHostName
            # A vmotionState of 0 maps to True (same logic as before).
            om.vmotionState = (int(vmotionState) == 0)
            maps.append(om)
    return maps
def test_updatedComponent_removeTrue(self):
    """Test updating a component with _remove or remove set to True."""
    for remove_key in ('_remove', 'remove'):
        # Datamap requesting removal of the eth0 interface.
        eth0_om = ObjectMap({
            "id": "eth0",
            "compname": "os",
            "relname": "interfaces",
            remove_key: True,
        })

        # Removing a not-yet-existing component must not report a change.
        changed = self.service.remote_applyDataMaps(self.device.id,
                                                    [eth0_om])
        self.assertFalse(
            changed, "{} = True resulted in change".format(remove_key))

        # Create the interface so removal has something to act on.
        creation_map = ObjectMap({
            "id": "eth0",
            "compname": "os",
            "relname": "interfaces",
            "modname": "Products.ZenModel.IpInterface",
            "speed": 10e9,
        })
        self.service.remote_applyDataMaps(self.device.id, [creation_map])

        # The removal map must now report a change and delete eth0.
        changed = self.service.remote_applyDataMaps(self.device.id,
                                                    [eth0_om])
        self.assertTrue(
            changed, "{} = True didn't result in change".format(remove_key))
        self.assertEqual(
            0, self.device.os.interfaces.countObjects(),
            "{} = True didn't remove the component".format(remove_key))
def process(self, device, results, log):
    """Parse /proc/meminfo-style output into totalMemory/totalSwap maps."""
    log.info('Collecting memory and swap for device %s' % device.id)
    # Fixed: removed the unused `rm = self.relMap()` local.
    maps = []
    for line in results.split("\n"):
        # Expect "Name: <value> <unit>" lines; skip anything else.
        vals = line.split(':')
        if len(vals) != 2:
            continue
        name, value = vals
        vals = value.split()
        if len(vals) != 2:
            continue
        value, unit = vals
        size = int(value) * MULTIPLIER.get(unit, 1)
        if name == 'MemTotal':
            maps.append(ObjectMap({"totalMemory": size}, compname="hw"))
        elif name == 'SwapTotal':
            maps.append(ObjectMap({"totalSwap": size}, compname="os"))
    return maps
def process(self, device, results, log):
    """Extract the OpenStack version string and map it onto the endpoint."""
    log.info("Modeler %s processing data for device %s", self.name(),
             device.id)

    version_pattern = re.compile(r'^(?P<version>[\d\.]+)')
    for line in results.split('\n'):
        found = version_pattern.search(line)
        if not found:
            continue
        # First matching line wins.
        productKey = get_productKey(found.group('version'))
        openstack_om = ObjectMap({
            'compname': 'os',
            'setProductKey': MultiArgs(productKey, 'OpenStack')
        })
        return [
            ObjectMap({
                'setApplyDataMapToOpenStackInfrastructureEndpoint':
                openstack_om
            })
        ]
    return []
def add_maps(self, result, ds):
    """Return a list of ObjectMaps with config properties updates for
    this regionserver and all its regions.
    """
    oms = []
    conf = ConfWrapper(result)
    # Region-server-level configuration properties.
    # NOTE(review): this map targets self.component while the region
    # maps below use ds.component -- confirm the mismatch is intentional.
    oms.append(
        ObjectMap({
            "compname": "hbase_servers/{}".format(self.component),
            "modname": "Region Server conf",
            'handler_count': conf.handler_count,
            'memstore_upper_limit': conf.memstore_upper_limit,
            'memstore_lower_limit': conf.memstore_lower_limit,
            'logflush_interval': conf.logflush_interval
        }))
    # All the regions within the region server will have the same
    # configuration as set in the region server's conf file.
    for region in ds.region_ids:
        oms.append(
            ObjectMap({
                "compname": "hbase_servers/{}/regions/{}{}{}".format(
                    ds.component, ds.component, NAME_SPLITTER,
                    prepId(region)),
                "modname": "Region conf",
                # NOTE(review): attribute is spelled "memestore_flush_size"
                # -- verify against ConfWrapper; may be a typo there.
                'memstore_flush_size': convToUnits(conf.memestore_flush_size),
                'max_file_size': convToUnits(conf.max_file_size)
            }))
    return oms
def process(self, device, results, log): log.info('Collecting docker containers for device %s' % device.id) # Change results into a list of of results. One element per command. results = results_from_result(results) maps = [] # Map device "docker_version" property. if results[0].startswith("Docker version"): log.info("%s: %s", device.id, results[0]) maps.append(ObjectMap({'docker_version': results[0]})) else: log.info("%s: no docker version", device.id) maps.append(ObjectMap({"docker_version": None})) try: rows = parsing.rows_from_output( results[1], expected_columns=[ "CONTAINER ID", "IMAGE", "COMMAND", "CREATED", "PORTS", "NAMES", ]) except parsing.MissingColumnsError: log.info("%s: unexpected docker ps output", device.id) return maps rm = self.relMap() maps.append(rm) if not rows: log.info("%s: no docker containers found", device.id) return maps try: cgroup_path = parsing.cgroup_path_from_output(results[2]) except parsing.CgroupPathNotFound: log.info("%s: no cgroup path found. The default value '/sys/fs/cgroup' is set", device.id) cgroup_path = "/sys/fs/cgroup" for row in rows: rm.append( self.objectMap({ "id": row["CONTAINER ID"], "title": row["NAMES"], "image": row["IMAGE"], "command": row["COMMAND"], "created": row["CREATED"], "ports": row["PORTS"], "cgroup_path": cgroup_path, })) log.info("%s: found %s Docker containers", device.id, len(rm.maps)) return maps
def base_relmap(self):
    """Return a RelationshipMap holding two stub interfaces (eth0, eth1)."""
    interface_maps = [ObjectMap({"id": name}) for name in ("eth0", "eth1")]
    return RelationshipMap(
        compname="os",
        relname="interfaces",
        modname="Products.ZenModel.IpInterface",
        objmaps=interface_maps,
    )
def test_updateDevice(self):
    """Test updating device properties."""
    rack_slot_data = {"rackSlot": "near-the-top"}

    # The first application must change the device.
    changed = self.service.applyDataMap(self.device,
                                        ObjectMap(rack_slot_data))
    self.assertTrue(changed, "update Device failed")
    self.assertEqual("near-the-top", self.device.rackSlot)

    # Re-applying the identical map must be a no-op.
    changed = self.service.applyDataMap(self.device,
                                        ObjectMap(rack_slot_data))
    self.assertFalse(changed, "updateDevice not idempotent")
def process(self, device, results, log):
    """Map SNMP memory/swap scalars and log discovered disks.

    ``results`` is a (getdata, tabledata) tuple: getdata holds the
    "snmpGetMap" scalar results, tabledata the "snmpGetTableMaps"
    table results.
    """
    log.info("Modeler %s processing data for device %s", self.name(),
             device.id)

    getdata, tabledata = results

    # getdata contents..
    # {'memTotalReal': 2058776, 'memTotalSwap': 720888}
    # tabledata contents..
    # {'diskIOTable': {'1': {'device': 'ram0', 'index': 1}, ...}}

    maps = []

    # Total memory goes on the hardware (hw) component.  The SNMP value
    # is supplied in kilobytes and the model stores bytes, hence * 1024.
    if getdata['memTotalReal']:
        maps.append(
            ObjectMap({'totalMemory': getdata['memTotalReal'] * 1024},
                      compname='hw'))

    # Total swap goes on the operating system (os) component.
    if getdata['memTotalSwap']:
        maps.append(
            ObjectMap({'totalSwap': getdata['memTotalSwap'] * 1024},
                      compname='os'))

    # Log each disk returned from our GetTableMap.  Bug fix: default to
    # {} so a missing 'diskIOTable' no longer raises AttributeError on
    # None.items().
    for snmpindex, disk in tabledata.get('diskIOTable', {}).items():
        log.info("Found disk %s", disk['device'])

    # Valid returns are an ObjectMap, a RelationshipMap, a list of both,
    # or None (None leaves the existing model untouched).
    return maps
def test_updateDeviceHW(self):
    """Test updating device.hw properties."""
    memory_data = {
        "compname": "hw",
        "totalMemory": 45097156608,
    }

    # The first application must modify device.hw.
    changed = self.service.applyDataMap(self.device, ObjectMap(memory_data))
    self.assertTrue(changed, "device.hw not changed by first ObjectMap")
    self.assertEqual(45097156608, self.device.hw.totalMemory)

    # Applying the same map again must report no change.
    changed = self.service.applyDataMap(self.device, ObjectMap(memory_data))
    self.assertFalse(changed, "update is not idempotent")
def process(self, device, results, log):
    """Process results. Return iterable of datamaps or None."""
    maps = []
    pools = []
    serverdict = {}
    devicedata = {}

    parsed = json.loads(results)
    for key, value in parsed.iteritems():
        if isinstance(value, dict):
            # A dict value describes a server pool.
            log.info('Pool %s\n' % (key))
            poolName = self.prepId(key)
            servers_in_pool = {}
            for subkey, subvalue in value.iteritems():
                if isinstance(subvalue, dict):
                    # A nested dict is an individual server entry.
                    servers_in_pool[subkey] = subvalue
                else:
                    log.info(
                        'Server pool attributes - key is %s and value is %s'
                        % (subkey, subvalue))
            serverdict[poolName] = servers_in_pool
            pools.append(ObjectMap(data={
                'id': poolName,
                'title': poolName,
            }))
        elif key in ['version', 'uptime', 'curr_connections']:
            if key == 'uptime':
                # uptime in seconds so convert to days
                value = int(value / 86400)
            device_attr = 'twemproxy_' + key
            devicedata[device_attr] = value
            log.info(
                'Twemproxy device attributes - key is %s and value is %s'
                % (device_attr, value))

    maps.append(ObjectMap(
        modname='ZenPacks.community.zplib.twemproxy.TwemproxyDevice',
        data=devicedata))
    maps.append(RelationshipMap(
        relname='twemproxyServerPools',
        modname='ZenPacks.community.zplib.twemproxy.TwemproxyServerPool',
        objmaps=pools))
    maps.extend(self.getTwemproxyServerMap(device, serverdict, log))
    return maps
def process(self, device, results, log):
    """Assemble neutron configuration data for the OpenStack endpoint.

    Reads parsed ini files out of ``results`` (keyed by filename) and
    returns a single ObjectMap that forwards the collected settings via
    setApplyDataMapToOpenStackInfrastructureEndpoint.  Returns None when
    prerequisites (neutron.conf, plugin_names) are missing.
    """
    log.info("Modeler %s processing data for device %s", self.name(),
             device.id)

    if 'neutron.conf' not in results:
        log.info("No neutron ini files to process.")
        return

    data = {
        'neutron_core_plugin': None,
        'neutron_mechanism_drivers': [],
        'neutron_type_drivers': [],
        'set_neutron_ini': {}
    }

    if 'plugin_names' not in results:
        log.error("No neutron implementation plugins were identified, unable to continue.")
        return

    if results['neutron.conf']:
        filename = 'neutron.conf'
        ini = results[filename]
        data['neutron_core_plugin'] = self.ini_get(device, filename, ini, 'DEFAULT', 'core_plugin', required=True)

    if data['neutron_core_plugin']:
        # ML2 keeps its driver lists in a separate plugin ini file.
        if data['neutron_core_plugin'] in ('neutron.plugins.ml2.plugin.Ml2Plugin', 'ml2'):
            filename = 'plugins/ml2/ml2_conf.ini'
            ini = results[filename]
            if ini:
                data['neutron_type_drivers'] = split_list(self.ini_get(device, filename, ini, 'ml2', 'type_drivers', required=True))
                data['neutron_mechanism_drivers'] = split_list(self.ini_get(device, filename, ini, 'ml2', 'mechanism_drivers', required=True))

    for plugin_name in results['plugin_names']:
        # See if we have any plugins registered for the core module
        # (if not ML2) or mechanism type (if ML2)
        plugin = zope.component.queryUtility(INeutronImplementationPlugin, plugin_name)
        if not plugin:
            continue

        log.debug("(Process) Using plugin '%s'" % plugin_name)

        # Required options are fetched with required=True (the getter
        # presumably reports missing ones); optional ones without.
        for filename, section, option in plugin.ini_required():
            ini = results.get(filename, None)
            if ini:
                data['set_neutron_ini'][(filename, section, option)] = self.ini_get(device, filename, ini, section, option, required=True)

        for filename, section, option in plugin.ini_optional():
            ini = results.get(filename, None)
            if ini:
                data['set_neutron_ini'][(filename, section, option)] = self.ini_get(device, filename, ini, section, option)

    return ObjectMap({'setApplyDataMapToOpenStackInfrastructureEndpoint': ObjectMap(data)})
def process(self, device, results, log):
    """collect snmp information from this device"""
    # Fixed: removed the unused function-local `import re`.
    log.info('processing %s for device %s', self.name(), device.id)
    getdata, tabledata = results
    om = self.objectMap(getdata)
    # Multiply by 4096 -- presumably 4 KB pages to bytes; TODO confirm.
    om.totalSwap = om.totalSwap * 4096
    maps = []
    # Only model memory/swap when the agent returned a positive total.
    if om.totalMemory > 0:
        maps.append(
            ObjectMap({"totalMemory": long(om.totalMemory)}, compname="hw"))
        maps.append(ObjectMap({"totalSwap": om.totalSwap}, compname="os"))
    return maps
def process(self, device, results, log):
    """collect snmp information from this device"""
    log.info('processing %s for device %s', self.name(), device.id)
    maps = []
    for record in results["Win32_OperatingSystem"]:
        # WMI sizes are in KB; the model stores bytes, hence * 1024.
        visible_kb = record.TotalVisibleMemorySize
        if visible_kb:
            maps.append(ObjectMap({"totalMemory": int(visible_kb) * 1024},
                                  compname="hw"))
        virtual_kb = record.TotalVirtualMemorySize
        if virtual_kb:
            maps.append(ObjectMap({"totalSwap": int(virtual_kb) * 1024},
                                  compname="os"))
    return maps
def process(self, device, results, log):
    """Collect command-line information from this device"""
    log.info("Processing the uname -a info for device %s" % device.id)
    # Fixed: removed the unused `rm = self.relMap()` local.
    maps = []
    output = results.split('\n')
    # NOTE(review): relies on fixed positions in the command output
    # (line 4 field 1 = memory, line 2 field 1 = swap); an IndexError
    # here means the output format changed -- confirm against collector.
    maps.append(
        ObjectMap({"totalMemory": int(output[4].split()[1])}, compname="hw"))
    maps.append(
        ObjectMap({"totalSwap": int(output[2].split()[1])}, compname="os"))
    return maps
def makeLVMap(self, columns):
    """Build an ObjectMap for one logical-volume row.

    Depending on the row's fields the map is targeted at the
    snapshotVolumes, thinPools or logicalVolumes relationship.
    """
    # lv_name,vg_name,lv_attr,lv_size,lv_uuid,origin
    lv_om = ObjectMap()
    lv_om.title = columns['lv_name']
    lv_om.vgname = columns['vg_name']
    lv_om.id = 'lv-{}'.format(self.prepId(columns['vg_name'])+'_'+self.prepId(columns['lv_name']))
    lv_om.attributes = self.lvm_parser.lv_attributes(columns['lv_attr'])
    lv_om.lvsize = int(columns['lv_size'])
    lv_om.uuid = columns['lv_uuid']
    if columns['origin']:
        # An origin volume means this LV is a snapshot of another volume.
        lv_om.origin = columns['origin']
        lv_om.relname = 'snapshotVolumes'
        lv_om.modname = 'ZenPacks.zenoss.LinuxMonitor.SnapshotVolume'
    elif columns['lv_metadata_size']:
        # Metadata size marks a thin pool; re-key the id with a tp- prefix.
        lv_om.id = 'tp-{}'.format(self.prepId(columns['vg_name'])+'_'+self.prepId(columns['lv_name']))
        lv_om.metadatasize = int(columns['lv_metadata_size'])
        lv_om.relname = 'thinPools'
        lv_om.modname = 'ZenPacks.zenoss.LinuxMonitor.ThinPool'
    else:
        # Plain logical volume; link it to a thin pool when one is named.
        lv_om.set_thinPool = None
        if columns['pool_lv']:
            lv_om.set_thinPool = 'tp-{}'.format(self.prepId(columns['vg_name'])+'_'+self.prepId(columns['pool_lv']))
        lv_om.relname = 'logicalVolumes'
        lv_om.modname = 'ZenPacks.zenoss.LinuxMonitor.LogicalVolume'
    return lv_om
def _validate_datamap(device, datamap, relname=None, compname=None, modname=None, parentId=None):
    """Normalize *datamap* into a RelationshipMap or IncrementalDataMap.

    Branch order matters: an explicit ``relname`` takes precedence over
    the datamap's own type, so even an ObjectMap argument is wrapped
    into a RelationshipMap when relname is supplied.
    """
    if isinstance(datamap, RelationshipMap):
        log.debug('_validate_datamap: got valid RelationshipMap')
    elif relname:
        # Caller supplied relationship coordinates: build the map here.
        log.debug('_validate_datamap: build relationship_map using relname')
        datamap = RelationshipMap(relname=relname,
                                  compname=compname,
                                  modname=modname,
                                  objmaps=datamap,
                                  parentId=parentId)
    elif isinstance(datamap, IncrementalDataMap):
        log.debug('_validate_datamap: got valid IncrementalDataMap')
    elif isinstance(datamap, ObjectMap):
        log.debug('_validate_datamap: got valid ObjectMap')
        datamap = IncrementalDataMap(device, datamap)
    else:
        # Anything else (e.g. a plain dict): wrap as ObjectMap first,
        # then promote to an IncrementalDataMap.
        log.debug('_validate_datamap: build object_map')
        datamap = ObjectMap(datamap, compname=compname, modname=modname)
        datamap = IncrementalDataMap(device, datamap)
    return datamap
def parse_result(self, dsconfs, result):
    """Yield (dsconf, value) pairs parsed from the command result.

    ``value`` is (instance id, status, component ObjectMap).
    """
    if result.exit_code != 0:
        counters = [dsconf.params['resource'] for dsconf in dsconfs]
        # Bug fix: the original logged `dsconf.device`, referencing the
        # leaked list-comprehension variable -- a NameError on Python 3
        # and fragile on Python 2.  Use the first dsconf explicitly.
        log.info(
            'Non-zero exit code ({0}) for counters, {1}, on {2}'.format(
                result.exit_code, counters, dsconfs[0].device))
        return

    # Parse values
    stdout = parse_stdout(result)
    if stdout:
        status, name = stdout
        dsconf0 = dsconfs[0]
        compObject = ObjectMap()
        compObject.id = dsconf0.params['instanceid']
        compObject.title = dsconf0.params['instancename']
        compObject.compname = dsconf0.params['contextcompname']
        compObject.modname = dsconf0.params['contextmodname']
        compObject.relname = dsconf0.params['contextrelname']

        # NOTE(review): every dsconf is paired with dsconf0's instance
        # id and the shared compObject -- confirm that is intentional.
        for dsconf in dsconfs:
            value = (dsconf0.params['instanceid'], status.strip(),
                     compObject)
            yield dsconf, value
    else:
        log.debug('Error in parsing mssql instance data')
def parse_result(self, dsconfs, result):
    """Yield (dsconf, value, timestamp) triples for cluster service data.

    ``value`` is (service name, state, component ObjectMap).
    """
    if result.exit_code != 0:
        counters = [dsconf.params['resource'] for dsconf in dsconfs]
        # Bug fix: `dsconf.device` referenced the leaked comprehension
        # variable (NameError on Python 3); use dsconfs[0] explicitly.
        log.info(
            'Non-zero exit code ({0}) for counters, {1}, on {2}'.format(
                result.exit_code, counters, dsconfs[0].device))
        return

    # Parse values
    stdout = parse_stdout(result, check_stderr=True)
    if stdout:
        name, iscoregroup, ownernode, state, description, nodeid,\
            priority = stdout
        dsconf0 = dsconfs[0]
        compObject = ObjectMap()
        compObject.id = prepId(nodeid)
        compObject.title = name
        compObject.coregroup = iscoregroup
        compObject.ownernode = ownernode
        compObject.state = state
        compObject.description = description
        compObject.priority = priority
        compObject.compname = dsconf0.params['contextcompname']
        compObject.modname = dsconf0.params['contextmodname']
        compObject.relname = dsconf0.params['contextrelname']

        for dsconf in dsconfs:
            value = (name, state, compObject)
            timestamp = int(time.mktime(time.localtime()))
            yield dsconf, value, timestamp
    else:
        log.debug('Error in parsing cluster service data')
def add_maps(self, res, ds): """ Check for added/removed regionservers and return a RelationshipMap if any changes took place. Otherwise return ObjectMap which only cleares the events of non-existiong components. """ # Check for removed/added region servers. dead_nodes = [prepId(dead_node_name(node)[0]) for node in self.dead] live_nodes = [prepId(node['name']) for node in self.live] nodes = set(dead_nodes + live_nodes) self.added = list(nodes.difference(set(ds.regionserver_ids))) self.removed = list(set(ds.regionserver_ids).difference(nodes)) # Check for removed/added regions. regions = set( region.get('name') for node in self.live for region in node.get('Region')) change = regions.symmetric_difference(ds.region_ids) # Remodel Regions and RegionServers only if some of them # were added/removed. if self.added or self.removed or change: ds.id = ds.device result = {'status': res, 'conf': None} return HBaseCollector().process(ds, result, log) # If nothing changed, just clear events. return [ObjectMap({'getClearEvents': True})]
def process(self, device, results, log):
    """Model ESXi datastores into an esxiDatastore RelationshipMap."""
    log.debug(' Start of process - results is %s \n' % (results))
    maps = []
    datastores = []
    for datastore in results:
        summary = datastore.summary
        datastoreDict = {
            'id': self.prepId(summary.name),
            'title': summary.name,
            'type': summary.type,
            'capacity': long(summary.capacity),
        }
        # Skip datastores the host cannot currently reach.
        if int(summary.accessible) != 1:
            log.warning('Datastore %s of device %s is not accessible'
                        % (datastoreDict['id'], device.id))
            continue
        datastores.append(ObjectMap(data=datastoreDict))
        log.debug(' datastoreDict is %s \n' % (datastoreDict))
        log.debug('VM Datastore is %s \n' % (datastoreDict['id']))
    maps.append(RelationshipMap(
        relname='esxiDatastore',
        modname='ZenPacks.community.VMwareESXiMonitorPython.ESXiDatastore',
        objmaps=datastores))
    return maps
def model_connectors(self, results, log):
    """Group connectors by ring and return one RelationshipMap per ring."""
    log.debug('model_connectors data: {}'.format(results))

    by_ring = {}
    for entry in results:
        by_ring.setdefault(entry['ring'], []).append(entry)

    rel_maps = []
    for ring, connectors in by_ring.items():
        compname = 'scalitySupervisors/Supervisor/scalityRings/{}'.format(
            ring)
        object_maps = []
        for connector in connectors:
            connector_id = connector['id']
            om = ObjectMap()
            om.id = self.prepId(connector_id)
            om.title = connector['name']
            om.connector_id = connector_id
            om.protocol = connector['protocol']
            om.detached = connector['detached']
            om.address = connector['address']
            om.ring = connector['ring']
            object_maps.append(om)
        rel_maps.append(RelationshipMap(
            compname=compname,
            relname='scalityConnectors',
            modname='ZenPacks.community.Scality.ScalityConnector',
            objmaps=object_maps))
    return rel_maps
def model_nodes(self, results, log):
    """Group nodes by ring and return one RelationshipMap per ring."""
    log.debug('model_nodes data: {}'.format(results))

    by_ring = {}
    for entry in results:
        by_ring.setdefault(entry['ring'], []).append(entry)

    rel_maps = []
    for ring, nodes in by_ring.items():
        compname = 'scalitySupervisors/Supervisor/scalityRings/{}'.format(
            ring)
        object_maps = []
        for node in nodes:
            node_name = node['name']
            om = ObjectMap()
            om.id = self.prepId('{}_{}'.format(ring, node_name))
            om.title = node_name
            om.ring = ring
            # TODO: not safe
            om.admin_endpoint = '{}:{}'.format(
                node['admin_address'], node['admin_port'])
            om.chord_endpoint = '{}:{}'.format(
                node['chord_address'], node['chord_port'])
            om.server_endpoint = node['server']
            object_maps.append(om)
        rel_maps.append(RelationshipMap(
            compname=compname,
            relname='scalityNodes',
            modname='ZenPacks.community.Scality.ScalityNode',
            objmaps=object_maps))
    return rel_maps
def model_servers(self, servers, log):
    """Return a RelationshipMap of Scality servers under the Supervisor."""
    log.debug('model_servers data: {}'.format(servers))

    server_maps = []
    for server in servers:
        server_ip = server['management_ip_address']
        om = ObjectMap()
        # TODO: Use something else than IP address to ID the server
        om.id = self.prepId(server_ip)
        om.title = server['name']
        om.server_type = server['server_type']
        om.ip_address = server_ip
        om.zone = server['zone']
        # TODO: check usage of id in datasource
        om.server_id = server['id']
        # TODO: BUG since 8.x : TypeError: string indices must be integers
        rings = server['rings']
        if rings and isinstance(rings[0], dict):
            # Supervisor 7.4.6.1 returns a list of ring dicts.
            ring_names = [r['name'] for r in server['rings']]
        else:
            # Supervisor 8.3.0.5 returns a list of ring names.
            ring_names = list(server['rings'])
        om.rings = ', '.join(sorted(ring_names))
        om.roles = ', '.join(sorted(server['roles']))
        om.disks = ', '.join(server['disks'])
        server_maps.append(om)

    return RelationshipMap(
        compname='scalitySupervisors/Supervisor',
        relname='scalityServers',
        modname='ZenPacks.community.Scality.ScalityServer',
        objmaps=server_maps)