def process(self, device, results, log):
    """Turn collected Bamboo data into relationship maps.

    Must return one of :
    - None, changes nothing. Good in error cases.
    - A RelationshipMap, for the device to component information
    - An ObjectMap, for the device device information
    - A list of RelationshipMaps and ObjectMaps, both
    """
    log.debug('Process results: {}'.format(results))
    maps = []
    bamboo_data = results.get('bamboo', '')
    if bamboo_data:
        server_name = 'Bamboo {}'.format(bamboo_data['version'])
        om_server = ObjectMap()
        om_server.id = self.prepId(server_name)
        om_server.title = server_name
        maps.append(
            RelationshipMap(
                relname='bambooServers',
                modname='ZenPacks.community.Bamboo.BambooServer',
                compname='',
                objmaps=[om_server]))
    log.debug('{}: process maps:{}'.format(device.id, maps))
    return maps
def get_oss_rel_maps(self, es_response):
    """Return a RelationshipMap of object storage servers (OSS).

    Management addresses are assigned positionally from the
    zESOssNodes zProperty (a comma-separated list); 'null' is used
    when the list is shorter than the number of discovered OSS hosts.
    """
    oss = getattr(self.device, 'zESOssNodes', None)
    # zESOssNodes may be unset on the device: guard against
    # AttributeError on None (the old code called oss.split() blindly).
    oss_list = oss.split(",") if oss else []
    fs_dict = es_response.get('host', {})
    nw_data = self.filter_server(fs_dict, 'oss')
    object_maps = []
    for i, (key, val) in enumerate(nw_data.items()):
        log.debug("XXX KEY: %s and VALUE: %r", key, val)
        if val.get('id') is None:
            val['id'] = str(key)
        if val.get('title') is None:
            val['title'] = str(key)
        try:
            val['management_address'] = oss_list[i]
        except IndexError:
            # More OSS hosts than configured addresses.
            val['management_address'] = 'null'
        # Create Object Map and add it to the relationship's objmaps.
        om = self.objectMap()
        om.updateFromDict(val)
        object_maps.append(om)
    return RelationshipMap(
        relname=EXMODEL['objectStorageServers']['relname'],
        modname=EXMODEL['objectStorageServers']['modname'],
        objmaps=object_maps)
def get_inputs(self, results, log):
    """Build the powerNetInputs RelationshipMap from SNMP table data."""
    getdata, tabledata = results
    input_maps = []
    for snmpindex, row in tabledata.get('upsPhaseInputTable', {}).items():
        snmpindex = snmpindex.strip('.')
        log.debug('snmpindex:{}'.format(snmpindex))
        log.debug('row:{}'.format(row))
        index = row.get('upsPhaseInputTableIndex')
        input_maps.append({
            'id': self.prepId('Input_{}'.format(index)),
            'title': self.prepId(row.get('upsPhaseInputName')),
            'snmpindex': snmpindex,
            'index': index,
            'numPhases': row.get('upsPhaseNumInputPhases'),
            # Translate raw SNMP enum values through the plugin's maps.
            'orientation': self.voltageOrMap[
                int(row.get('upsPhaseInputVoltageOrientation'))],
            'inputType': self.typeMap[int(row.get('upsPhaseInputType'))],
        })
    rel_map = RelationshipMap(
        relname='powerNetInputs',
        modname='ZenPacks.community.PowerNet.PowerNetInput',
        compname=self.compname,
        objmaps=input_maps,
    )
    log.debug('get_inputs: {}'.format(rel_map))
    return rel_map
def process(self, device, results, log):
    """Map Win32_Service query results to winrmservices components."""
    log.info("Modeler %s processing data for device %s",
             self.name(), device.id)
    rm = self.relMap()
    for service in results.get('Win32_Service', ()):
        om = self.objectMap()
        om.id = self.prepId(service.Name)
        om.serviceName = service.Name
        om.caption = service.Caption
        om.setServiceClass = {
            'name': service.Name,
            'description': service.Caption
        }
        om.pathName = service.PathName
        om.serviceType = service.ServiceType
        om.startMode = service.StartMode
        om.startName = service.StartName
        om.description = service.Description
        rm.append(om)
    # NOTE(review): the leading empty winrmservices map looks intended
    # to flush the relationship before rm repopulates it — confirm.
    return [
        RelationshipMap(relname="winrmservices", compname='os', objmaps=[]),
        rm,
    ]
def model_nodes(self, results, log):
    """Group node entries by ring and return one RelationshipMap per ring."""
    log.debug('model_nodes data: {}'.format(results))
    rings = {}
    for entry in results:
        rings.setdefault(entry['ring'], []).append(entry)
    rel_maps = []
    for ring, nodes in rings.items():
        node_maps = []
        for node in nodes:
            node_name = node['name']
            om = ObjectMap()
            om.id = self.prepId('{}_{}'.format(ring, node_name))
            om.title = node_name
            om.ring = ring
            # TODO: not safe
            om.admin_endpoint = '{}:{}'.format(
                node['admin_address'], node['admin_port'])
            om.chord_endpoint = '{}:{}'.format(
                node['chord_address'], node['chord_port'])
            om.server_endpoint = node['server']
            node_maps.append(om)
        rel_maps.append(
            RelationshipMap(
                compname='scalitySupervisors/Supervisor/'
                         'scalityRings/{}'.format(ring),
                relname='scalityNodes',
                modname='ZenPacks.community.Scality.ScalityNode',
                objmaps=node_maps))
    return rel_maps
def get_clusters_rel_maps(self, clusters_response):
    """Yield one clusters RelationshipMap per zone/pod container path."""
    by_compname = {}
    for cluster in clusters_response.get('cluster', []):
        zone_id = self.prepId('zone%s' % cluster['zoneid'])
        pod_id = self.prepId('pod%s' % cluster['podid'])
        cluster_id = self.prepId('cluster%s' % cluster['id'])
        compname = 'zones/%s/pods/%s' % (zone_id, pod_id)
        by_compname.setdefault(compname, []).append(
            ObjectMap(data={
                'id': cluster_id,
                'title': cluster.get('name', cluster_id),
                'cloudstack_id': cluster['id'],
                'allocation_state': cluster.get('allocationstate', ''),
                'cluster_type': cluster.get('clustertype', ''),
                'hypervisor_type': cluster.get('hypervisortype', ''),
                'managed_state': cluster.get('managedstate', ''),
            }))
    for compname, obj_maps in by_compname.items():
        yield RelationshipMap(
            compname=compname,
            relname='clusters',
            modname='ZenPacks.zenoss.CloudStack.Cluster',
            objmaps=obj_maps)
def add_maps(self, res, ds):
    """
    Check for added/removed tables and return a RelationshipMap if
    any changes took place. Otherwise return empty list.
    """
    try:
        res = json.loads(res)
    except ValueError:
        log.error(
            'Error parsing collected data for {} monitoring template'.
            format(ds.template))
        res = []
    if not res:
        return []
    # A payload without a 'table' key would make res.get('table')
    # return None, which is not iterable; default to an empty list.
    tables_update = set(
        table['name'] for table in res.get('table') or [])
    self.added = list(tables_update.difference(set(ds.table_ids)))
    self.removed = list(set(ds.table_ids).difference(tables_update))
    if self.added or self.removed:
        tables_oms = [
            ObjectMap({
                'id': prepId(table),
                'title': table
            }) for table in tables_update
        ]
        return [
            RelationshipMap(relname='hbase_tables',
                            modname=MODULE_NAME['HBaseTable'],
                            objmaps=tables_oms)
        ]
    return []
def getRedisDbRelMap(self, device, dbData, log):
    """Build redisDbs RelationshipMaps, one per redis port component.

    dbData maps port -> {db name (e.g. 'db0') -> info dict with
    optional 'avg_ttl' and 'keys' entries}.
    """
    rel_maps = []
    log.info('In getRedisDbRelMap - dbData is %s \n' % (dbData))
    for port, dbs in dbData.iteritems():
        compname = 'redisPorts/%s' % (port)
        object_maps = []
        for db_name, db_info in dbs.iteritems():
            # Keep all trailing digits: 'db12' must yield 12, not 2.
            # (The previous k1[-1] read only the last character.)
            db_number = int(db_name.lstrip('db'))
            component_id = port + '_' + self.prepId(db_name)
            object_maps.append(
                ObjectMap(
                    data={
                        'id': component_id,
                        'title': component_id,
                        'db_number': db_number,
                        'db_avg_ttl': db_info.get('avg_ttl', None),
                        'db_keys': db_info.get('keys', None),
                    }))
        rel_maps.append(
            RelationshipMap(
                compname=compname,
                relname='redisDbs',
                modname='ZenPacks.community.zplib.Redis.RedisDb',
                objmaps=object_maps))
    return rel_maps
def relMap(self): """Create a relationship map. """ relmap = RelationshipMap() relmap.relname = self.relname relmap.compname = self.compname return relmap
def instances_rm(region_id, reservations):
    '''
    Return instances RelationshipMap given region_id and an InstanceInfo
    ResultSet.
    '''
    objmaps = []
    for instance in chain.from_iterable(r.instances for r in reservations):
        placement = instance.placement
        subnet = instance.subnet_id
        objmaps.append({
            'id': prepId(instance.id),
            'title': name_or(instance.tags, instance.id),
            'instance_id': instance.id,
            'public_dns_name': instance.public_dns_name,
            'private_ip_address': instance.private_ip_address,
            'image_id': instance.image_id,
            'instance_type': instance.instance_type,
            'launch_time': instance.launch_time,
            'state': instance.state,
            'platform': getattr(instance, 'platform', ''),
            'detailed_monitoring': instance.monitored,
            'setZoneId': prepId(placement) if placement else None,
            'setVPCSubnetId': prepId(subnet) if subnet else None,
        })
    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='instances',
        modname=MODULE_NAME['EC2Instance'],
        objmaps=objmaps)
def volumes_rm(region_id, volumes):
    '''
    Return volumes RelationshipMap given region_id and a VolumeInfo
    ResultSet.
    '''
    volume_data = []
    for volume in volumes:
        if volume.attach_data.instance_id:
            instance_id = prepId(volume.attach_data.instance_id)
        else:
            instance_id = None

        volume_data.append({
            'id': prepId(volume.id),
            'title': name_or(volume.tags, volume.id),
            'volume_type': volume.type,
            'create_time': volume.create_time,
            # boto reports Volume.size in GiB; convert to bytes.
            # The previous "/ (1024 ** 3)" truncated every volume
            # to 0 under integer division.
            'size': volume.size * (1024 ** 3),
            'iops': volume.iops,
            'status': volume.status,
            'attach_data_status': volume.attach_data.status,
            'attach_data_devicepath': volume.attach_data.device,
            'setInstanceId': instance_id,
            'setZoneId': volume.zone,
        })
    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='volumes',
        modname=MODULE_NAME['EC2Volume'],
        objmaps=volume_data)
def vpc_subnets_rm(region_id, subnets):
    '''
    Return vpc_subnets RelationshipMap given region_id and a SubnetInfo
    ResultSet.
    '''
    objmaps = [{
        'id': prepId(subnet.id),
        'title': name_or(subnet.tags, subnet.id),
        'available_ip_address_count': subnet.available_ip_address_count,
        'cidr_block': subnet.cidr_block,
        'defaultForAz': to_boolean(subnet.defaultForAz),
        'mapPublicIpOnLaunch': to_boolean(subnet.mapPublicIpOnLaunch),
        'state': subnet.state,
        'setVPCId': subnet.vpc_id,
        'setZoneId': subnet.availability_zone,
    } for subnet in subnets]
    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='vpc_subnets',
        modname=MODULE_NAME['EC2VPCSubnet'],
        objmaps=objmaps)
def _node_manager_oms(self, data, device):
    """
    Build Node Manager object maps from ResourceManager jmx output.
    """
    # Locate the serialized live node manager list in the jmx beans.
    live_node_mgrs = []
    for bean in data['beans']:
        if bean['name'] == 'Hadoop:service=ResourceManager,name=RMNMInfo':
            live_node_mgrs = bean.setdefault('LiveNodeManagers', [])
            break
    # LiveNodeManagers is itself a JSON-encoded string when present.
    if live_node_mgrs:
        live_node_mgrs = json.loads(live_node_mgrs)

    comp = self._dict_components['HadoopNodeManager']
    node_mgr_oms = []
    for node_mgr in live_node_mgrs:
        address = prep_ip(device, node_mgr['NodeHTTPAddress'])
        node_mgr_oms.append(
            ObjectMap({
                'id': prepId(device.id + NAME_SPLITTER + address),
                'title': address,
                'node_type': comp[0],
            }))
    return RelationshipMap(relname=comp[1],
                           modname=MODULE_NAME['HadoopNodeManager'],
                           objmaps=node_mgr_oms)
def getSpareDisksRelMaps(self, spares, compname):
    """Return a one-element list with the spare_disks RelationshipMap."""
    # Attributes copied verbatim from each spare's data dict.
    fields = (
        'node', 'disk_uid', 'raid_state', 'raid_type', 'bay',
        'byte_per_sector', 'disk_type', 'rpm', 'model', 'serialnr',
        'firmware', 'poweron_hours', 'grown_defect_list_count',
        'total_bytes',
    )
    obj_maps = []
    for name, data in spares.iteritems():
        props = dict((field, data[field]) for field in fields)
        props['id'] = prepId(name)
        props['sparedisk_name'] = name
        obj_maps.append(ObjectMap(data=props))
    return [RelationshipMap(
        compname=compname,
        relname='spare_disks',
        modname='ZenPacks.CS.NetApp.SevenMode.SpareDisk',
        objmaps=obj_maps)]
def get_zones_rel_maps(self, zones_response):
    """Yield a single zones RelationshipMap for all zones in the response."""
    zone_oms = []
    for zone in zones_response.get('zone', []):
        zone_id = self.prepId('zone%s' % zone['id'])
        zone_oms.append(ObjectMap(data={
            'id': zone_id,
            'title': zone.get('name', zone_id),
            'cloudstack_id': zone['id'],
            'allocation_state': zone.get('allocationstate', ''),
            'guest_cidr_address': zone.get('guestcidraddress', ''),
            'dhcp_provider': zone.get('dhcpprovider', ''),
            'dns1': zone.get('dns1', ''),
            'dns2': zone.get('dns2', ''),
            'internal_dns1': zone.get('internaldns1', ''),
            'internal_dns2': zone.get('internaldns2', ''),
            'network_type': zone.get('networktype', ''),
            'security_groups_enabled': zone.get('securitygroupsenabled', ''),
            'vlan': zone.get('vlan', ''),
            'zone_token': zone.get('zonetoken', ''),
        }))
    yield RelationshipMap(
        relname='zones',
        modname='ZenPacks.zenoss.CloudStack.Zone',
        objmaps=zone_oms)
def getAppInstancesRelMaps(self, appId, instances, compname):
    """Return a one-element list with the cfAppInstances RelationshipMap
    for one application.
    """
    obj_maps = []
    for data in instances:
        instance_id = prepId(str(data['index']))
        stats = data['stats']['stats']
        usage = stats['usage']
        obj_maps.append(ObjectMap(data={
            'id': '{0}_{1}'.format(appId, instance_id),
            'title': instance_id,
            'cfIndex': data['index'],
            'cfState': data['state'],
            'cfSince': data['since'],
            'cfHost': stats['host'],
            'cfPort': stats['port'],
            'cfCores': stats['cores'],
            'modeled_quotaMemory': stats['mem_quota'],
            'modeled_quotaDisk': stats['disk_quota'],
            'modeled_usageCPU': usage['cpu'],
            # * 1024: presumably KB -> bytes conversion; confirm
            # against the CloudFoundry stats API.
            'modeled_usageMemory': usage['mem'] * 1024,
            'modeled_usageDisk': usage['disk'] * 1024,
        }))
    return [RelationshipMap(
        compname=compname,
        relname='cfAppInstances',
        modname='ZenPacks.zenoss.CloudFoundry.AppInstance',
        objmaps=obj_maps)]
def get_pods_rel_maps(self, pods_response):
    """Yield one pods RelationshipMap per parent zone path."""
    by_compname = {}
    for pod in pods_response.get('pod', []):
        zone_id = self.prepId('zone%s' % pod['zoneid'])
        pod_id = self.prepId('pod%s' % pod['id'])
        compname = 'zones/%s' % zone_id
        by_compname.setdefault(compname, []).append(
            ObjectMap(data={
                'id': pod_id,
                'title': pod.get('name', pod_id),
                'cloudstack_id': pod['id'],
                'allocation_state': pod.get('allocationstate', ''),
                'start_ip': pod.get('startip', ''),
                'end_ip': pod.get('endip', ''),
                'netmask': pod.get('netmask', ''),
                'gateway': pod.get('gateway', ''),
            }))
    for compname, obj_maps in by_compname.items():
        yield RelationshipMap(
            compname=compname,
            relname='pods',
            modname='ZenPacks.zenoss.CloudStack.Pod',
            objmaps=obj_maps)
def getFrameworksRelMaps(self, frameworks):
    """Return the cfFrameworks RelationshipMap followed by the nested
    runtime and appserver maps of each framework.
    """
    obj_maps = []
    nested_maps = []
    for name, data in frameworks.items():
        framework_id = prepId(name)
        compname = 'cfFrameworks/{0}'.format(framework_id)
        obj_maps.append(ObjectMap(data={
            'id': framework_id,
            'title': name,
            'cfName': name,
            'setCFDetection': data['detection'],
        }))
        nested_maps.extend(
            self.getRuntimesRelMaps(data['runtimes'], compname))
        nested_maps.extend(
            self.getAppServersRelMaps(data['appservers'], compname))
    frameworks_map = RelationshipMap(
        relname='cfFrameworks',
        modname='ZenPacks.zenoss.CloudFoundry.Framework',
        objmaps=obj_maps)
    return [frameworks_map] + nested_maps
def olsonInterface(self, manageIp, macaddr):
    """Return an interfaces RelationshipMap containing one hand-built
    eth0 interface bound to manageIp/macaddr.
    """
    if_id = self.prepId("eth0")
    om = ObjectMap(
        {
            'id': if_id,
            'title': if_id,
            'interfaceName': if_id,
            'description': "Manually Kludged",
            'type': "manual",
            'speed': 10000000,
            'mtu': 1500,
            'ifindex': "1",
            'adminStatus': 1,
            'operStatus': 1,
            'monitor': False,
            'setIpAddresses': [manageIp],
            'macaddress': macaddr,
        },
        compname="os",
        modname="Products.ZenModel.IpInterface")
    # om.lockFromDeletion()
    # om.lockFromUpdates()
    return RelationshipMap(relname="interfaces",
                           compname="os",
                           modname="Products.ZenModel.IpInterface",
                           objmaps=[om])
def getSystemServicesRelMaps(self, services):
    """Return a one-element list with the cfSystemServices RelationshipMap.

    services is nested: type -> name -> version -> service data dict.
    """
    obj_maps = []
    # 'service_type' instead of 'type': don't shadow the builtin.
    for service_type, type_data in services.items():
        for name, name_data in type_data.items():
            for version, data in name_data.items():
                obj_maps.append(
                    ObjectMap(data=dict(
                        id=prepId(data['id']),
                        title=name,
                        cfId=data['id'],
                        cfName=name,
                        cfVersion=data['version'],
                        cfDescription=data['description'],
                        cfVendor=data['vendor'],
                        cfType=service_type,
                        setCFTiers=data['tiers'],
                    )))
    return [
        RelationshipMap(
            relname='cfSystemServices',
            modname='ZenPacks.zenoss.CloudFoundry.SystemService',
            objmaps=obj_maps)
    ]
def process(self, device, results, log):
    """Build the esxiDatastore RelationshipMap from vSphere datastore
    results. Inaccessible datastores are logged and skipped.
    """
    log.debug(' Start of process - results is %s \n' % (results))
    datastores = []
    for datastore in results:
        summary = datastore.summary
        info = {
            'id': self.prepId(summary.name),
            'title': summary.name,
            'type': summary.type,
            'capacity': long(summary.capacity),
        }
        if int(summary.accessible) != 1:
            log.warning('Datastore %s of device %s is not accessible'
                        % (info['id'], device.id))
            continue
        datastores.append(ObjectMap(data=info))
        log.debug(' datastoreDict is %s \n' % (info))
        log.debug('VM Datastore is %s \n' % (info['id']))
    return [
        RelationshipMap(
            relname='esxiDatastore',
            modname='ZenPacks.community.VMwareESXiMonitorPython.'
                    'ESXiDatastore',
            objmaps=datastores)
    ]
def getProvisionedServicesRelMaps(self, services):
    """Return a one-element list with the cfProvisionedServices
    RelationshipMap.
    """
    obj_maps = []
    for data in services:
        meta = data['meta']
        obj_maps.append(ObjectMap(data={
            'id': prepId(data['name']),
            'title': data['name'],
            'cfName': data['name'],
            'cfVersion': data['version'],
            'cfVendor': data['vendor'],
            'cfType': data['type'],
            'cfTier': data['tier'],
            'cfMetaCreated': meta['created'],
            'cfMetaUpdated': meta['updated'],
            'cfMetaVersion': meta['version'],
            'setCFMetaTags': meta['tags'],
            'setCFProperties': data['properties'],
        }))
    return [RelationshipMap(
        relname='cfProvisionedServices',
        modname='ZenPacks.zenoss.CloudFoundry.ProvisionedService',
        objmaps=obj_maps)]
def model_servers(self, servers, log):
    """Build the scalityServers RelationshipMap from the servers payload."""
    log.debug('model_servers data: {}'.format(servers))
    object_maps = []
    for server in servers:
        ip_address = server['management_ip_address']
        om = ObjectMap()
        # TODO: Use something else than IP address to ID the server
        om.id = self.prepId(ip_address)
        om.title = server['name']
        om.server_type = server['server_type']
        om.ip_address = ip_address
        om.zone = server['zone']
        # TODO: check usage of id in datasource
        om.server_id = server['id']
        # TODO: BUG since 8.x : TypeError: string indices must be integers
        rings = server['rings']
        if rings and isinstance(rings[0], dict):
            # Supervisor 7.4.6.1 returns a list of ring dicts.
            ring_names = [ring['name'] for ring in rings]
        else:
            # Supervisor 8.3.0.5 returns a list of ring names.
            ring_names = list(rings)
        om.rings = ', '.join(sorted(ring_names))
        om.roles = ', '.join(sorted(server['roles']))
        om.disks = ', '.join(server['disks'])
        object_maps.append(om)
    return RelationshipMap(
        compname='scalitySupervisors/Supervisor',
        relname='scalityServers',
        modname='ZenPacks.community.Scality.ScalityServer',
        objmaps=object_maps)
def getAppsRelMaps(self, apps):
    """Return the cfApps RelationshipMap followed by each app's
    instance maps.
    """
    mb = 1048576  # resources arrive in MB; components store bytes
    obj_maps = []
    instance_maps = []
    for data in apps:
        app_id = prepId(data['name'])
        resources = data['resources']
        obj_maps.append(ObjectMap(data={
            'id': app_id,
            'title': data['name'],
            'cfName': data['name'],
            'cfVersion': data['version'],
            'cfState': data['state'],
            'cfMetaCreated': data['meta']['created'],
            'cfMetaVersion': data['meta']['version'],
            'setCFURIs': data['uris'],
            'setCFServices': data['services'],
            'cfStagingModel': data['staging']['model'],
            'cfStagingStack': data['staging']['stack'],
            'modeled_instances': len(data['instances']),
            'modeled_runningInstances': data['runningInstances'],
            'modeled_resourcesMemory': resources['memory'] * mb,
            'modeled_resourcesDisk': resources['disk'] * mb,
            'modeled_resourcesFDS': resources['fds'],
        }))
        instance_maps.extend(
            self.getAppInstancesRelMaps(
                app_id, data['instances'], 'cfApps/{0}'.format(app_id)))
    apps_map = RelationshipMap(
        relname='cfApps',
        modname='ZenPacks.zenoss.CloudFoundry.App',
        objmaps=obj_maps)
    return [apps_map] + instance_maps
def model_connectors(self, results, log):
    """Group connector entries by ring and return one RelationshipMap
    per ring.
    """
    log.debug('model_connectors data: {}'.format(results))
    rings = {}
    for entry in results:
        rings.setdefault(entry['ring'], []).append(entry)
    rel_maps = []
    for ring, connectors in rings.items():
        connector_maps = []
        for connector in connectors:
            om = ObjectMap()
            om.id = self.prepId(connector['id'])
            om.title = connector['name']
            om.connector_id = connector['id']
            om.protocol = connector['protocol']
            om.detached = connector['detached']
            om.address = connector['address']
            om.ring = connector['ring']
            connector_maps.append(om)
        rel_maps.append(
            RelationshipMap(
                compname='scalitySupervisors/Supervisor/'
                         'scalityRings/{}'.format(ring),
                relname='scalityConnectors',
                modname='ZenPacks.community.Scality.ScalityConnector',
                objmaps=connector_maps))
    return rel_maps
def getQueueRelMap(self, queues_string, compname):
    """Parse `rabbitmqctl list_queues`-style output into a
    rabbitmq_queues RelationshipMap.

    Each non-blank line is expected as:
        <name> <durable> <auto_delete> <arguments>
    """
    object_maps = []
    for line in queues_string.split('\n'):
        line = line.strip()
        if not line:
            continue
        # maxsplit=3 keeps the arguments field intact even when it
        # contains whitespace (e.g. '[{"x-expires", 3600000}]'); the
        # strip above prevents an empty first token from leading
        # whitespace. Both broke the old unbounded split/unpack.
        name, durable, auto_delete, arguments = \
            re.split(r'\s+', line, 3)

        durable = bool(re.search(r'true', durable, re.I))
        auto_delete = bool(re.search(r'true', auto_delete, re.I))

        object_maps.append(
            ObjectMap(
                data={
                    'id': prepId(name),
                    'title': name,
                    'durable': durable,
                    'auto_delete': auto_delete,
                    'arguments': arguments,
                }))
    return RelationshipMap(
        compname=compname,
        relname='rabbitmq_queues',
        modname='ZenPacks.zenoss.RabbitMQ.RabbitMQQueue',
        objmaps=object_maps)
def process(self, device, results, log):
    """Model HBase tables from the collected JSON results.

    Returns None when the payload cannot be parsed; otherwise a list
    of RelationshipMaps (always containing the hbase_tables map).
    """
    log.info(
        'Modeler %s processing data for device %s',
        self.name(), device.id
    )

    maps = collections.OrderedDict([
        ('hbase_tables', [])
    ])

    try:
        data = json.loads(results)
    except ValueError:
        log.error('HBaseTableCollector: Error parsing collected data')
        return

    # List of tables.
    tables_oms = []
    if data:
        # Guard against a payload without a 'table' key
        # (data["table"] would raise KeyError).
        for table in data.get("table") or []:
            tables_oms.append(self._table_om(table))

    maps['hbase_tables'].append(RelationshipMap(
        relname='hbase_tables',
        modname=MODULE_NAME['HBaseTable'],
        objmaps=tables_oms))

    log.info(
        'Modeler %s finished processing data for device %s',
        self.name(), device.id
    )
    return list(chain.from_iterable(maps.itervalues()))
def _validate_datamap(device, datamap, relname=None, compname=None,
                      modname=None, parentId=None):
    """Normalize datamap into a RelationshipMap or IncrementalDataMap.

    Check order matters: an explicit relname wraps any
    non-RelationshipMap payload before the IncrementalDataMap /
    ObjectMap / dict fallbacks are considered.
    """
    if isinstance(datamap, RelationshipMap):
        log.debug('_validate_datamap: got valid RelationshipMap')
        return datamap

    if relname:
        log.debug('_validate_datamap: build relationship_map using relname')
        return RelationshipMap(relname=relname,
                               compname=compname,
                               modname=modname,
                               objmaps=datamap,
                               parentId=parentId)

    if isinstance(datamap, IncrementalDataMap):
        log.debug('_validate_datamap: got valid IncrementalDataMap')
        return datamap

    if isinstance(datamap, ObjectMap):
        log.debug('_validate_datamap: got valid ObjectMap')
        return IncrementalDataMap(device, datamap)

    log.debug('_validate_datamap: build object_map')
    object_map = ObjectMap(datamap, compname=compname, modname=modname)
    return IncrementalDataMap(device, object_map)
def data_nodes_remodel(self, data, device):
    """
    Create RelationshipMap for data nodes remodeling.

    @param data: parsed result of command execution
    @type data: dict
    @param device: object which has config_key and manageIP attributes
    @type device: object
    @return: list of RelationshipMap
    """
    nodes_oms = []
    try:
        values = json.loads(data['jmx'])
    except Exception:
        raise HadoopException('Error parsing collected data for {} '
                              'monitoring template'.format(
                                  device.template))
    # 'beans' may be absent from the payload; default to an empty
    # list instead of iterating over None.
    for value in values.get('beans', []):
        if value.get(
                'name') == 'Hadoop:service=NameNode,name=NameNodeInfo':
            for key, health in (('LiveNodes', NODE_HEALTH_NORMAL),
                                ('DeadNodes', NODE_HEALTH_DEAD),
                                ('DecomNodes', NODE_HEALTH_DECOM)):
                nodes_oms.extend(
                    node_oms(log, device, value.get(key), health,
                             data['conf'], True))
    rm = RelationshipMap(relname='hadoop_data_nodes',
                         modname=MODULE_NAME['HadoopDataNode'],
                         objmaps=nodes_oms)
    # Only return the map when it actually carries object maps.
    if list(rm):
        return [rm]
    return []
def process(self, devices, results, unused):
    """Map PostgreSQL database and table results to object and
    relationship maps.
    """
    if results is None:
        return None

    maps = [self.objectMap(dict(setPostgreSQL=True))]

    db_oms = []
    for db_name, db_detail in results['databases'].items():
        db_oms.append(ObjectMap(data=dict(
            id=prepId(db_name),
            title=db_name,
            dbName=db_name,
            dbOid=db_detail['oid'],
            modeled_size=db_detail['size'],
        )))
    maps.append(RelationshipMap(
        relname='pgDatabases',
        modname='ZenPacks.zenoss.PostgreSQL.Database',
        objmaps=db_oms))

    # One nested tables map per database that reported tables.
    for db_name, db_detail in results['databases'].items():
        if 'tables' not in db_detail:
            continue
        table_oms = []
        for table_name, table_detail in db_detail['tables'].items():
            table_oms.append(ObjectMap(data=dict(
                id='{0}_{1}'.format(prepId(db_name), prepId(table_name)),
                title=table_name,
                tableName=table_name,
                tableOid=table_detail['oid'],
                tableSchema=table_detail['schema'],
                modeled_size=table_detail['size'],
                modeled_totalSize=table_detail['totalSize'],
            )))
        maps.append(RelationshipMap(
            compname='pgDatabases/{0}'.format(prepId(db_name)),
            relname='tables',
            modname='ZenPacks.zenoss.PostgreSQL.Table',
            objmaps=table_oms))

    return maps