def relMap(self):
    """Create a relationship map."""
    relmap = RelationshipMap()
    relmap.relname = self.relname
    relmap.compname = self.compname
    return relmap
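For context, a minimal hedged sketch of how a relMap()/objectMap() helper pair like the one above is typically used inside a modeler plugin's process() method; the result rows and the 'name' key are illustrative assumptions, not taken from any particular plugin:

def process(self, device, results, log):
    rm = self.relMap()
    for row in results:
        om = self.objectMap(row)          # one ObjectMap per collected row
        om.id = self.prepId(row['name'])  # 'name' is an assumed key
        rm.append(om)
    return rm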
Example #3
    def process(self, device, results, log):
        log.info("Processing %s for device %s", self.name(), device.id)
        getdata, tabledata = results
        maps = []

        ocpRelMap = RelationshipMap(
            relname='raritanOCPs',
            compname=self.compname,
            modname='ZenPacks.community.Raritan.RaritanOCP')

        for snmpindex, row in tabledata.get(
                'overCurrentProtectorConfigurationTable', {}).items():
            ocpData = {}
            snmpindex = snmpindex.strip('.')
            log.info('snmpindex:{}'.format(snmpindex))
            log.info('row:{}'.format(row))

            title = row.get('overCurrentProtectorLabel')
            name = row.get('overCurrentProtectorName')
            if name:
                title = '{} ({})'.format(title, name)

            ocpData['id'] = self.prepId(title)
            ocpData['title'] = title
            ocpData['snmpindex'] = snmpindex

            ocpSensors = tabledata.get(
                'overCurrentProtectorSensorConfigurationTable', {})
            log.debug('sensors:{}'.format(ocpSensors))
            for sensor, sensorNum in self.sensorType.items():
                sensorIndex = '{}.{}'.format(snmpindex, sensorNum)
                ocpSensor = ocpSensors[sensorIndex]
                ocpData['{}_units'.format(
                    sensor)] = ocpSensor['overCurrentProtectorSensorUnits']
                ocpData['{}_digits'.format(sensor)] = ocpSensor[
                    'overCurrentProtectorSensorDecimalDigits']

            log.debug('sensorData:{}'.format(ocpData))

            ocpRelMap.append(
                ObjectMap(
                    compname=self.compname,
                    modname='ZenPacks.community.Raritan.RaritanOCP',
                    data=ocpData,
                ))
        maps.append(ocpRelMap)

        return maps
    def processTblTrays(self, tblTrays, log):
        mapTrays = RelationshipMap(
            modname='ZenPacks.TwoNMS.PrinterMIB.PrinterTray',
            relname='printermibtray')

        # iterate each tray and translate the mibs
        for trayId, trayData in tblTrays.iteritems():
            # create an input Tray object
            trayObj = self.objectMap(trayData)
            trayObj.id = self.prepId(trayId)

            # translate prtInputTypeTC
            try:
                if self.PrtInputTypeTC[str(trayObj.prtInputTypeTC)] is not None:
                    trayObj.prtInputType = self.PrtInputTypeTC[str(
                        trayObj.prtInputTypeTC)]
            except AttributeError:
                log.warn("Tray does not support the prtInputTypeTC oid")
                trayObj.prtInputType = self.PrtInputTypeTC['na']
                #continue

            # translate PrtCapacityUnitTC
            try:
                if (self.PrtCapacityUnitTC[str(trayObj.prtCapacityUnitTC)]
                        is not None):
                    trayObj.prtCapacityUnit = self.PrtCapacityUnitTC[str(
                        trayObj.prtCapacityUnitTC)]
            except AttributeError:
                log.warn("Tray does not support the PrtCapacityUnitTC oid")
                trayObj.prtCapacityUnit = self.PrtCapacityUnitTC['na']
                #continue

            # add a percentage value of the usage
            try:
                trayObj.usagepct = self.calculateUsagePct(
                    trayObj.prtInputCurrentLevel, trayObj.prtInputMaxCapacity,
                    log)
            except Exception:
                trayObj.usagepct = 'na'

            # assign object to the relationshipMap
            trayObj.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterTray"
            trayObj.supplyId = trayObj.id
            trayObj.snmpindex = trayObj.id
            log.debug("New input tray found: %s", trayObj)
            mapTrays.append(trayObj)

        return mapTrays
    def getAppsRelMaps(self, apps):
        obj_maps = []
        rel_maps = []

        for data in apps:
            app_id = prepId(data['name'])
            obj_maps.append(
                ObjectMap(data=dict(
                    id=app_id,
                    title=data['name'],
                    cfName=data['name'],
                    cfVersion=data['version'],
                    cfState=data['state'],
                    cfMetaCreated=data['meta']['created'],
                    cfMetaVersion=data['meta']['version'],
                    setCFURIs=data['uris'],
                    setCFServices=data['services'],
                    cfStagingModel=data['staging']['model'],
                    cfStagingStack=data['staging']['stack'],
                    modeled_instances=len(data['instances']),
                    modeled_runningInstances=data['runningInstances'],
                    modeled_resourcesMemory=data['resources']['memory'] *
                    1048576,
                    modeled_resourcesDisk=data['resources']['disk'] * 1048576,
                    modeled_resourcesFDS=data['resources']['fds'])))

            rel_maps.extend(
                self.getAppInstancesRelMaps(app_id, data['instances'],
                                            'cfApps/{0}'.format(app_id)))

        return [
            RelationshipMap(relname='cfApps',
                            modname='ZenPacks.zenoss.CloudFoundry.App',
                            objmaps=obj_maps)
        ] + rel_maps
    def getProvisionedServicesRelMaps(self, services):
        obj_maps = []

        for data in services:
            obj_maps.append(
                ObjectMap(data=dict(
                    id=prepId(data['name']),
                    title=data['name'],
                    cfName=data['name'],
                    cfVersion=data['version'],
                    cfVendor=data['vendor'],
                    cfType=data['type'],
                    cfTier=data['tier'],
                    cfMetaCreated=data['meta']['created'],
                    cfMetaUpdated=data['meta']['updated'],
                    cfMetaVersion=data['meta']['version'],
                    setCFMetaTags=data['meta']['tags'],
                    setCFProperties=data['properties'],
                )))

        return [
            RelationshipMap(
                relname='cfProvisionedServices',
                modname='ZenPacks.zenoss.CloudFoundry.ProvisionedService',
                objmaps=obj_maps)
        ]
    def getSystemServicesRelMaps(self, services):
        obj_maps = []

        for type, type_data in services.items():
            for name, name_data in type_data.items():
                for version, data in name_data.items():
                    obj_maps.append(
                        ObjectMap(data=dict(
                            id=prepId(data['id']),
                            title=name,
                            cfId=data['id'],
                            cfName=name,
                            cfVersion=data['version'],
                            cfDescription=data['description'],
                            cfVendor=data['vendor'],
                            cfType=type,
                            setCFTiers=data['tiers'],
                        )))

        return [
            RelationshipMap(
                relname='cfSystemServices',
                modname='ZenPacks.zenoss.CloudFoundry.SystemService',
                objmaps=obj_maps)
        ]
    def getFrameworksRelMaps(self, frameworks):
        rel_maps = []
        obj_maps = []

        for name, data in frameworks.items():
            framework_id = prepId(name)
            obj_maps.append(
                ObjectMap(data=dict(
                    id=framework_id,
                    title=name,
                    cfName=name,
                    setCFDetection=data['detection'],
                )))

            rel_maps.extend(
                self.getRuntimesRelMaps(
                    data['runtimes'], 'cfFrameworks/{0}'.format(framework_id)))

            rel_maps.extend(
                self.getAppServersRelMaps(
                    data['appservers'],
                    'cfFrameworks/{0}'.format(framework_id)))

        return [
            RelationshipMap(relname='cfFrameworks',
                            modname='ZenPacks.zenoss.CloudFoundry.Framework',
                            objmaps=obj_maps)
        ] + rel_maps
    def getAppInstancesRelMaps(self, appId, instances, compname):
        obj_maps = []

        for data in instances:
            instance_id = prepId(str(data['index']))
            stats = data['stats']['stats']
            obj_maps.append(
                ObjectMap(data=dict(
                    id='{0}_{1}'.format(appId, instance_id),
                    title=instance_id,
                    cfIndex=data['index'],
                    cfState=data['state'],
                    cfSince=data['since'],
                    cfHost=stats['host'],
                    cfPort=stats['port'],
                    cfCores=stats['cores'],
                    modeled_quotaMemory=stats['mem_quota'],
                    modeled_quotaDisk=stats['disk_quota'],
                    modeled_usageCPU=stats['usage']['cpu'],
                    modeled_usageMemory=stats['usage']['mem'] * 1024,
                    modeled_usageDisk=stats['usage']['disk'] * 1024,
                )))

        return [
            RelationshipMap(compname=compname,
                            relname='cfAppInstances',
                            modname='ZenPacks.zenoss.CloudFoundry.AppInstance',
                            objmaps=obj_maps)
        ]
    def getSpareDisksRelMaps(self, spares, compname):
        obj_maps = []

        for name, data in spares.iteritems():
            spare_id = prepId(name)
            obj_maps.append(ObjectMap(data=dict(
                id = spare_id,
                sparedisk_name = name,
                node = data['node'],
                disk_uid = data['disk_uid'],
                raid_state = data['raid_state'],
                raid_type = data['raid_type'],
                bay = data['bay'],
                byte_per_sector = data['byte_per_sector'],
                disk_type = data['disk_type'],
                rpm = data['rpm'],
                model = data['model'],
                serialnr = data['serialnr'],
                firmware = data['firmware'],
                poweron_hours = data['poweron_hours'],
                grown_defect_list_count = data['grown_defect_list_count'],
                total_bytes = data['total_bytes'],
                )))

        return [RelationshipMap(
            compname = compname,
            relname = 'spare_disks',
            modname = 'ZenPacks.CS.NetApp.SevenMode.SpareDisk',
            objmaps = obj_maps)]
Example #11
    def get_zones_rel_maps(self, zones_response):
        zone_maps = []
        for zone in zones_response.get('zone', []):
            zone_id = self.prepId('zone%s' % zone['id'])

            zone_maps.append(
                ObjectMap(data=dict(
                    id=zone_id,
                    title=zone.get('name', zone_id),
                    cloudstack_id=zone['id'],
                    allocation_state=zone.get('allocationstate', ''),
                    guest_cidr_address=zone.get('guestcidraddress', ''),
                    dhcp_provider=zone.get('dhcpprovider', ''),
                    dns1=zone.get('dns1', ''),
                    dns2=zone.get('dns2', ''),
                    internal_dns1=zone.get('internaldns1', ''),
                    internal_dns2=zone.get('internaldns2', ''),
                    network_type=zone.get('networktype', ''),
                    security_groups_enabled=zone.get('securitygroupsenabled',
                                                     ''),
                    vlan=zone.get('vlan', ''),
                    zone_token=zone.get('zonetoken', ''),
                )))

        yield RelationshipMap(relname='zones',
                              modname='ZenPacks.zenoss.CloudStack.Zone',
                              objmaps=zone_maps)
    def getRedisDbRelMap(self, device, dbData, log):
        rel_maps = []

        log.info('In getRedisDbRelMap - dbData is %s  \n' % (dbData))
        for k, v in dbData.iteritems():
            compname = 'redisPorts/%s' % (k)
            object_maps = []
            for k1, v1 in v.iteritems():
                db_number = int(k1[-1])
                db_avg_ttl = v1.get('avg_ttl', None)
                db_keys = v1.get('keys', None)
                object_maps.append(
                    ObjectMap(
                        data={
                            'id': k + '_' + self.prepId(k1),
                            'title': k + '_' + self.prepId(k1),
                            'db_number': db_number,
                            'db_avg_ttl': db_avg_ttl,
                            'db_keys': db_keys,
                        }))
            rel_maps.append(
                RelationshipMap(
                    compname=compname,
                    relname='redisDbs',
                    modname='ZenPacks.community.zplib.Redis.RedisDb',
                    objmaps=object_maps))

        return rel_maps
Example #13
def volumes_rm(region_id, volumes):
    '''
    Return volumes RelationshipMap given region_id and a VolumeInfo
    ResultSet.
    '''
    volume_data = []
    for volume in volumes:
        if volume.attach_data.instance_id:
            instance_id = prepId(volume.attach_data.instance_id)
        else:
            instance_id = None

        volume_data.append({
            'id': prepId(volume.id),
            'title': name_or(volume.tags, volume.id),
            'volume_type': volume.type,
            'create_time': volume.create_time,
            'size': volume.size / (1024**3),
            'iops': volume.iops,
            'status': volume.status,
            'attach_data_status': volume.attach_data.status,
            'attach_data_devicepath': volume.attach_data.device,
            'setInstanceId': instance_id,
            'setZoneId': volume.zone,
        })

    return RelationshipMap(compname='regions/%s' % region_id,
                           relname='volumes',
                           modname=MODULE_NAME['EC2Volume'],
                           objmaps=volume_data)
Example #14
def instances_rm(region_id, reservations):
    '''
    Return instances RelationshipMap given region_id and an InstanceInfo
    ResultSet.
    '''
    instance_data = []
    for instance in chain.from_iterable(r.instances for r in reservations):
        zone_id = prepId(instance.placement) if instance.placement else None
        subnet_id = prepId(instance.subnet_id) if instance.subnet_id else None

        instance_data.append({
            'id': prepId(instance.id),
            'title': name_or(instance.tags, instance.id),
            'instance_id': instance.id,
            'public_dns_name': instance.public_dns_name,
            'private_ip_address': instance.private_ip_address,
            'image_id': instance.image_id,
            'instance_type': instance.instance_type,
            'launch_time': instance.launch_time,
            'state': instance.state,
            'platform': getattr(instance, 'platform', ''),
            'detailed_monitoring': instance.monitored,
            'setZoneId': zone_id,
            'setVPCSubnetId': subnet_id,
        })

    return RelationshipMap(compname='regions/%s' % region_id,
                           relname='instances',
                           modname=MODULE_NAME['EC2Instance'],
                           objmaps=instance_data)
Example #15
def vpc_subnets_rm(region_id, subnets):
    '''
    Return vpc_subnets RelationshipMap given region_id and a SubnetInfo
    ResultSet.
    '''
    vpc_subnet_data = []
    for subnet in subnets:
        vpc_subnet_data.append({
            'id': prepId(subnet.id),
            'title': name_or(subnet.tags, subnet.id),
            'available_ip_address_count': subnet.available_ip_address_count,
            'cidr_block': subnet.cidr_block,
            'defaultForAz': to_boolean(subnet.defaultForAz),
            'mapPublicIpOnLaunch': to_boolean(subnet.mapPublicIpOnLaunch),
            'state': subnet.state,
            'setVPCId': subnet.vpc_id,
            'setZoneId': subnet.availability_zone,
        })

    return RelationshipMap(compname='regions/%s' % region_id,
                           relname='vpc_subnets',
                           modname=MODULE_NAME['EC2VPCSubnet'],
                           objmaps=vpc_subnet_data)
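A hypothetical sketch of how per-region helpers such as instances_rm, volumes_rm and vpc_subnets_rm above might be combined into a single list of datamaps; region_results and its keys are illustrative assumptions rather than part of the original modeler:

def ec2_region_maps(region_results):
    maps = []
    for region_id, data in region_results.items():
        # the 'reservations', 'volumes' and 'subnets' keys are assumed here
        maps.append(instances_rm(region_id, data['reservations']))
        maps.append(volumes_rm(region_id, data['volumes']))
        maps.append(vpc_subnets_rm(region_id, data['subnets']))
    return maps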
    def _node_manager_oms(self, data, device):
        """
        Build Node Manager object maps.
        """
        node_mgr_oms = []

        # Find live node managers names in the jmx output.
        live_node_mgrs = []
        for bean in data['beans']:
            if bean['name'] == 'Hadoop:service=ResourceManager,name=RMNMInfo':
                live_node_mgrs = bean.setdefault('LiveNodeManagers', [])
                break
        if live_node_mgrs:
            live_node_mgrs = json.loads(live_node_mgrs)

        # Build Node Manager oms given the data found in jmx.
        comp = self._dict_components['HadoopNodeManager']
        for node_mgr in live_node_mgrs:
            node_mgr_address = prep_ip(device, node_mgr['NodeHTTPAddress'])
            node_mgr_oms.append(
                ObjectMap({
                    'id': prepId(device.id + NAME_SPLITTER + node_mgr_address),
                    'title': node_mgr_address,
                    'node_type': comp[0],
                }))
        return RelationshipMap(relname=comp[1],
                               modname=MODULE_NAME['HadoopNodeManager'],
                               objmaps=node_mgr_oms)
Example #17
    def get_inputs(self, results, log):

        input_maps = []
        getdata, tabledata = results
        for snmpindex, row in tabledata.get('upsPhaseInputTable', {}).items():
            inputData = {}
            snmpindex = snmpindex.strip('.')
            log.debug('snmpindex:{}'.format(snmpindex))
            log.debug('row:{}'.format(row))

            inputIndex = row.get('upsPhaseInputTableIndex')
            name = row.get('upsPhaseInputName')
            inputData['id'] = self.prepId('Input_{}'.format(inputIndex))
            inputData['title'] = self.prepId(name)
            inputData['snmpindex'] = snmpindex
            inputData['index'] = row.get('upsPhaseInputTableIndex')
            inputData['numPhases'] = row.get('upsPhaseNumInputPhases')
            inputData['orientation'] = self.voltageOrMap[int(
                row.get('upsPhaseInputVoltageOrientation'))]
            inputData['inputType'] = self.typeMap[int(
                row.get('upsPhaseInputType'))]

            input_maps.append(inputData)
        inputRelMap = RelationshipMap(
            relname='powerNetInputs',
            modname='ZenPacks.community.PowerNet.PowerNetInput',
            compname=self.compname,
            objmaps=input_maps,
        )
        log.debug('get_inputs: {}'.format(inputRelMap))
        return inputRelMap
    def getQueueRelMap(self, queues_string, compname):
        object_maps = []
        for queue_string in queues_string.split('\n'):
            if not queue_string.strip():
                continue

            name, durable, auto_delete, arguments = \
                re.split(r'\s+', queue_string)

            if re.search(r'true', durable, re.I):
                durable = True
            else:
                durable = False

            if re.search(r'true', auto_delete, re.I):
                auto_delete = True
            else:
                auto_delete = False

            object_maps.append(
                ObjectMap(
                    data={
                        'id': prepId(name),
                        'title': name,
                        'durable': durable,
                        'auto_delete': auto_delete,
                        'arguments': arguments,
                    }))

        return RelationshipMap(
            compname=compname,
            relname='rabbitmq_queues',
            modname='ZenPacks.zenoss.RabbitMQ.RabbitMQQueue',
            objmaps=object_maps)
    def process(self, device, results, log):
        """
        Must return one of :
            - None, changes nothing. Good in error cases.
            - A RelationshipMap, for the device to component information
            - An ObjectMap, for the device-level information
            - A list of RelationshipMaps and ObjectMaps, both
        """
        log.debug('Process results: {}'.format(results))

        bamboo_data = results.get('bamboo', '')
        rm = []
        if bamboo_data:
            bamboo_maps = []
            om_bamboo = ObjectMap()
            bamboo_name = 'Bamboo {}'.format(bamboo_data['version'])
            om_bamboo.id = self.prepId(bamboo_name)
            om_bamboo.title = bamboo_name
            bamboo_maps.append(om_bamboo)

            rm.append(RelationshipMap(relname='bambooServers',
                                      modname='ZenPacks.community.Bamboo.BambooServer',
                                      compname='',
                                      objmaps=bamboo_maps))

        log.debug('{}: process maps:{}'.format(device.id, rm))
        return rm
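As a hedged illustration of the "None, changes nothing" case described in the docstring above (not how this particular plugin handles empty results), a process() method can bail out early and leave the existing model untouched:

    def process(self, device, results, log):
        if not results:
            log.warn('%s: no data returned, keeping existing model', device.id)
            return None  # changes nothing, per the contract described above
        # otherwise build and return ObjectMaps / RelationshipMaps as usual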
Example #20
    def get_pods_rel_maps(self, pods_response):
        pod_maps = {}
        for pod in pods_response.get('pod', []):
            zone_id = self.prepId('zone%s' % pod['zoneid'])
            pod_id = self.prepId('pod%s' % pod['id'])

            compname = 'zones/%s' % zone_id
            pod_maps.setdefault(compname, [])

            pod_maps[compname].append(
                ObjectMap(data=dict(
                    id=pod_id,
                    title=pod.get('name', pod_id),
                    cloudstack_id=pod['id'],
                    allocation_state=pod.get('allocationstate', ''),
                    start_ip=pod.get('startip', ''),
                    end_ip=pod.get('endip', ''),
                    netmask=pod.get('netmask', ''),
                    gateway=pod.get('gateway', ''),
                )))

        for compname, obj_maps in pod_maps.items():
            yield RelationshipMap(compname=compname,
                                  relname='pods',
                                  modname='ZenPacks.zenoss.CloudStack.Pod',
                                  objmaps=obj_maps)
Example #21
    def add_maps(self, res, ds):
        """
        Check for added/removed tables and return a RelationshipMap if
        any changes took place. Otherwise return an empty list.
        """
        try:
            res = json.loads(res)
        except ValueError:
            log.error(
                'Error parsing collected data for {} monitoring template'
                .format(ds.template))
            res = []
        if not res:
            return []
        tables_update = set(table['name'] for table in res.get('table', []))
        self.added = list(tables_update.difference(set(ds.table_ids)))
        self.removed = list(set(ds.table_ids).difference(tables_update))
        if self.added or self.removed:
            tables_oms = []
            for table in tables_update:
                tables_oms.append(
                    ObjectMap({
                        'id': prepId(table),
                        'title': table
                    }))
            return [
                RelationshipMap(relname='hbase_tables',
                                modname=MODULE_NAME['HBaseTable'],
                                objmaps=tables_oms)
            ]
        return []
Example #22
    def olsonInterface(self, manageIp, macaddr):
        om = ObjectMap({},
                       compname="os",
                       modname="Products.ZenModel.IpInterface")
        om.id = self.prepId("eth0")
        om.title = om.id
        om.interfaceName = om.id
        om.description = "Manually Kludged"
        om.type = "manual"
        om.speed = 10000000
        om.mtu = 1500
        om.ifindex = "1"
        om.adminStatus = 1
        om.operStatus = 1
        om.monitor = False
        om.setIpAddresses = [manageIp]
        om.macaddress = macaddr
        # om.lockFromDeletion()
        # om.lockFromUpdates()
        return RelationshipMap(relname="interfaces",
                               compname="os",
                               modname="Products.ZenModel.IpInterface",
                               objmaps=[om])
Example #23
    def get_clusters_rel_maps(self, clusters_response):
        cluster_maps = {}
        for cluster in clusters_response.get('cluster', []):
            zone_id = self.prepId('zone%s' % cluster['zoneid'])
            pod_id = self.prepId('pod%s' % cluster['podid'])
            cluster_id = self.prepId('cluster%s' % cluster['id'])

            compname = 'zones/%s/pods/%s' % (zone_id, pod_id)
            cluster_maps.setdefault(compname, [])

            cluster_maps[compname].append(
                ObjectMap(data=dict(
                    id=cluster_id,
                    title=cluster.get('name', cluster_id),
                    cloudstack_id=cluster['id'],
                    allocation_state=cluster.get('allocationstate', ''),
                    cluster_type=cluster.get('clustertype', ''),
                    hypervisor_type=cluster.get('hypervisortype', ''),
                    managed_state=cluster.get('managedstate', ''),
                )))

        for compname, obj_maps in cluster_maps.items():
            yield RelationshipMap(compname=compname,
                                  relname='clusters',
                                  modname='ZenPacks.zenoss.CloudStack.Cluster',
                                  objmaps=obj_maps)
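A hypothetical sketch of how generator helpers such as get_zones_rel_maps, get_pods_rel_maps and get_clusters_rel_maps are commonly chained into one flat list of datamaps; the way results is unpacked below is an illustrative assumption:

    def process(self, device, results, log):
        # assume results carries the parsed CloudStack API payloads
        zones_response, pods_response, clusters_response = results

        maps = []
        maps.extend(self.get_zones_rel_maps(zones_response))
        maps.extend(self.get_pods_rel_maps(pods_response))
        maps.extend(self.get_clusters_rel_maps(clusters_response))
        return maps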
    def process(self, device, results, log):
        log.info(
            'Modeler %s processing data for device %s',
            self.name(), device.id
        )

        maps = collections.OrderedDict([
            ('hbase_tables', [])
        ])
        try:
            data = json.loads(results)
        except ValueError:
            log.error('HBaseTableCollector: Error parsing collected data')
            return
        # List of tables
        tables_oms = []
        if data:  # Check if there are any tables.
            for table in data["table"]:
                tables_oms.append(self._table_om(table))

        maps['hbase_tables'].append(RelationshipMap(
            relname='hbase_tables',
            modname=MODULE_NAME['HBaseTable'],
            objmaps=tables_oms))
        log.info(
            'Modeler %s finished processing data for device %s',
            self.name(), device.id
        )

        return list(chain.from_iterable(maps.itervalues()))
Example #25
    def model_connectors(self, results, log):
        log.debug('model_connectors data: {}'.format(results))
        rings = {}
        for entry in results:
            ring = entry['ring']
            if ring not in rings:
                rings[ring] = []
            rings[ring].append(entry)

        rm = []
        for ring, connectors in rings.items():
            compname = 'scalitySupervisors/Supervisor/scalityRings/{}'.format(
                ring)
            connector_maps = []

            for connector in connectors:
                volume_id = connector['id']
                om_connector = ObjectMap()
                om_connector.id = self.prepId(volume_id)
                om_connector.title = connector['name']
                om_connector.connector_id = volume_id
                om_connector.protocol = connector['protocol']
                om_connector.detached = connector['detached']
                om_connector.address = connector['address']
                om_connector.ring = connector['ring']
                connector_maps.append(om_connector)

            rm.append(
                RelationshipMap(
                    compname=compname,
                    relname='scalityConnectors',
                    modname='ZenPacks.community.Scality.ScalityConnector',
                    objmaps=connector_maps))

        return rm
Example #26
    def get_oss_rel_maps(self, es_response):
        oss = getattr(self.device, 'zESOssNodes', '') or ''
        ossList = oss.split(",")
        rm = []
        fs_dict = es_response.get('host', {})
        nw_data = self.filter_server(fs_dict, 'oss')
        i = 0
        for key, val in nw_data.items():
            log.debug("XXX KEY: %s and VALUE: %r", key, val)
            if val.get('id') is None:
                val['id'] = str(key)
            if val.get('title') is None:
                val['title'] = str(key)
            try:
                val['management_address'] = ossList[i]
            except IndexError:
                val['management_address'] = 'null'
            # Create Object Map
            om = self.objectMap()
            om.updateFromDict(val)
            # Update Object Map to RelationShip Map
            rm.append(om)
            i += 1

        return RelationshipMap(
            relname=EXMODEL['objectStorageServers']['relname'],
            modname=EXMODEL['objectStorageServers']['modname'],
            objmaps=rm)
Example #27
    def process(self, device, results, log):
        log.info("Modeler %s processing data for device %s", self.name(),
                 device.id)

        rm = self.relMap()

        for service in results.get('Win32_Service', ()):
            om = self.objectMap()
            om.id = self.prepId(service.Name)
            om.serviceName = service.Name
            om.caption = service.Caption
            om.setServiceClass = {
                'name': service.Name,
                'description': service.Caption
            }
            om.pathName = service.PathName
            om.serviceType = service.ServiceType
            om.startMode = service.StartMode
            om.startName = service.StartName
            om.description = service.Description
            rm.append(om)

        maps = []
        maps.append(
            RelationshipMap(relname="winrmservices", compname='os',
                            objmaps=[]))
        maps.append(rm)
        return maps
Example #28
def _validate_datamap(device,
                      datamap,
                      relname=None,
                      compname=None,
                      modname=None,
                      parentId=None):
    if isinstance(datamap, RelationshipMap):
        log.debug('_validate_datamap: got valid RelationshipMap')
    elif relname:
        log.debug('_validate_datamap: build relationship_map using relname')
        datamap = RelationshipMap(relname=relname,
                                  compname=compname,
                                  modname=modname,
                                  objmaps=datamap,
                                  parentId=parentId)
    elif isinstance(datamap, IncrementalDataMap):
        log.debug('_validate_datamap: got valid IncrementalDataMap')
    elif isinstance(datamap, ObjectMap):
        log.debug('_validate_datamap: got valid ObjectMap')
        datamap = IncrementalDataMap(device, datamap)
    else:
        log.debug('_validate_datamap: build object_map')
        datamap = ObjectMap(datamap, compname=compname, modname=modname)
        datamap = IncrementalDataMap(device, datamap)

    return datamap
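A brief hedged usage sketch for _validate_datamap above, with illustrative values: a plain list of object maps plus a relname is wrapped in a RelationshipMap, while a bare dict ends up as an ObjectMap wrapped in an IncrementalDataMap:

rel_map = _validate_datamap(
    device,
    [{'id': 'eth0'}, {'id': 'eth1'}],
    relname='interfaces',
    compname='os',
    modname='Products.ZenModel.IpInterface')

incremental_map = _validate_datamap(device, {'serialNumber': 'ABC123'})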
    def process(self, device, results, log):
        log.debug(' Start of process - results is %s \n' % (results))
        maps = []
        datastores = []

        for datastore in results:
            datastoreDict = {}
            datastoreDict['id'] = self.prepId(datastore.summary.name)
            datastoreDict['title'] = datastore.summary.name
            datastoreDict['type'] = datastore.summary.type
            datastoreDict['capacity'] = long(datastore.summary.capacity)
            if not int(datastore.summary.accessible) == 1:
                log.warning('Datastore %s of device %s is not accessible' %
                            (datastoreDict['id'], device.id))
                continue

            datastores.append(ObjectMap(data=datastoreDict))
            log.debug(' datastoreDict is %s \n' % (datastoreDict))
            log.debug('VM Datastore is %s \n' % (datastoreDict['id']))

        maps.append(
            RelationshipMap(
                relname='esxiDatastore',
                modname='ZenPacks.community.VMwareESXiMonitorPython.ESXiDatastore',
                objmaps=datastores))

        return maps
Example #30
    def model_nodes(self, results, log):
        log.debug('model_nodes data: {}'.format(results))
        rings = {}
        for entry in results:
            ring_name = entry['ring']
            if ring_name not in rings:
                rings[ring_name] = []
            rings[ring_name].append(entry)

        rm = []
        for ring, nodes in rings.items():
            compname = 'scalitySupervisors/Supervisor/scalityRings/{}'.format(
                ring)
            node_maps = []
            for node in nodes:
                om_node = ObjectMap()
                node_name = node['name']
                om_node.id = self.prepId('{}_{}'.format(ring, node_name))
                om_node.title = node_name
                om_node.ring = ring
                # TODO: not safe
                om_node.admin_endpoint = '{}:{}'.format(
                    node['admin_address'], node['admin_port'])
                om_node.chord_endpoint = '{}:{}'.format(
                    node['chord_address'], node['chord_port'])
                om_node.server_endpoint = node['server']
                node_maps.append(om_node)
            rm.append(
                RelationshipMap(
                    compname=compname,
                    relname='scalityNodes',
                    modname='ZenPacks.community.Scality.ScalityNode',
                    objmaps=node_maps))
        return rm
Example #31
    def model_servers(self, servers, log):
        log.debug('model_servers data: {}'.format(servers))
        server_maps = []
        for server in servers:
            server_name = server['name']
            server_ip = server['management_ip_address']
            om_server = ObjectMap()
            # TODO: Use something else than IP address to ID the server
            om_server.id = self.prepId(server_ip)
            om_server.title = server_name
            om_server.server_type = server['server_type']
            om_server.ip_address = server_ip
            om_server.zone = server['zone']
            # TODO: check usage of id in datasource
            om_server.server_id = server['id']
            # TODO: BUG since 8.x : TypeError: string indices must be integers
            rings = server['rings']
            if rings and isinstance(rings[0], dict):
                # Supervisor 7.4.6.1
                om_server.rings = ', '.join(
                    sorted([r['name'] for r in server['rings']]))
            else:
                # Supervisor 8.3.0.5
                om_server.rings = ', '.join(sorted(server['rings']))
            om_server.roles = ', '.join(sorted(server['roles']))
            om_server.disks = ', '.join(server['disks'])
            server_maps.append(om_server)

        return RelationshipMap(
            compname='scalitySupervisors/Supervisor',
            relname='scalityServers',
            modname='ZenPacks.community.Scality.ScalityServer',
            objmaps=server_maps)
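A hypothetical ordering sketch for the Scality helpers above: compname paths such as 'scalitySupervisors/Supervisor/scalityRings/<ring>' require the supervisor and ring components to be modeled first, so a process() method would typically return those maps before the server, node and connector maps. model_supervisor and model_rings are assumed helpers and the results keys are illustrative:

    def process(self, device, results, log):
        maps = []
        maps.extend(self.model_supervisor(results['supervisor'], log))  # assumed helper
        maps.extend(self.model_rings(results['rings'], log))            # assumed helper
        maps.append(self.model_servers(results['servers'], log))
        maps.extend(self.model_nodes(results['nodes'], log))
        maps.extend(self.model_connectors(results['connectors'], log))
        return maps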
    def test_updateRelationship(self):
        """Test relationship creation."""
        rm = RelationshipMap(
            compname="os",
            relname="interfaces",
            modname="Products.ZenModel.IpInterface",
            objmaps=[
                ObjectMap({"id": "eth0"}),
                ObjectMap({"id": "eth1"}),
                ])

        changed = self.service.remote_applyDataMaps(self.device.id, [rm])
        self.assertTrue(
            changed,
            "device.os.interfaces not changed by first RelationshipMap")

        self.assertEqual(
            2, self.device.os.interfaces.countObjects(),
            "wrong number of interfaces created by first RelationshipMap")

        changed = self.service.remote_applyDataMaps(self.device.id, [rm])
        self.assertFalse(
            changed,
            "device.os.interfaces changed by second RelationshipMap")

        rm.maps = rm.maps[:1]
        changed = self.service.remote_applyDataMaps(self.device.id, [rm])
        self.assertTrue(
            changed,
            "device.os.interfaces not changed by trimmed RelationshipMap")

        self.assertEquals(
            1, self.device.os.interfaces.countObjects(),
            "wrong number of interfaces after trimmed RelationshipMap")

        rm.maps = []
        changed = self.service.remote_applyDataMaps(self.device.id, [rm])
        self.assertTrue(
            changed,
            "device.os.interfaces not changed by empty RelationshipMap")

        self.assertEquals(
            0, self.device.os.interfaces.countObjects(),
            "wrong number of interfaces after empty RelationshipMap")
    def process(self, device, results, log):
        maps = []

        top_rm = RelationshipMap(relname='testTopComponents')

        maps.append(top_rm)

        for i in range(device.zTestCalcPerfTopComponentsPerDevice):
            top_rm.append(
                ObjectMap(data={
                    'id': 'top{}'.format(i),
                    },
                    modname='ZenPacks.test.CalcPerfScale.TestTopComponent'))

            bottom_rm = RelationshipMap(
                compname='testTopComponents/top{}'.format(i),
                relname='testBottomComponents')

            for j in range(device.zTestCalcPerfBottomComponentsPerTopComponent):
                bottom_rm.append(
                    ObjectMap(data={
                        'id': 'top{}-bottom{}'.format(i, j),
                        },
                        modname='ZenPacks.test.CalcPerfScale.TestBottomComponent'))

            maps.append(bottom_rm)

        return maps
Example #35
    def process(self, device, results, log):
        log.info(
            "Modeler %s processing data for device %s",
            self.name(),
            device.id
            )
        maps = list()

        pools = dict()

        get_regex = r'^(?P<ds>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'

        for line in results.splitlines():
            get_match = re.match(get_regex, line)

            if get_match:
                ds = get_match.group('ds')
                pool = ds.split('/')[0]
                key = get_match.group('key')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
                if ds not in pools[pool]:
                    pools[pool][ds] = dict()
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                elif key == 'type':
                    pools[pool][ds]['zDsType'] = value
                    continue
                pools[pool][ds][key] = value

        booleans = [
            'atime',
            'defer_destroy',
            'mounted',
            'nbmand',
            'overlay',
            'relatime',
            'setuid',
            'utf8only',
            'vscan',
            'zoned',
            ]

        floats = [
            'compressratio',
            'refcompressratio',
            ]

        ints = [
            'available',
            'copies',
            'filesystem_count',
            'filesystem_limit',
            'logicalreferenced',
            'logicalused',
            'quota',
            'recordsize',
            'referenced',
            'refquota',
            'refreservation',
            'reservation',
            'snapshot_count',
            'snapshot_limit',
            'used',
            'usedbychildren',
            'usedbydataset',
            'usedbyrefreservation',
            'usedbysnapshots',
            'userrefs',
            'volblocksize',
            'volsize',
            'written',
            ]

        times = [
            'creation',
            ]

        prefixes = {
            'filesystem': 'fs',
            'volume': 'vol',
            'snapshot': 'snap'
            }

        suffixes = {
            'filesystem': '',
            'volume': 'Vol',
            'snapshot': 'Snap'
            }

        time_format = '%Y-%m-%d %H:%M:%S'

        ignore_names_regex = getattr(device, 'zZFSDatasetIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZFSDatasetIgnoreNames set to %s', ignore_names_regex)
        ignore_types = getattr(device, 'zZFSDatasetIgnoreTypes', list())
        if ignore_types:
            log.info('zZFSDatasetIgnoreTypes set to %s', str(ignore_types))
        ignore_pools_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_pools_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_pools_regex)

        # Dataset components
        for pool in pools:
            if ignore_pools_regex and re.match(ignore_pools_regex, pool):
                log.debug('Skipping pool %s due to zZPoolIgnoreNames', pool)
                continue

            rm = RelationshipMap(
                compname='zpools/pool_{0}'.format(pool),
                relname='zfsDatasets',
                modname='ZenPacks.daviswr.ZFS.ZFSDataset'
                )

            datasets = pools[pool]
            for ds in datasets:
                if ignore_names_regex and re.match(ignore_names_regex, ds):
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreNames',
                        ds
                        )
                    continue
                elif ignore_types \
                        and datasets[ds].get('zDsType', '') in ignore_types:
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreTypes',
                        ds
                        )
                    continue

                comp = dict()
                for key in datasets[ds]:
                    if key in booleans:
                        comp[key] = True \
                            if ('on' == datasets[ds][key]
                                or 'yes' == datasets[ds][key]) \
                            else False
                    elif key in floats:
                        comp[key] = float(datasets[ds][key])
                    elif key in ints:
                        comp[key] = int(datasets[ds][key])
                    elif key in times:
                        comp[key] = time.strftime(
                            time_format,
                            time.localtime(int(datasets[ds][key]))
                            )
                    else:
                        comp[key] = datasets[ds][key]
                prefix = prefixes.get(comp.get('zDsType'), '')
                suffix = suffixes.get(comp.get('zDsType'), 'Dataset')
                # Pool name should already be part of the dataset name,
                # making it unique
                comp['id'] = self.prepId('{0}_{1}'.format(prefix, ds))
                comp['title'] = ds
                log.debug(
                    'Found ZFS %s: %s', comp.get('zDsType', ''), comp['id'])
                mod = 'ZenPacks.daviswr.ZFS.ZFS{0}'.format(suffix)
                rm.append(ObjectMap(
                    modname=mod,
                    data=comp
                    ))
            maps.append(rm)

        log.debug(
            'ZFS RelMap:\n%s',
            str(maps)
            )

        return maps
Example #36
    def test_subclass_catalogs(self):
        rm = RelationshipMap(
            modname="ZenPacks.test.ClassProxies.MyComponent",
            relname="myComponents")

        rm.extend([
            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponent",
                data={
                    "id": "myComponent-1",
                    "idx_device": "myComponent-1",
                    "idx_global": "myComponent-1"}),

            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponent",
                data={
                    "id": "myComponent-2",
                    "idx_device": "myComponent-2",
                    "idx_global": "myComponent-2"}),

            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponentSub1",
                data={
                    "id": "myComponent1-1",
                    "idx_device": "myComponent1-1",
                    "idx_global": "myComponent1-1",
                    "idx_device_sub1": "myComponent1-1",
                    "idx_global_sub1": "myComponent1-1"}),

            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponentSub1",
                data={
                    "id": "myComponent1-2",
                    "idx_device": "myComponent1-2",
                    "idx_global": "myComponent1-2",
                    "idx_device_sub1": "myComponent1-2",
                    "idx_global_sub1": "myComponent1-2"}),

            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponentSub2",
                data={
                    "id": "myComponent2-1",
                    "idx_device": "myComponent2-1",
                    "idx_global": "myComponent2-1",
                    "idx_device_sub2": "myComponent2-1",
                    "idx_global_sub2": "myComponent2-1"}),

            ObjectMap(
                modname="ZenPacks.test.ClassProxies.MyComponentSub2",
                data={
                    "id": "myComponent2-2",
                    "idx_device": "myComponent2-2",
                    "idx_global": "myComponent2-2",
                    "idx_device_sub2": "myComponent2-2",
                    "idx_global_sub2": "myComponent2-2"})])

        # Describe what all of the catalogs should look like.
        component_ids = [
            "myComponent-1", "myComponent1-1", "myComponent2-1",
            "myComponent-2", "myComponent1-2", "myComponent2-2",
            ]

        device_expected = {
            "MyComponent": {"idx_device": component_ids},
            "MyComponentSub1": {"idx_device_sub1": ["myComponent1-1", "myComponent1-2"]},
            "MyComponentSub2": {"idx_device_sub2": ["myComponent2-1", "myComponent2-2"]}}

        from ZenPacks.test.ClassProxies.MyComponent import MyComponent
        from ZenPacks.test.ClassProxies.MyComponentSub1 import MyComponentSub1
        from ZenPacks.test.ClassProxies.MyComponentSub2 import MyComponentSub2

        global_expected = {
            MyComponent: {"idx_global": component_ids * 2},
            MyComponentSub1: {"idx_global_sub1": ["myComponent1-1", "myComponent1-2"] * 2},
            MyComponentSub2: {"idx_global_sub2": ["myComponent2-1", "myComponent2-2"] * 2}}

        def verify_all_catalogs():
            for device in devices:
                for catalog_name, indexes in device_expected.items():
                    for index_name, expected_values in indexes.items():
                        self.assertItemsEqual(
                            expected_values,
                            [getattr(x, index_name) for x in device.search(catalog_name)])

            for class_, indexes in global_expected.items():
                for index_name, expected_values in indexes.items():
                    self.assertItemsEqual(
                        expected_values,
                        [getattr(x, index_name) for x in class_.class_search(self.dmd)])

        # Create devices and components
        devices = [
            create_device(
                self.dmd,
                zPythonClass="ZenPacks.test.ClassProxies.MyDevice",
                device_id="test-device{}".format(x),
                datamaps=[rm])
            for x in (1, 2)]

        # Verify that all catalogs are correct after initial modeling.
        verify_all_catalogs()

        # Delete catalogs.
        self.dmd.Devices._delObject("ZenPacks_test_ClassProxies_MyComponentSearch")
        self.dmd.Devices._delObject("ZenPacks_test_ClassProxies_MyComponentSub1Search")
        self.dmd.Devices._delObject("ZenPacks_test_ClassProxies_MyComponentSub2Search")

        for device in devices:
            device._delObject("ComponentBaseSearch")
            device._delObject("MyComponentSearch")
            device._delObject("MyComponentSub1Search")
            device._delObject("MyComponentSub2Search")

        # Index single component of one of the subclasses.
        devices[0].myComponents._getOb("myComponent1-1").index_object()

        # All components of superclasses should now be indexed in
        # device and global catalogs.
        self.assertItemsEqual(
            component_ids,
            [x.id for x in devices[0].search("MyComponent")])

        self.assertItemsEqual(
            component_ids * 2,
            [x.id for x in MyComponent.class_search(self.dmd)])

        # All components of the same class should be indexed in device
        # and global catalogs.
        self.assertItemsEqual(
            ["myComponent1-1", "myComponent1-2"],
            [x.id for x in devices[0].search("MyComponentSub1")])

        self.assertItemsEqual(
            ["myComponent1-1", "myComponent1-2"] * 2,
            [x.id for x in MyComponentSub1.class_search(self.dmd)])

        # All components of classes not in the inheritance hierarchy
        # should not yet be indexed in device or global catalogs.
        self.assertItemsEqual(
            [],
            [x.id for x in devices[0].search("MyComponentSub2")])

        self.assertItemsEqual(
            [],
            [x.id for x in MyComponentSub2.class_search(self.dmd)])

        # Index remaining unique device/subclass combinations.
        devices[0].myComponents._getOb("myComponent2-1").index_object()
        devices[1].myComponents._getOb("myComponent1-1").index_object()
        devices[1].myComponents._getOb("myComponent2-1").index_object()

        # Now all catalogs should be complete.
        verify_all_catalogs()
                                    break

                            if not found:
                                log.error("Unable to find a matching objectmap to extend: %s" % om_dict)

                            continue

                        objmaps[key].append(
                            ObjectMap(compname=compname, modname=modname, classname=classname, data=om_dict)
                        )
                    added_count = len(objmaps[key]) - starting_count
                    if added_count > 0:
                        log.info("  Added %d new objectmaps to %s" % (added_count, key))

        # Apply the objmaps in the right order.
        componentsMap = RelationshipMap(relname="components")
        for i in (
            "tenants",
            "regions",
            "flavors",
            "images",
            "servers",
            "zones",
            "hosts",
            "hypervisors",
            "services",
            "networks",
            "subnets",
            "routers",
            "ports",
            "agents",
    def run(self):
        with open('model.yaml', 'r') as f:
            self.model_config = yaml.load(f)

        self.connect()

        objmaps = []
        for modname, obj_attrs in self.get_model_template("Global"):
            objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for controller_num in range(1, self.options.controllers + 1):
            for modname, obj_attrs in self.get_model_template("Controller"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=controller_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for compute_num in range(1, self.options.computes + 1):
            for modname, obj_attrs in self.get_model_template("Compute"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=compute_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for tenant_num in range(3, self.options.tenants + 3):
            for modname, obj_attrs in self.get_model_template("Tenant"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=tenant_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        compute_nums = range(1, self.options.computes + 1)
        tenant_nums = range(3, self.options.tenants + 3)

        for instance_num in range(1, self.options.instances + 1):
            for modname, obj_attrs in self.get_model_template("Instance"):
                tenant_num = tenant_nums[instance_num % self.options.tenants]
                compute_num = compute_nums[instance_num % self.options.computes]

                self.talesEvalAttrs(
                    obj_attrs,
                    num=instance_num,
                    device_name=self.options.device,
                    tenant_num=tenant_num,
                    compute_num=compute_num
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        device = self.dmd.Devices.OpenStack.Infrastructure.findDevice(self.options.device)
        if not device:
            print "Creating OpenStackInfrastructure device %s" % self.options.device
            device = self.dmd.Devices.OpenStack.Infrastructure.createInstance(self.options.device)
        device.setPerformanceMonitor('localhost')

        for controller_num in range(1, self.options.controllers + 1):
            device_name = "%s_controller%d" % (self.options.device, controller_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(device_name)
            if not d:
                print "Creating controller device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        for compute_num in range(1, self.options.computes + 1):
            device_name = "%s_compute%d" % (self.options.device, compute_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(device_name)
            if not d:
                print "Creating compute device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        relmap = RelationshipMap(relname='components')
        for objmap in objmaps:
            relmap.append(objmap)

        endpoint_om = ObjectMap(
            modname='ZenPacks.zenoss.OpenStackInfrastructure.Endpoint',
            data=dict(
                set_maintain_proxydevices=True
            )
        )

        print "Applying datamaps (1/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        adm._applyDataMap(device, relmap)
        adm._applyDataMap(device, endpoint_om)

        print "Gathering network information"
        l3_agent_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNeutronAgent") if x.type == 'L3 agent']
        dhcp_agent_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNeutronAgent") if x.type == 'DHCP agent']
        all_network_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork")]
        all_router_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureRouter")]
        all_subnet_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureSubnet")]
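        # networks and subnets that have at least one port bound to an instance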
        instance_network_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork") if x.ports() and len([y for y in x.ports() if y.instance()])]
        instance_subnet_ids = [y.id for y in set(chain.from_iterable([x.subnets() for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork") if x.ports() and len([y for y in x.ports() if y.instance()])]))]

        objmaps = []
        print "Adding L3 Agent Relationships"
        for agent_id in l3_agent_ids:
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                compname="components/%s" % agent_id,
                data=dict(
                    id=agent_id,
                    set_networks=all_network_ids,
                    set_routers=all_router_ids,
                    set_subnets=all_subnet_ids
                )))

        print "Adding DHCP agent Relationships"
        for agent_id in dhcp_agent_ids:
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                compname="components/%s" % agent_id,
                data=dict(
                    id=agent_id,
                    set_networks=instance_network_ids,
                    set_subnets=instance_subnet_ids
                )))

        print "Adding instance <-> hypervisor relationship"
        hypervisor_instances = defaultdict(list)
        for instance_num in range(1, self.options.instances + 1):
            instance_id = "server-%d" % instance_num
            compute_num = compute_nums[instance_num % self.options.computes]
            hypervisor_id = "hypervisor-compute%d.1" % compute_num
            hypervisor_instances[hypervisor_id].append(instance_id)

        for hypervisor_id, instance_ids in hypervisor_instances.iteritems():
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.Hypervisor",
                compname="components/%s" % hypervisor_id,
                data=dict(
                    id=hypervisor_id,
                    set_instances=instance_ids
                )))

        print "Applying datamaps (2/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        for objmap in objmaps:
            adm._applyDataMap(device, objmap)

        print "Committing model changes."
        transaction.commit()
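    # A minimal sketch of the talesEvalAttrs() helper called from run() above;
    # the real helper is not included in this example. Assumed behavior: expand
    # simple ${name} placeholders in each string attribute using the supplied
    # keyword context (num, device_name, tenant_num, ...), modifying obj_attrs
    # in place. The placeholder syntax shown here is an assumption.
    def talesEvalAttrs(self, obj_attrs, **kwargs):
        for attr, value in obj_attrs.items():
            if not isinstance(value, basestring):
                continue
            # e.g. "${device_name}_controller${num}" -> "openstack_controller1"
            for key, val in kwargs.items():
                value = value.replace('${%s}' % key, str(val))
            obj_attrs[attr] = value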
    def process(self, device, results, log):
        log.info('Modeler %s processing data for device %s',
                self.name(), device.id)

        getdata, tabledata = results
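        # total sensor count: sum of every scalar '*Count' value in the GET results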
        sensor_count = sum([getdata[x] for x in getdata if 'Count' in x])

        maps = []

        # device-specific data
        manufacturer = 'Geist Manufacturing, Inc.'
        os_name = '%s %s' % (getdata['productTitle'], getdata['productVersion'])
        maps.append(ObjectMap(data={
            'sensor_count': sensor_count,
            'title': getdata['productFriendlyName'],
            'productUrl': getdata['productUrl'],
            'setHWProductKey': MultiArgs(getdata['productHardware'], manufacturer),
            'setOSProductKey': MultiArgs(os_name, manufacturer),
            }))

        # Components: climate sensors
        rm = RelationshipMap(
                relname='geistClimateSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistClimateSensor',
                )
        for snmpindex, row in tabledata.get('climateTable', {}).items():
            serial = row.get('climateSerial')
            if not serial:
                log.warn('Skipping climate sensor with no serial')
                continue
            log.debug('Modeling climate sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['climateName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistClimateSensor',
                data=values
                ))
        maps.append(rm)

        # Components: temperature sensors
        rm = RelationshipMap(
                relname='geistTemperatureSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor',
                )
        for snmpindex, row in tabledata.get('tempSensorTable', {}).items():
            serial = row.get('tempSensorSerial')
            if not serial:
                log.warn('Skipping temperature sensor with no serial')
                continue
            log.debug('Modeling temperature sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['tempSensorName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor',
                data=values
                ))
        maps.append(rm)

        # Components: airflow sensors
        rm = RelationshipMap(
                relname='geistAirflowSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor',
                )
        for snmpindex, row in tabledata.get('airFlowSensorTable', {}).items():
            serial = row.get('airFlowSensorSerial')
            if not serial:
                log.warn('Skipping airflow sensor with no serial')
                continue
            log.debug('Modeling airflow sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['airFlowSensorName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor',
                data=values
                ))
        maps.append(rm)

        return maps
    def process(self, device, results, log):
        log.info('processing %s for device %s', self.name(), device.id)
        maps = list()

        """ Example output through 10.12

        caching:ReservedVolumeSpace = 25000000000
        caching:LogClientIdentity = yes
        caching:CacheLimit = 70000000000
        caching:ServerRoot = "/Library/Server"
        caching:ServerGUID = "02FE97F2-41F3-4CEE-9899-27976DB91A1A"
        caching:DataPath = "/Library/Server/Caching/Data"
        caching:LocalSubnetsOnly = yes
        caching:Port = 0
        caching:CacheLimit = 70000000000
        caching:StartupStatus = "OK"
        caching:RegistrationStatus = 1
        caching:CacheFree = 52754638336
        caching:PersonalCacheUsed = 0
        caching:TotalBytesDropped = 0
        caching:CacheStatus = "OK"
        caching:TotalBytesStoredFromOrigin = 419351941
        caching:state = "RUNNING"
        caching:Port = 49232
        caching:Peers:_array_index:0:address = "aaa.bbb.ccc.ddd"
        caching:Peers:_array_index:0:port = 49094
        caching:Peers:_array_index:0:details:capabilities:ur = yes
        caching:Peers:_array_index:0:details:capabilities:sc = yes
        caching:Peers:_array_index:0:details:capabilities:pc = no
        caching:Peers:_array_index:0:details:capabilities:im = no
        caching:Peers:_array_index:0:details:capabilities:ns = yes
        caching:Peers:_array_index:0:details:capabilities:query-parameters = yes  # noqa
        caching:Peers:_array_index:0:details:cache-size = 900000000000
        caching:Peers:_array_index:0:details:ac-power = yes
        caching:Peers:_array_index:0:details:is-portable = no
        caching:Peers:_array_index:0:details:local-network:_array_index:0:speed = 1000  # noqa
        caching:Peers:_array_index:0:details:local-network:_array_index:0:wired = yes  # noqa
        caching:Peers:_array_index:0:healthy = yes
        caching:Peers:_array_index:0:version = "161"
        caching:Peers:_array_index:0:friendly = yes
        caching:Peers:_array_index:0:guid = "9B9CDED4-F70C-4910-B7D4-11D1530AD34D"  # noqa
        caching:TotalBytesStoredFromPeers = 0
        caching:RestrictedMedia = no
        caching:CacheDetails:_array_index:0:BytesUsed = 0
        caching:CacheDetails:_array_index:0:LocalizedType = "Mac Software"
        caching:CacheDetails:_array_index:0:MediaType = "Mac Software"
        caching:CacheDetails:_array_index:0:Language = "en"
        caching:CacheDetails:_array_index:1:BytesUsed = 419351941
        caching:CacheDetails:_array_index:1:LocalizedType = "iOS Software"
        caching:CacheDetails:_array_index:1:MediaType = "iOS Software"
        caching:CacheDetails:_array_index:1:Language = "en"
        ...
        caching:PersonalCacheLimit = 70000000000
        caching:CacheUsed = 419351941
        caching:TotalBytesStored = 419351941
        caching:TotalBytesImported = 0
        caching:PersonalCacheFree = 52754638336
        caching:Active = yes
        caching:TotalBytesReturned = 476014159
        """

        # Parse results
        service = dict()
        caches = dict()
        peers = dict()
        lines = results.splitlines()

        # Legacy output
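        # (flat "key = value" lines, as in the sample output above)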
        if not results.startswith('{'):
            output = dict(line.split(' = ') for line in lines)
            for key in output:
                if key.startswith('caching:CacheDetails:'):
                    short = key.replace(
                        'caching:CacheDetails:_array_index:',
                        ''
                        )
                    idx = int(short.split(':')[0])
                    k = short.split(':')[1]
                    v = output.get(key).replace('"', '')
                    if idx not in caches:
                        caches[idx] = dict()
                    caches[idx].update({k: v})
                elif key.startswith('caching:Peers:'):
                    short = key.replace('caching:Peers:_array_index:', '')
                    short = short.replace('details:', '')
                    if ('capabilities' not in key
                            and 'local-network' not in key):
                        idx = int(short.split(':')[0])
                        k = short.split(':')[1]
                        v = output.get(key).replace('"', '')
                        if idx not in peers:
                            peers[idx] = dict()
                        peers[idx].update({k: v})
                else:
                    k = key.split(':')[1]
                    service.update({k: output.get(key).replace('"', '')})

        # JSON output
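        # (each line is a self-contained JSON document; the fields of interest
        # are under its "result" key)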
        else:
            for line in lines:
                output = json.loads(line)
                service.update(output.get('result', dict()))

                # Mimic structure of legacy output
                keys = service.get('CacheDetails', dict()).keys()
                for idx in range(0, len(keys)):
                    value = service.get('CacheDetails', dict()).get(keys[idx])
                    caches[idx] = {
                        'MediaType': keys[idx],
                        'BytesUsed': value,
                        }
                    if len(keys) - 1 == idx:
                        break

                # Settings output has an element named "Parents" as well
                if output.get('name', 'status') != 'settings':
                    peer_count = 0
                    for peer in service.get('Peers', list()):
                        peers[peer_count] = peer
                        peer_count += 1
                    for peer in service.get('Parents', list()):
                        peer['is-parent'] = True
                        peers[peer_count] = peer
                        peer_count += 1
                    for idx in peers:
                        for attr in ('ac-power', 'cache-size', 'is-portable'):
                            if attr in peers[idx]['details']:
                                peers[idx][attr] = peers[idx]['details'][attr]

        # Caching Service
        booleans = [
            'Active',
            'AllowPersonalCaching',
            'LocalSubnetsOnly',
            'LogClientIdentity',
            'RestrictedMedia',
            ]

        for attr in booleans:
            if attr in service and type(service[attr]) is not bool:
                service[attr] = True if 'yes' == service[attr] else False

        integers = [
            'CacheFree',
            'CacheLimit',
            'CacheUsed',
            'Port',
            'ReservedVolumeSpace',
            ]

        for attr in integers:
            if attr in service and type(service[attr]) is not int:
                service[attr] = int(service[attr])

        # More realistic Cache Limit value if configured to "unlimited"
        if service.get('CacheLimit', 0) == 0:
            service['CacheLimit'] = service.get('CacheLimit', 0)
            service['CacheLimit'] += service.get('CacheUsed', 0)
            service['CacheLimit'] += service.get('CacheFree', 0)
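        # e.g. with the sample output above: 419351941 used + 52754638336 free
        # gives an effective limit of roughly 53 GB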

        service['id'] = self.prepId('CachingService')
        service['title'] = service.get('DataPath', 'Content Caching')

        # Escape spaces in DataPath for zencommand later
        if 'DataPath' in service:
            service['DataPath'] = service['DataPath'].replace(' ', r'\ ')

        # Not listening; the service is likely not running
        if 'Port' in service and service.get('Port') == 0:
            del service['Port']
        log.debug('Caching Service\n%s', service)

        rm = RelationshipMap(
            relname='contentCachingService',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService'
            )
        rm.append(ObjectMap(
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService',  # noqa
            data=service
            ))
        maps.append(rm)

        # Individual Cache components
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCaches',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache'
            )

        for idx in caches:
            cache = caches.get(idx)
            if 'BytesUsed' in cache:
                cache['BytesUsed'] = int(cache['BytesUsed'])
            cache['title'] = self.prepId(cache.get('MediaType', ''))
            cache['id'] = self.prepId(cache['title'])
            log.debug('Individual Cache: %s', cache)
            rm.append(ObjectMap(
                modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache',
                data=cache
                ))
        maps.append(rm)

        # Peer Server components
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCachePeers',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer'
            )

        peer_integers = [
            'cache-size',
            'port',
            ]
        peer_booleans = [
            'ac-power',
            'friendly',
            'healthy',
            'is-portable',
            ]

        for idx in peers:
            peer = peers.get(idx)
            for attr in peer_integers:
                if attr in peer and type(peer[attr]) is not int:
                    peer[attr] = int(peer[attr])
            for attr in peer_booleans:
                if attr in peer and type(peer[attr]) is not bool:
                    peer[attr] = True if 'yes' == peer[attr] else False
            peer['title'] = peer.get('address', peer.get('guid', ''))
            id_str = 'cachepeer_{0}'.format(
                peer.get('address', peer.get('guid', ''))
                )
            peer['id'] = self.prepId(id_str)
            log.debug('Peer Caching Server: %s', peer)
            rm.append(ObjectMap(
                modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer',
                data=peer
                ))
        maps.append(rm)

        return maps
    def process(self, device, results, log):
        log.info(
            "Modeler %s processing data for device %s",
            self.name(),
            device.id
            )
        maps = list()

        pools = dict()
        last_parent = None
        last_pool = None
        last_root = None
        last_tree = None
        last_type = None
        last_vdev = None
        zpool_status = False

        get_regex = r'^(?P<pool>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'
        zdb_header_regex = r'(?P<key>\S+)\:$'
        zdb_kv_regex = r'\ {4}\s*(?P<key>\S+)\:\s?(?P<value>\S+)'
        status_pool_regex = r'^\s+pool: (?P<dev>\S+)$'
        status_logs_regex = r'^\s+logs$'
        status_cache_regex = r'^\s+cache$'
        status_spare_regex = r'^\s+spares$'
        status_dev_regex = r'(?P<dev>\S+)\s+\S+(?:\s+\d+){3}$'
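        # Illustrative lines these patterns are written to match (abridged):
        #   zpool get -Hp:  "tank\tcapacity\t42%\t-"
        #   zdb:            "tank:", "    vdev_tree:", "        children[0]:"
        #   zpool status:   "\t  mirror-0  ONLINE       0     0     0"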

        for line in results.splitlines():
            get_match = re.match(get_regex, line)
            zdb_pool_match = re.match(r'^' + zdb_header_regex, line)
            zdb_tree_match = re.match(r'^    ' + zdb_header_regex, line)
            zdb_root_match = re.match(r'^        ' + zdb_header_regex, line)
            zdb_vdev_match = re.match(r'^            ' + zdb_header_regex, line)  # noqa
            zdb_kv_match = re.match(zdb_kv_regex, line)
            status_pool_match = re.match(status_pool_regex, line) \
                or re.match(r'^\t' + status_dev_regex, line)
            status_logs_match = re.match(status_logs_regex, line)
            status_cache_match = re.match(status_cache_regex, line)
            status_spare_match = re.match(status_spare_regex, line)
            status_root_match = re.match(r'^\t  ' + status_dev_regex, line)
            status_child_match = re.match(r'^\t    ' + status_dev_regex, line)

            if get_match:
                pool = get_match.group('pool')
                key = get_match.group('key')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
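                # strip the trailing '%' (capacity, fragmentation) or 'x' (dedupratio) suffix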
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                pools[pool][key] = value

            elif zdb_pool_match:
                if not zpool_status:
                    pool = zdb_pool_match.group('key')
                    if pool not in pools:
                        pools[pool] = dict()
                    last_pool = pools[pool]
                    last_pool['type'] = 'pool'
                    last_parent = last_pool

            elif zdb_tree_match:
                key = zdb_tree_match.group('key')
                if 'tree' in key:
                    last_pool[key] = dict()
                    last_tree = last_pool[key]
                    last_parent = last_tree

            elif zdb_root_match:
                key = zdb_root_match.group('key')
                last_tree[key] = dict()
                last_root = last_tree[key]
                last_parent = last_root

            elif zdb_vdev_match:
                key = zdb_vdev_match.group('key')
                last_root[key] = dict()
                last_vdev = last_root[key]
                last_parent = last_vdev

            elif zdb_kv_match:
                key = zdb_kv_match.group('key')
                value = zdb_kv_match.group('value').replace("'", "")
                # Attributes right under vdev_tree are pool-wide
                # and should already be in `zpool get` output
                if 'vdev_tree' in last_pool \
                        and last_pool['vdev_tree'] == last_parent:
                    continue
                # ZenModeler does not like these in the RelMap
                elif key in ['hostid', 'hostname']:
                    continue
                elif 'name' == key:
                    last_parent['title'] = value
                    continue
                elif 'pool_guid' == key:
                    last_parent['guid'] = value
                    continue
                # Spare devices will be modeled based on 'zpool status' output
                elif 'type' == key and 'spare' == value:
                    continue
                last_parent[key] = value
                # disk type
                if key == 'path':
                    last_parent['title'] = value.split('/')[-1]
                # mirror type
                elif key == 'id' and 'type' in last_parent:
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'],
                        value
                        )
                # raidz type
                elif key == 'nparity' \
                        and 'id' in last_parent \
                        and 'type' in last_parent:
                    last_parent['type'] += value
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'],
                        last_parent['id']
                        )

            # 'zpool status' is only used to find cache and spare devices,
            # since they're strangely absent from zdb output
            elif status_pool_match:
                zpool_status = True
                pool = status_pool_match.group('dev')
                if pool not in pools:
                    pools[pool] = dict()
                if 'vdev_tree' not in pools[pool]:
                    pools[pool]['vdev_tree'] = dict()
                last_pool = pools[pool]
                last_pool['type'] = 'pool'
                last_type = last_pool['type']
                last_tree = pools[pool]['vdev_tree']
                last_parent = last_tree

            elif status_logs_match:
                last_type = 'logs'

            elif status_cache_match:
                last_type = 'cache'

            elif status_spare_match:
                last_type = 'spare'

            # Emulate the zdb output structure for cache and spare devices:
            # each device becomes its own root vdev,
            # rather than a child vdev under a cache/spares root
            elif status_root_match:
                if 'cache' == last_type or 'spare' == last_type:
                    dev = status_root_match.group('dev')
                    key = '{0}_{1}'.format(last_type, dev)
                    if key not in last_tree:
                        last_tree[key] = dict()
                    last_root = last_tree[key]
                    last_root['title'] = dev
                    for boolean in ['cache', 'log', 'spare']:
                        last_root['is_{0}'.format(boolean)] = '0'
                    last_root['is_{0}'.format(last_type)] = '1'

            elif status_child_match:
                last_type = 'child'

        booleans = [
            'autoexpand',
            'autoreplace',
            'delegation',
            'listsnapshots',
            'readonly',
            ]

        dev_booleans = [
            'is_cache',
            'is_log',
            'is_spare',
            'whole_disk',
            ]

        ints = [
            'allocated',
            'ashift',
            'asize',
            'capacity',
            'create_txg',
            'dedupditto',
            'free',
            'freeing',
            'leaked',
            'metaslab_array',
            'metaslab_shift',
            'size',
            'txg',
            'DTL',
            ]

        floats = [
            'dedupratio',
            'fragmentation',
            ]

        # Basic Linux block device name
        # sda1
        disk_id_basic_regex = r'^([a-z]{3,})\d+$'
        # Linux /dev/disk/by-id
        # ata-WDC_WD2000F9YZ-09N20L0_WD-WCC1P0356812-part1
        # Linux /dev/disk/by-path
        # pci-0000:00:11.0-scsi-2:0:0:0-part1
        # Illumos block device name
        # c8t5000CCA03C41D2FDd0s0
        disk_id_regex = r'^(.*)(?:-part\d+|s\d+)$'

        pool_rm = RelationshipMap(
            relname='zpools',
            modname='ZenPacks.daviswr.ZFS.ZPool'
            )

        root_rm_list = list()
        child_rm_list = list()

        ignore_names_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_names_regex)

        # Pool components
        for pool in pools:
            if ignore_names_regex and re.match(ignore_names_regex, pool):
                log.debug(
                    'Skipping pool %s due to zZPoolIgnoreNames',
                    pool
                    )
                continue

            comp = dict()
            for key in pools[pool]:
                if key in booleans:
                    comp[key] = True if ('on' == pools[pool][key]) else False
                elif key in ints:
                    comp[key] = int(pools[pool][key])
                elif key in floats:
                    comp[key] = float(pools[pool][key])
                elif not key == 'vdev_tree' \
                        and not key == 'name':
                    comp[key] = pools[pool][key]
            # Can't use the GUID since it's not available in iostat
            comp['id'] = self.prepId('pool_{0}'.format(pool))
            log.debug('Found ZPool: %s', comp['id'])
            pool_rm.append(ObjectMap(
                modname='ZenPacks.daviswr.ZFS.ZPool',
                data=comp
                ))

            # Root vDev components
            roots = pools[pool].get('vdev_tree', None)
            if roots is not None:
                log.debug('ZPool %s has children', comp['id'])
                root_rm = RelationshipMap(
                    compname='zpools/pool_{0}'.format(pool),
                    relname='zrootVDevs',
                    modname='ZenPacks.daviswr.ZFS.ZRootVDev'
                    )
                for key in roots.keys():
                    if not key.startswith('children') \
                            and not key.startswith('cache_') \
                            and not key.startswith('spare_'):
                        del roots[key]
                for root in roots:
                    comp = dict()
                    children = list()
                    for key in roots[root]:
                        if key in dev_booleans:
                            comp[key] = True \
                                if '1' == roots[root][key] \
                                else False
                        elif key in ints:
                            comp[key] = int(roots[root][key])
                        elif key == 'type':
                            comp['VDevType'] = roots[root][key]
                        elif key.startswith('children[') \
                                or key.startswith('cache_') \
                                or key.startswith('spare_'):
                            children.append(roots[root][key])
                        elif not key == 'name':
                            comp[key] = roots[root][key]
                    comp['pool'] = pool
                    if comp.get('whole_disk') and comp.get('title'):
                        match = re.match(disk_id_regex, comp['title']) \
                            or re.match(disk_id_basic_regex, comp['title'])
                        if match:
                            comp['title'] = match.groups()[0]
                    id_str = '{0}_{1}'.format(
                        pool,
                        comp.get('title', '').replace('-', '_')
                        )
                    comp['id'] = self.prepId(id_str)
                    if comp.get('is_cache'):
                        modname = 'CacheDev'
                    elif comp.get('is_log'):
                        modname = 'LogDev'
                    elif comp.get('is_spare'):
                        modname = 'SpareDev'
                    else:
                        modname = 'RootVDev'
                    log.debug('Found %s: %s', modname, comp['id'])
                    root_rm.append(ObjectMap(
                        modname='ZenPacks.daviswr.ZFS.Z{0}'.format(modname),
                        data=comp
                        ))

                    # Store Dev components
                    if len(children) > 0:
                        log.debug('Root vDev %s has children', comp['id'])
                        child_rm = RelationshipMap(
                            compname='zpools/pool_{0}/zrootVDevs/{1}'.format(
                                pool,
                                id_str
                                ),
                            relname='zstoreDevs',
                            modname='ZenPacks.daviswr.ZFS.ZStoreDev'
                            )
                        for child in children:
                            comp = dict()
                            for key in child:
                                if key in dev_booleans:
                                    comp[key] = True \
                                        if '1' == child[key] \
                                        else False
                                elif key in ints:
                                    comp[key] = int(child[key])
                                elif key == 'type':
                                    comp['VDevType'] = child[key]
                                elif not key == 'name':
                                    comp[key] = child[key]
                            comp['pool'] = pool
                            if comp.get('whole_disk') and comp.get('title'):
                                match = re.match(
                                    disk_id_regex,
                                    comp['title']
                                    ) \
                                    or re.match(
                                        disk_id_basic_regex,
                                        comp['title']
                                        )
                                if match:
                                    comp['title'] = match.groups()[0]
                            id_str = '{0}_{1}'.format(
                                pool,
                                comp.get('title', '').replace('-', '_')
                                )
                            comp['id'] = self.prepId(id_str)
                            log.debug('Found child vDev: %s', comp['id'])
                            child_rm.append(ObjectMap(
                                modname='ZenPacks.daviswr.ZFS.ZStoreDev',
                                data=comp
                                ))
                        child_rm_list.append(child_rm)
                root_rm_list.append(root_rm)

        maps.append(pool_rm)
        maps += root_rm_list
        maps += child_rm_list

        log.debug(
            'ZPool RelMap:\n%s',
            str(maps)
            )

        return maps
    def processTblSupplies(self, tblSupplies, tblColors, log):

        # initialize separate maps for toners and other supplies;
        # use RelationshipMap() so each relationship can be specified explicitly,
        # since this single modeler creates more than one component type
        mapSupplies = RelationshipMap(modname='ZenPacks.TwoNMS.PrinterMIB.PrinterSupply', relname='printermibsupply')
        mapToners = RelationshipMap(modname='ZenPacks.TwoNMS.PrinterMIB.PrinterToner', relname='printermibtoner')


        # simplify the tblColors map to make the code easier to read
        colors = {}
        for cId, cInfo in tblColors.iteritems():
            colors[str(cId)] = cInfo['prtMarkerColorantValue'].split("\x00")[0]
        log.debug("colors table = %s", colors)

        # go over each supply and classify it as a toner (ink cartridge) or another supply
        for supplyId, supplyData in tblSupplies.iteritems():

            # create a temp map first because we don't know yet what kind of supply we have
            mapTemp = self.objectMap(supplyData)
            mapTemp.id = self.prepId(supplyId)
            isToner = False

            # check whether it's a toner or another supply; color toners have
            # prtMarkerSuppliesColorantIndex > 0, and the colorant id is translated below
            try:
                if mapTemp.prtMarkerSuppliesColorantIndex > 0:
                    isToner = True
                    # overwrite the index with the color value
                    if colors[str(mapTemp.prtMarkerSuppliesColorantIndex)] is not None:
                        mapTemp.prtMarkerSuppliesColorantValue = colors[str(mapTemp.prtMarkerSuppliesColorantIndex)]
                        mapTemp.rgbColorCode = self.rgbColorCodes[mapTemp.prtMarkerSuppliesColorantValue.lower()]
                    else:
                        mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
            except (AttributeError, KeyError):
                log.warn("AttributeErorr or KeyError occurred - Supply does not support the prtMarkerSuppliesColorantIndex oid")
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue
            except Exception:
                log.warn("Unknown error occurred")
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue

            # translate the supply unit type id
            try:
                if self.PrtMarkerSuppliesSupplyUnitTC[str(mapTemp.prtMarkerSuppliesSupplyUnitTC)] is not None:
                    mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC[str(mapTemp.prtMarkerSuppliesSupplyUnitTC)]
            except AttributeError:
                log.warn("Supply does not support the prtMarkerSuppliesSupplyUnitTC oid")
                mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC['na']
                #continue

            # translate the supply type id
            try:
                if self.PrtMarkerSuppliesTypeTC[str(mapTemp.prtMarkerSuppliesTypeTC)] is not None:
                    mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC[str(mapTemp.prtMarkerSuppliesTypeTC)]
            except AttributeError:
                log.warn("Supply does not support the prtMarkerSuppliesTypeTC oid")
                mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC['na']
                #continue

            # add the supply usage as a percentage
            try:
                mapTemp.usagepct = self.calculateUsagePct(mapTemp.prtMarkerSuppliesLevel, mapTemp.prtMarkerSuppliesMaxCapacity, log)
            except Exception:
                mapTemp.usagepct = 'na'

            # add the temp map to the toner or supply map
            if isToner:
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterToner"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New toner found: %s", mapTemp)
                mapToners.append(mapTemp)
            else:
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterSupply"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New supply found: %s", mapTemp)
                mapSupplies.append(mapTemp)

        return mapSupplies, mapToners
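    # A minimal sketch of the calculateUsagePct() helper used above; the real
    # implementation is not included in this example. Assumed behavior: return
    # the supply level as a percentage of capacity, and raise on the negative
    # Printer-MIB sentinel values (-1 "other", -2 "unknown", -3 "some remaining")
    # so the caller's except clause falls back to 'na'.
    def calculateUsagePct(self, level, maxCapacity, log):
        level = int(level)
        maxCapacity = int(maxCapacity)
        if level < 0 or maxCapacity <= 0:
            raise ValueError("supply level/capacity not reported numerically")
        pct = int(round(100.0 * level / maxCapacity))
        log.debug("supply usage: %d/%d = %d%%", level, maxCapacity, pct)
        return pct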