def process(self, device, results, log):
        maps = []

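        # One relationship map for the top-level components; each top
        # component then gets its own nested map of bottom components,
        # anchored via compname below.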
        top_rm = RelationshipMap(relname='testTopComponents')

        maps.append(top_rm)

        for i in range(device.zTestCalcPerfTopComponentsPerDevice):
            top_rm.append(
                ObjectMap(
                    data={'id': 'top{}'.format(i)},
                    modname='ZenPacks.test.CalcPerfScale.TestTopComponent'))

            bottom_rm = RelationshipMap(
                compname='testTopComponents/top{}'.format(i),
                relname='testBottomComponents')

            for j in range(device.zTestCalcPerfBottomComponentsPerTopComponent):
                bottom_rm.append(
                    ObjectMap(
                        data={'id': 'top{}-bottom{}'.format(i, j)},
                        modname='ZenPacks.test.CalcPerfScale.TestBottomComponent'))

            maps.append(bottom_rm)

        return maps
    def process(self, device, results, log):
        log.info("Processing %s for device %s", self.name(), device.id)
        getdata, tabledata = results
        maps = []

        ocpRelMap = RelationshipMap(
            relname='raritanOCPs',
            compname=self.compname,
            modname='ZenPacks.community.Raritan.RaritanOCP')

        for snmpindex, row in tabledata.get(
                'overCurrentProtectorConfigurationTable', {}).items():
            ocpData = {}
            snmpindex = snmpindex.strip('.')
            log.debug('snmpindex: %s', snmpindex)
            log.debug('row: %s', row)

            title = row.get('overCurrentProtectorLabel')
            name = row.get('overCurrentProtectorName')
            if name:
                title = '{} ({})'.format(title, name)

            ocpData['id'] = self.prepId(title)
            ocpData['title'] = title
            ocpData['snmpindex'] = snmpindex

            ocpSensors = tabledata.get(
                'overCurrentProtectorSensorConfigurationTable', {})
            log.debug('sensors:{}'.format(ocpSensors))
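            # Sensor rows are indexed as '<OCP index>.<sensor number>', so
            # look up each sensor type with that composite index.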
            for sensor, sensorNum in self.sensorType.items():
                sensorIndex = '{}.{}'.format(snmpindex, sensorNum)
                ocpSensor = ocpSensors[sensorIndex]
                ocpData['{}_units'.format(sensor)] = \
                    ocpSensor['overCurrentProtectorSensorUnits']
                ocpData['{}_digits'.format(sensor)] = \
                    ocpSensor['overCurrentProtectorSensorDecimalDigits']

            log.debug('sensorData:{}'.format(ocpData))

            ocpRelMap.append(
                ObjectMap(
                    compname=self.compname,
                    modname='ZenPacks.community.Raritan.RaritanOCP',
                    data=ocpData,
                ))
        maps.append(ocpRelMap)

        return maps
    def processTblTrays(self, tblTrays, log):
        mapTrays = RelationshipMap(
            modname='ZenPacks.TwoNMS.PrinterMIB.PrinterTray',
            relname='printermibtray')

        # iterate each tray and translate the mibs
        for trayId, trayData in tblTrays.iteritems():
            # create an input Tray object
            trayObj = self.objectMap(trayData)
            trayObj.id = self.prepId(trayId)

            # translate prtInputTypeTC
            try:
                if self.PrtInputTypeTC[str(trayObj.prtInputTypeTC)] is not None:
                    trayObj.prtInputType = self.PrtInputTypeTC[str(
                        trayObj.prtInputTypeTC)]
            except AttributeError:
                log.warn("Tray does not support the prtInputTypeTC oid")
                trayObj.prtInputType = self.PrtInputTypeTC['na']
                #continue

            # translate PrtCapacityUnitTC
            try:
                if self.PrtCapacityUnitTC[str(
                        trayObj.prtCapacityUnitTC)] is not None:
                    trayObj.prtCapacityUnit = self.PrtCapacityUnitTC[str(
                        trayObj.prtCapacityUnitTC)]
            except AttributeError:
                log.warn("Tray does not support the PrtCapacityUnitTC oid")
                trayObj.prtCapacityUnit = self.PrtCapacityUnitTC['na']
                #continue

            # add a percentage value of the usage
            try:
                trayObj.usagepct = self.calculateUsagePct(
                    trayObj.prtInputCurrentLevel, trayObj.prtInputMaxCapacity,
                    log)
            except Exception:
                trayObj.usagepct = 'na'

            # assign object to the relationshipMap
            trayObj.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterTray"
            trayObj.supplyId = trayObj.id
            trayObj.snmpindex = trayObj.id
            log.debug("New input tray found: %s", trayObj)
            mapTrays.append(trayObj)

        return mapTrays
    def process(self, device, results, log):
        log.info("Processing %s for device %s", self.name(), device.id)
        getdata, tabledata = results
        if getdata['vsxVsConfigured'] <= 0:
            log.debug('No Virtual System found (vsxVsConfigured=%s)',
                      getdata['vsxVsConfigured'])
            return []

        vsxFirewallMap = RelationshipMap(
            relname='vsxfirewalls',
            compname=self.compname,
            modname='ZenPacks.community.CheckPointVSX.VSXFirewall')

        maps = []
        vsxStatusTable = tabledata.get('vsxStatusTable', {})
        vsxCountersTable = tabledata.get('vsxCountersTable', {})
        for snmpindex, vs in vsxStatusTable.items():
            if vs['vsxStatusVsType'] != 'Virtual System':
                continue
            snmpindex = snmpindex.strip('.')
            name = vs['vsxStatusVsName']
            haState = vs['vsxStatusHAState']
            # TODO: The connection limit is retrieved, based on the snmpindex. However, I don't think there's
            # any guarantee that the snmpindex in the vsxCountersTable is identical. It would be preferable to
            # use the VSId. Same for the data collection, but it would then require a PythonPlugin.
            vsxFirewallMap.append(
                ObjectMap(
                    compname=self.compname,
                    modname='ZenPacks.community.CheckPointVSX.VSXFirewall',
                    data={
                        'id': self.prepId(name),
                        'title': '{} ({})'.format(name, haState),
                        'snmpindex': snmpindex,
                        'VSId': vs['vsxStatusVSId'],
                        'HAState': haState,
                        'ConnLimit': vsxCountersTable.get(
                            snmpindex, {}).get('vsxCountersConnTableLimit'),
                    }))
        maps.append(vsxFirewallMap)

        return maps
    def disks(self, disks, compname, log):
        rm = RelationshipMap()
        rm.compname = compname
        rm.relname = 'disks'
        rm.modname = 'ZenPacks.CS.NetApp.CMode.Disk'
        rm.classname = 'Disk'

        for disk in disks:
            om = ObjectMap()
            om.modname = 'ZenPacks.CS.NetApp.CMode.Disk'
            om.id = self.prepId(disk['disk']['name'])
            om.disk_name = disk['disk']['name']
            om.position = disk['position']
            om.state = disk['state']
            om.type = disk['type']
            om.usable_size = disk['usable_size']
            rm.append(om)

        return rm
    def raidgroups(self, raidgroups, compname, log):
        rm = RelationshipMap()
        rm.compname = compname
        rm.relname = 'raidGroups'
        rm.modname = 'ZenPacks.CS.NetApp.CMode.RaidGroup'
        rm.classname = 'RaidGroup'

        for raid in raidgroups:
            om = ObjectMap()
            om.modname = 'ZenPacks.CS.NetApp.CMode.RaidGroup'
            om.id = self.prepId(raid['name'])
            om.rg_name = raid['name']
            om.cache_tier = raid['cache_tier']
            om.degraded = raid['degraded']
            om.recomputing_parity_active = raid['recomputing_parity']['active']
            om.reconstruct_active = raid['reconstruct']['active']
            rm.append(om)

            child_compname = '{parent}/raidGroups/{id}'.format(
                parent=compname, id=om.id)
            diskrm = self.disks(raid['disks'], child_compname, log)

        return (rm, diskrm)
    def getProcessorDetails(self, octetThree, octetFourFive, log):
        """determine the processor type and speed, and build maps"""
        log.info("Processor type: %s, Processor speed: %s",
                 octetThree, octetFourFive)
        myp = self.hex2int(octetThree)
        if myp in self.processorMap:
            myp = self.processorMap[myp]
        else:
            log.error("Problem determining processor type for type %s", myp)
            myp = "Unknown"
        mys = self.hex2int(octetFourFive)
        om = ObjectMap(
            {
                'id': '0',
                'clockspeed': mys,
                'extspeed': mys,
            },
            compname="hw",
            modname="Products.ZenModel.CPU")
        om.setProductKey = MultiArgs(myp, self.foundryName)
        rm = RelationshipMap(compname="hw",
                             relname="cpus",
                             modname="Products.ZenModel.CPU")
        rm.append(om)
        return rm
    def collect(self, device, log):
        log.info("%s: faking data for %s", self.name(), device.id)

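        # Three flat relationship maps; pools and objects are linked to their
        # parents via the set_aggLapMetaPool / set_aggLapPool properties
        # rather than via nested compname maps.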
        metapools_rm = RelationshipMap(relname="aggLapMetaPools")
        pools_rm = RelationshipMap(relname="aggLapPools")
        objects_rm = RelationshipMap(relname="aggLapObjects")

        for metapool_idx in range(1, 2):
            metapools_rm.append(
                ObjectMap(modname="ZenPacks.test.AggLap.AggLapMetaPool",
                          data={
                              "id": "metapool-{}".format(metapool_idx),
                              "title": "Meta Pool {}".format(metapool_idx),
                          }))

            for pool_idx in range(1, 3):
                pools_rm.append(
                    ObjectMap(
                        modname="ZenPacks.test.AggLap.AggLapPool",
                        data={
                            "id": "pool-{}-{}".format(metapool_idx, pool_idx),
                            "title": "Pool {}/{}".format(
                                metapool_idx, pool_idx),
                            "set_aggLapMetaPool": "metapool-{}".format(
                                metapool_idx),
                        }))

                for object_idx in range(1, 3):
                    objects_rm.append(
                        ObjectMap(
                            modname="ZenPacks.test.AggLap.AggLapObject",
                            data={
                                "id": "object-{}-{}-{}".format(
                                    metapool_idx, pool_idx, object_idx),
                                "title": "Object {}/{}/{}".format(
                                    metapool_idx, pool_idx, object_idx),
                                "set_aggLapPool": "pool-{}-{}".format(
                                    metapool_idx, pool_idx),
                            }))

        return defer.succeed([metapools_rm, pools_rm, objects_rm])
    def process(self, device, results, log):
        log.info("Modeler %s processing data for device %s", self.name(),
                 device.id)

        getdata, tabledata = results
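        # Total sensor count: sum every "*Count" scalar returned by the GET
        # portion of the query.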
        sensor_count = sum([getdata[x] for x in getdata if "Count" in x])

        maps = []

        # device-specific data
        manufacturer = "Geist Manufacturing, Inc."
        os_name = "%s %s" % (getdata["productTitle"],
                             getdata["productVersion"])
        maps.append(
            ObjectMap(
                data={
                    "sensor_count": sensor_count,
                    "title": getdata["productFriendlyName"],
                    "productUrl": getdata["productUrl"],
                    "setHWProductKey": MultiArgs(getdata["productHardware"],
                                                 manufacturer),
                    "setOSProductKey": MultiArgs(os_name, manufacturer),
                }))

        # Components: climate sensors
        rm = RelationshipMap(
            relname="geistClimateSensors",
            modname="ZenPacks.crosse.Geist.Monitor.GeistClimateSensor",
        )
        for snmpindex, row in tabledata.get("climateTable", {}).items():
            serial = row.get("climateSerial")
            if not serial:
                log.warn("Skipping climate sensor with no serial")
                continue
            log.debug("Modeling climate sensor %s", serial)

            values = {k: row[k] for k in row}
            values["id"] = self.prepId(serial)
            values["title"] = values["climateName"]
            values["snmpindex"] = snmpindex.strip(".")

            rm.append(
                ObjectMap(
                    modname="ZenPacks.crosse.Geist.Monitor.GeistClimateSensor",
                    data=values,
                ))
        maps.append(rm)

        # Components: temperature sensors
        rm = RelationshipMap(
            relname="geistTemperatureSensors",
            modname="ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor",
        )
        for snmpindex, row in tabledata.get("tempSensorTable", {}).items():
            serial = row.get("tempSensorSerial")
            if not serial:
                log.warn("Skipping temperature sensor with no serial")
                continue
            log.debug("Modeling temperature sensor %s", serial)

            values = {k: row[k] for k in row}
            values["id"] = self.prepId(serial)
            values["title"] = values["tempSensorName"]
            values["snmpindex"] = snmpindex.strip(".")

            rm.append(
                ObjectMap(
                    modname=
                    "ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor",
                    data=values,
                ))
        maps.append(rm)

        # Components: airflow sensors
        rm = RelationshipMap(
            relname="geistAirflowSensors",
            modname="ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor",
        )
        for snmpindex, row in tabledata.get("airFlowSensorTable", {}).items():
            serial = row.get("airFlowSensorSerial")
            if not serial:
                log.warn("Skipping airflow sensor with no serial")
                continue
            log.debug("Modeling airflow sensor %s", serial)

            values = {k: row[k] for k in row}
            values["id"] = self.prepId(serial)
            values["title"] = values["airFlowSensorName"]
            values["snmpindex"] = snmpindex.strip(".")

            rm.append(
                ObjectMap(
                    modname="ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor",
                    data=values,
                ))
        maps.append(rm)

        return maps
    def process(self, device, results, log):
        log.info(
            "Modeler %s processing data for device %s",
            self.name(),
            device.id
            )
        maps = list()

        pools = dict()
        last_parent = None
        last_pool = None
        last_root = None
        last_tree = None
        last_type = None
        last_vdev = None
        zpool_status = False

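        # Patterns for parsing 'zpool get', 'zdb' and 'zpool status' output;
        # zdb nesting (pool -> vdev tree -> root vdev -> child vdev) is
        # inferred from the leading indentation depth.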
        get_regex = r'^(?P<pool>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'
        zdb_header_regex = r'(?P<key>\S+)\:$'
        zdb_kv_regex = r'\ {4}\s*(?P<key>\S+)\:\s?(?P<value>\S+)'
        status_pool_regex = r'^\s+pool: (?P<dev>\S+)$'
        status_logs_regex = r'^\s+logs$'
        status_cache_regex = r'^\s+cache$'
        status_spare_regex = r'^\s+spares$'
        status_dev_regex = r'(?P<dev>\S+)\s+\S+(?:\s+\d+){3}$'

        for line in results.splitlines():
            get_match = re.match(get_regex, line)
            zdb_pool_match = re.match(r'^' + zdb_header_regex, line)
            zdb_tree_match = re.match(r'^    ' + zdb_header_regex, line)
            zdb_root_match = re.match(r'^        ' + zdb_header_regex, line)
            zdb_vdev_match = re.match(r'^            ' + zdb_header_regex, line)  # noqa
            zdb_kv_match = re.match(zdb_kv_regex, line)
            status_pool_match = re.match(status_pool_regex, line) \
                or re.match(r'^\t' + status_dev_regex, line)
            status_logs_match = re.match(status_logs_regex, line)
            status_cache_match = re.match(status_cache_regex, line)
            status_spare_match = re.match(status_spare_regex, line)
            status_root_match = re.match(r'^\t  ' + status_dev_regex, line)
            status_child_match = re.match(r'^\t    ' + status_dev_regex, line)

            if get_match:
                pool = get_match.group('pool')
                key = get_match.group('key')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                pools[pool][key] = value

            elif zdb_pool_match:
                if not zpool_status:
                    pool = zdb_pool_match.group('key')
                    if pool not in pools:
                        pools[pool] = dict()
                    last_pool = pools[pool]
                    last_pool['type'] = 'pool'
                    last_parent = last_pool

            elif zdb_tree_match:
                key = zdb_tree_match.group('key')
                if 'tree' in key:
                    last_pool[key] = dict()
                    last_tree = last_pool[key]
                    last_parent = last_tree

            elif zdb_root_match:
                key = zdb_root_match.group('key')
                last_tree[key] = dict()
                last_root = last_tree[key]
                last_parent = last_root

            elif zdb_vdev_match:
                key = zdb_vdev_match.group('key')
                last_root[key] = dict()
                last_vdev = last_root[key]
                last_parent = last_vdev

            elif zdb_kv_match:
                key = zdb_kv_match.group('key')
                value = zdb_kv_match.group('value').replace("'", "")
                # Attributes right under vdev_tree are pool-wide
                # and should already be in `zpool get` output
                if 'vdev_tree' in last_pool \
                        and last_pool['vdev_tree'] == last_parent:
                    continue
                # ZenModeler does not like these in the RelMap
                elif key in ['hostid', 'hostname']:
                    continue
                elif 'name' == key:
                    last_parent['title'] = value
                    continue
                elif 'pool_guid' == key:
                    last_parent['guid'] = value
                    continue
                # Spare devices will be modeled based on 'zpool status' output
                elif 'type' == key and 'spare' == value:
                    continue
                last_parent[key] = value
                # disk type
                if key == 'path':
                    last_parent['title'] = value.split('/')[-1]
                # mirror type
                elif key == 'id' and 'type' in last_parent:
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'],
                        value
                        )
                # raidz type
                elif key == 'nparity' \
                        and 'id' in last_parent \
                        and 'type' in last_parent:
                    last_parent['type'] += value
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'],
                        last_parent['id']
                        )

            # 'zpool status' is only to find cache devices
            # since they're strangely absent from zdb
            elif status_pool_match:
                zpool_status = True
                pool = status_pool_match.group('dev')
                if pool not in pools:
                    pools[pool] = dict()
                if 'vdev_tree' not in pools[pool]:
                    pools[pool]['vdev_tree'] = dict()
                last_pool = pools[pool]
                last_pool['type'] = 'pool'
                last_type = last_pool['type']
                last_tree = pools[pool]['vdev_tree']
                last_parent = last_tree

            elif status_logs_match:
                last_type = 'logs'

            elif status_cache_match:
                last_type = 'cache'

            elif status_spare_match:
                last_type = 'spare'

            # Emulate structure in zdb output for log devices
            # Each device is a root vdev,
            # rather than a child vdev in a logs/cache root
            elif status_root_match:
                if 'cache' == last_type or 'spare' == last_type:
                    dev = status_root_match.group('dev')
                    key = '{0}_{1}'.format(last_type, dev)
                    if key not in last_tree:
                        last_tree[key] = dict()
                    last_root = last_tree[key]
                    last_root['title'] = dev
                    for boolean in ['cache', 'log', 'spare']:
                        last_root['is_{0}'.format(boolean)] = '0'
                    last_root['is_{0}'.format(last_type)] = '1'

            elif status_child_match:
                last_type = 'child'

        booleans = [
            'autoexpand',
            'autoreplace',
            'delegation',
            'listsnapshots',
            'readonly',
            ]

        dev_booleans = [
            'is_cache',
            'is_log',
            'is_spare',
            'whole_disk',
            ]

        ints = [
            'allocated',
            'ashift',
            'asize',
            'capacity',
            'create_txg',
            'dedupditto',
            'free',
            'freeing',
            'leaked',
            'metaslab_array',
            'metaslab_shift',
            'size',
            'txg',
            'DTL',
            ]

        floats = [
            'dedupratio',
            'fragmentation',
            ]

        # Basic Linux block device name
        # sda1
        disk_id_basic_regex = r'^([a-z]{3,})\d+$'
        # Linux /dev/disk/by-id
        # ata-WDC_WD2000F9YZ-09N20L0_WD-WCC1P0356812-part1
        # Linux /dev/disk/by-path
        # pci-0000:00:11.0-scsi-2:0:0:0-part1
        # Illumos block device name
        # c8t5000CCA03C41D2FDd0s0
        disk_id_regex = r'^(.*)(?:-part\d+|s\d+)$'

        pool_rm = RelationshipMap(
            relname='zpools',
            modname='ZenPacks.daviswr.ZFS.ZPool'
            )

        root_rm_list = list()
        child_rm_list = list()

        ignore_names_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_names_regex)

        # Pool components
        for pool in pools:
            if ignore_names_regex and re.match(ignore_names_regex, pool):
                log.debug(
                    'Skipping pool %s due to zZPoolIgnoreNames',
                    pool
                    )
                continue

            comp = dict()
            for key in pools[pool]:
                if key in booleans:
                    comp[key] = True if ('on' == pools[pool][key]) else False
                elif key in ints:
                    comp[key] = int(pools[pool][key])
                elif key in floats:
                    comp[key] = float(pools[pool][key])
                elif not key == 'vdev_tree' \
                        and not key == 'name':
                    comp[key] = pools[pool][key]
            # Can't use the GUID since it's not available in iostat
            comp['id'] = self.prepId('pool_{0}'.format(pool))
            log.debug('Found ZPool: %s', comp['id'])
            pool_rm.append(ObjectMap(
                modname='ZenPacks.daviswr.ZFS.ZPool',
                data=comp
                ))

            # Root vDev components
            roots = pools[pool].get('vdev_tree', None)
            if roots is not None:
                log.debug('ZPool %s has children', comp['id'])
                root_rm = RelationshipMap(
                    compname='zpools/pool_{0}'.format(pool),
                    relname='zrootVDevs',
                    modname='ZenPacks.daviswr.ZFS.ZRootVDev'
                    )
                for key in roots.keys():
                    if not key.startswith('children') \
                            and not key.startswith('cache_') \
                            and not key.startswith('spare_'):
                        del roots[key]
                for root in roots:
                    comp = dict()
                    children = list()
                    for key in roots[root]:
                        if key in dev_booleans:
                            comp[key] = True \
                                if '1' == roots[root][key] \
                                else False
                        elif key in ints:
                            comp[key] = int(roots[root][key])
                        elif key == 'type':
                            comp['VDevType'] = roots[root][key]
                        elif key.startswith('children[') \
                                or key.startswith('cache_') \
                                or key.startswith('spare_'):
                            children.append(roots[root][key])
                        elif not key == 'name':
                            comp[key] = roots[root][key]
                    comp['pool'] = pool
                    if comp.get('whole_disk') and comp.get('title'):
                        match = re.match(disk_id_regex, comp['title']) \
                            or re.match(disk_id_basic_regex, comp['title'])
                        if match:
                            comp['title'] = match.groups()[0]
                    id_str = '{0}_{1}'.format(
                        pool,
                        comp.get('title', '').replace('-', '_')
                        )
                    comp['id'] = self.prepId(id_str)
                    if comp.get('is_cache'):
                        modname = 'CacheDev'
                    elif comp.get('is_log'):
                        modname = 'LogDev'
                    elif comp.get('is_spare'):
                        modname = 'SpareDev'
                    else:
                        modname = 'RootVDev'
                    log.debug('Found %s: %s', modname, comp['id'])
                    root_rm.append(ObjectMap(
                        modname='ZenPacks.daviswr.ZFS.Z{0}'.format(modname),
                        data=comp
                        ))

                    # Store Dev components
                    if len(children) > 0:
                        log.debug('Root vDev %s has children', comp['id'])
                        child_rm = RelationshipMap(
                            compname='zpools/pool_{0}/zrootVDevs/{1}'.format(
                                pool,
                                id_str
                                ),
                            relname='zstoreDevs',
                            modname='ZenPacks.daviswr.ZFS.ZStoreDev'
                            )
                        for child in children:
                            comp = dict()
                            for key in child:
                                if key in dev_booleans:
                                    comp[key] = True \
                                        if '1' == child[key] \
                                        else False
                                elif key in ints:
                                    comp[key] = int(child[key])
                                elif key == 'type':
                                    comp['VDevType'] = child[key]
                                elif not key == 'name':
                                    comp[key] = child[key]
                            comp['pool'] = pool
                            if comp.get('whole_disk') and comp.get('title'):
                                match = re.match(
                                    disk_id_regex,
                                    comp['title']
                                    ) \
                                    or re.match(
                                        disk_id_basic_regex,
                                        comp['title']
                                        )
                                if match:
                                    comp['title'] = match.groups()[0]
                            id_str = '{0}_{1}'.format(
                                pool,
                                comp.get('title', '').replace('-', '_')
                                )
                            comp['id'] = self.prepId(id_str)
                            log.debug('Found child vDev: %s', comp['id'])
                            child_rm.append(ObjectMap(
                                modname='ZenPacks.daviswr.ZFS.ZStoreDev',
                                data=comp
                                ))
                        child_rm_list.append(child_rm)
                root_rm_list.append(root_rm)

        maps.append(pool_rm)
        maps += root_rm_list
        maps += child_rm_list

        log.debug(
            'ZPool RelMap:\n%s',
            str(maps)
            )

        return maps
    def process(self, device, results, log):
        log.info('processing %s for device %s', self.name(), device.id)
        maps = list()

        """ Example output through 10.12

        caching:ReservedVolumeSpace = 25000000000
        caching:LogClientIdentity = yes
        caching:CacheLimit = 70000000000
        caching:ServerRoot = "/Library/Server"
        caching:ServerGUID = "02FE97F2-41F3-4CEE-9899-27976DB91A1A"
        caching:DataPath = "/Library/Server/Caching/Data"
        caching:LocalSubnetsOnly = yes
        caching:Port = 0
        caching:CacheLimit = 70000000000
        caching:StartupStatus = "OK"
        caching:RegistrationStatus = 1
        caching:CacheFree = 52754638336
        caching:PersonalCacheUsed = 0
        caching:TotalBytesDropped = 0
        caching:CacheStatus = "OK"
        caching:TotalBytesStoredFromOrigin = 419351941
        caching:state = "RUNNING"
        caching:Port = 49232
        caching:Peers:_array_index:0:address = "aaa.bbb.ccc.ddd"
        caching:Peers:_array_index:0:port = 49094
        caching:Peers:_array_index:0:details:capabilities:ur = yes
        caching:Peers:_array_index:0:details:capabilities:sc = yes
        caching:Peers:_array_index:0:details:capabilities:pc = no
        caching:Peers:_array_index:0:details:capabilities:im = no
        caching:Peers:_array_index:0:details:capabilities:ns = yes
        caching:Peers:_array_index:0:details:capabilities:query-parameters = yes  # noqa
        caching:Peers:_array_index:0:details:cache-size = 900000000000
        caching:Peers:_array_index:0:details:ac-power = yes
        caching:Peers:_array_index:0:details:is-portable = no
        caching:Peers:_array_index:0:details:local-network:_array_index:0:speed = 1000  # noqa
        caching:Peers:_array_index:0:details:local-network:_array_index:0:wired = yes  # noqa
        caching:Peers:_array_index:0:healthy = yes
        caching:Peers:_array_index:0:version = "161"
        caching:Peers:_array_index:0:friendly = yes
        caching:Peers:_array_index:0:guid = "9B9CDED4-F70C-4910-B7D4-11D1530AD34D"  # noqa
        caching:TotalBytesStoredFromPeers = 0
        caching:RestrictedMedia = no
        caching:CacheDetails:_array_index:0:BytesUsed = 0
        caching:CacheDetails:_array_index:0:LocalizedType = "Mac Software"
        caching:CacheDetails:_array_index:0:MediaType = "Mac Software"
        caching:CacheDetails:_array_index:0:Language = "en"
        caching:CacheDetails:_array_index:1:BytesUsed = 419351941
        caching:CacheDetails:_array_index:1:LocalizedType = "iOS Software"
        caching:CacheDetails:_array_index:1:MediaType = "iOS Software"
        caching:CacheDetails:_array_index:1:Language = "en"
        ...
        caching:PersonalCacheLimit = 70000000000
        caching:CacheUsed = 419351941
        caching:TotalBytesStored = 419351941
        caching:TotalBytesImported = 0
        caching:PersonalCacheFree = 52754638336
        caching:Active = yes
        caching:TotalBytesReturned = 476014159
        """

        # Parse results
        service = dict()
        caches = dict()
        peers = dict()
        lines = results.splitlines()
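        # 'serveradmin' returns either legacy 'caching:key = value' lines or
        # JSON (one object per line); a leading '{' indicates JSON.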

        # Legacy output
        if not results.startswith('{'):
            output = dict(line.split(' = ', 1) for line in lines)
            for key in output:
                if key.startswith('caching:CacheDetails:'):
                    short = key.replace(
                        'caching:CacheDetails:_array_index:',
                        ''
                        )
                    idx = int(short.split(':')[0])
                    k = short.split(':')[1]
                    v = output.get(key).replace('"', '')
                    if idx not in caches:
                        caches[idx] = dict()
                    caches[idx].update({k: v})
                elif key.startswith('caching:Peers:'):
                    short = key.replace('caching:Peers:_array_index:', '')
                    short = short.replace('details:', '')
                    if ('capabilities' not in key
                            and 'local-network' not in key):
                        idx = int(short.split(':')[0])
                        k = short.split(':')[1]
                        v = output.get(key).replace('"', '')
                        if idx not in peers:
                            peers[idx] = dict()
                        peers[idx].update({k: v})
                else:
                    k = key.split(':')[1]
                    service.update({k: output.get(key).replace('"', '')})

        # JSON output
        else:
            for line in lines:
                output = json.loads(line)
                service.update(output.get('result', dict()))

                # Mimic structure of legacy output
                keys = service.get('CacheDetails', dict()).keys()
                for idx in range(len(keys)):
                    value = service.get('CacheDetails', dict()).get(keys[idx])
                    caches[idx] = {
                        'MediaType': keys[idx],
                        'BytesUsed': value,
                        }

                # Settings output has an element named "Parents" as well
                if output.get('name', 'status') != 'settings':
                    peer_count = 0
                    for peer in service.get('Peers', list()):
                        peers[peer_count] = peer
                        peer_count += 1
                    for peer in service.get('Parents', list()):
                        peer['is-parent'] = True
                        peers[peer_count] = peer
                        peer_count += 1
                    for idx in peers:
                        for attr in ('ac-power', 'cache-size', 'is-portable'):
                            if attr in peers[idx]['details']:
                                peers[idx][attr] = peers[idx]['details'][attr]

        # Caching Service
        booleans = [
            'Active',
            'AllowPersonalCaching',
            'LocalSubnetsOnly',
            'LogClientIdentity',
            'RestrictedMedia',
            ]

        for attr in booleans:
            if attr in service and type(service[attr]) is not bool:
                service[attr] = True if 'yes' == service[attr] else False

        integers = [
            'CacheFree',
            'CacheLimit',
            'CacheUsed',
            'Port',
            'ReservedVolumeSpace',
            ]

        for attr in integers:
            if attr in service and type(service[attr]) is not int:
                service[attr] = int(service[attr])

        # More realistic Cache Limit value if configured to "unlimited"
        if service.get('CacheLimit', 0) == 0:
            service['CacheLimit'] = (service.get('CacheUsed', 0)
                                     + service.get('CacheFree', 0))

        service['id'] = self.prepId('CachingService')
        service['title'] = service.get('DataPath', 'Content Caching')

        # Escape spaces in DataPath for zencommand later
        if 'DataPath' in service:
            service['DataPath'] = service['DataPath'].replace(' ', r'\ ')

        # Not listening, service likely not running
        if 'Port' in service and service.get('Port') == 0:
            del service['Port']
        log.debug('Caching Service\n%s', service)

        rm = RelationshipMap(
            relname='contentCachingService',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService'
            )
        rm.append(ObjectMap(
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService',  # noqa
            data=service
            ))
        maps.append(rm)

        # Individual Cache components
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCaches',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache'
            )

        for idx in caches:
            cache = caches.get(idx)
            if 'BytesUsed' in cache:
                cache['BytesUsed'] = int(cache['BytesUsed'])
            cache['title'] = self.prepId(cache.get('MediaType', ''))
            cache['id'] = self.prepId(cache['title'])
            log.debug('Individual Cache: %s', cache)
            rm.append(ObjectMap(
                modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache',
                data=cache
                ))
        maps.append(rm)

        # Peer Server components
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCachePeers',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer'
            )

        peer_integers = [
            'cache-size',
            'port',
            ]
        peer_booleans = [
            'ac-power',
            'friendly',
            'healthy',
            'is-portable',
            ]

        for idx in peers:
            peer = peers.get(idx)
            for attr in peer_integers:
                if attr in peer and type(peer[attr]) is not int:
                    peer[attr] = int(peer[attr])
            for attr in peer_booleans:
                if attr in peer and type(peer[attr]) is not bool:
                    peer[attr] = True if 'yes' == peer[attr] else False
            peer['title'] = peer.get('address', peer.get('guid', ''))
            id_str = 'cachepeer_{0}'.format(
                peer.get('address', peer.get('guid', ''))
                )
            peer['id'] = self.prepId(id_str)
            log.debug('Peer Caching Server: %s', peer)
            rm.append(ObjectMap(
                modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer',
                data=peer
                ))
        maps.append(rm)

        return maps
    def process(self, device, results, log):
        """ Generates RelationshipMaps from Command output """
        log.info('Modeler %s processing data for device %s', self.name(),
                 device.id)
        maps = list()

        pools = dict()
        last_parent = None
        last_pool = None
        last_root = None
        last_tree = None
        last_type = None
        last_vdev = None
        zpool_status = False

        get_regex = r'^(?P<pool>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'
        zdb_header_regex = r'(?P<key>\S+)\:$'
        zdb_kv_regex = r'\ {4}\s*(?P<key>\S+)\:\s?(?P<value>\S+)'
        status_pool_regex = r'^\s+pool: (?P<dev>\S+)$'
        status_logs_regex = r'^\s+logs$'
        status_cache_regex = r'^\s+cache$'
        status_spare_regex = r'^\s+spares$'
        status_dev_regex = r'(?P<dev>\S+)\s+(?P<health>\S+)(?:\s+\d+){3}$'

        for line in results.splitlines():
            get_match = re.match(get_regex, line)
            zdb_pool_match = re.match(r'^' + zdb_header_regex, line)
            zdb_tree_match = re.match(r'^    ' + zdb_header_regex, line)
            zdb_root_match = re.match(r'^        ' + zdb_header_regex, line)
            zdb_vdev_match = re.match(r'^            ' + zdb_header_regex,
                                      line)  # noqa
            zdb_kv_match = re.match(zdb_kv_regex, line)
            status_pool_match = re.match(status_pool_regex, line) \
                or re.match(r'^\t' + status_dev_regex, line)
            status_logs_match = re.match(status_logs_regex, line)
            status_cache_match = re.match(status_cache_regex, line)
            status_spare_match = re.match(status_spare_regex, line)
            status_root_match = re.match(r'^\t  ' + status_dev_regex, line)
            status_child_match = re.match(r'^\t    ' + status_dev_regex, line)

            if get_match:
                pool = get_match.group('pool')
                key = get_match.group('key').replace('@', '_')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                pools[pool][key] = value

            elif zdb_pool_match:
                if not zpool_status:
                    pool = zdb_pool_match.group('key')
                    if pool not in pools:
                        pools[pool] = dict()
                    last_pool = pools[pool]
                    last_pool['type'] = 'pool'
                    last_parent = last_pool

            elif zdb_tree_match:
                key = zdb_tree_match.group('key')
                if 'tree' in key:
                    last_pool[key] = dict()
                    last_tree = last_pool[key]
                    last_parent = last_tree

            elif zdb_root_match:
                key = zdb_root_match.group('key')
                last_tree[key] = dict()
                last_root = last_tree[key]
                last_parent = last_root

            elif zdb_vdev_match:
                key = zdb_vdev_match.group('key')
                last_root[key] = dict()
                last_vdev = last_root[key]
                last_parent = last_vdev

            elif zdb_kv_match:
                key = zdb_kv_match.group('key')
                value = zdb_kv_match.group('value').replace("'", "")
                # Attributes right under vdev_tree are pool-wide
                # and should already be in `zpool get` output
                if ('vdev_tree' in last_pool
                        and last_pool['vdev_tree'] == last_parent):
                    continue
                # ZenModeler does not like these in the RelMap
                elif key in ['hostid', 'hostname']:
                    continue
                elif 'name' == key:
                    last_parent['title'] = value
                    continue
                elif 'pool_guid' == key:
                    last_parent['guid'] = value
                    continue
                # Spare devices will be modeled based on 'zpool status' output
                elif 'type' == key and 'spare' == value:
                    continue
                last_parent[key] = value
                # disk type
                if key == 'path':
                    last_parent['title'] = value.split('/')[-1]
                # mirror type
                elif key == 'id' and 'type' in last_parent:
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'], value)
                # raidz type
                elif (key == 'nparity' and 'id' in last_parent
                      and 'type' in last_parent):
                    last_parent['type'] += value
                    last_parent['title'] = '{0}-{1}'.format(
                        last_parent['type'], last_parent['id'])

            # 'zpool status' is only to find cache devices
            # since they're strangely absent from zdb
            elif status_pool_match:
                zpool_status = True
                pool = status_pool_match.group('dev')
                if pool not in pools:
                    pools[pool] = dict()
                if 'vdev_tree' not in pools[pool]:
                    pools[pool]['vdev_tree'] = dict()
                last_pool = pools[pool]
                last_pool['type'] = 'pool'
                last_type = last_pool['type']
                last_tree = pools[pool]['vdev_tree']
                last_parent = last_tree

            elif status_logs_match:
                last_type = 'logs'

            elif status_cache_match:
                last_type = 'cache'

            elif status_spare_match:
                last_type = 'spare'

            # Emulate structure in zdb output for log devices
            # Each device is a root vdev,
            # rather than a child vdev in a logs/cache root
            elif status_root_match:
                if last_type in ['cache', 'spare']:
                    dev = status_root_match.group('dev')
                    key = '{0}_{1}'.format(last_type, dev)
                    if key not in last_tree:
                        last_tree[key] = dict()
                    last_root = last_tree[key]
                    last_root['title'] = dev
                    for boolean in ['cache', 'log', 'spare']:
                        last_root['is_{0}'.format(boolean)] = '0'
                    last_root['is_{0}'.format(last_type)] = '1'
                    last_root['health'] = status_root_match.group('health')

            elif status_child_match:
                last_type = 'child'

        booleans = [
            'autoexpand',
            'autoreplace',
            'delegation',
            'listsnapshots',
            'readonly',
        ]

        dev_booleans = [
            'is_cache',
            'is_log',
            'is_spare',
            'whole_disk',
        ]

        ints = [
            'allocated',
            'ashift',
            'asize',
            'capacity',
            'create_txg',
            'dedupditto',
            'free',
            'freeing',
            'leaked',
            'metaslab_array',
            'metaslab_shift',
            'size',
            'txg',
            'DTL',
        ]

        floats = [
            'dedupratio',
            'fragmentation',
        ]

        # Basic Linux block device name
        # sda1
        disk_id_basic_regex = r'^([a-z]{3,})\d+$'
        # Linux /dev/disk/by-id
        # ata-WDC_WD2000F9YZ-09N20L0_WD-WCC1P0356812-part1
        # Linux /dev/disk/by-path
        # pci-0000:00:11.0-scsi-2:0:0:0-part1
        # Illumos block device name
        # c8t5000CCA03C41D2FDd0s0
        disk_id_regex = r'^(.*)(?:-part\d+|s\d+)$'

        pool_rm = RelationshipMap(relname='zpools',
                                  modname='ZenPacks.daviswr.ZFS.ZPool')

        root_rm_list = list()
        child_rm_list = list()

        ignore_names_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_names_regex)

        # Pool components
        for pool in pools:
            if ignore_names_regex and re.match(ignore_names_regex, pool):
                log.debug('Skipping pool %s due to zZPoolIgnoreNames', pool)
                continue

            comp = dict()
            for key in pools[pool]:
                if key in booleans:
                    comp[key] = (True if pools[pool][key] in ['on', 'yes'] else
                                 False)
                elif key in ints:
                    comp[key] = int(pools[pool][key])
                elif key in floats:
                    comp[key] = float(pools[pool][key])
                elif (not key == 'vdev_tree' and not key == 'name'):
                    comp[key] = pools[pool][key]
            # Can't use the GUID since it's not available in iostat
            comp['id'] = self.prepId('pool_{0}'.format(pool))
            log.debug('Found ZPool: %s', comp['id'])
            pool_rm.append(
                ObjectMap(modname='ZenPacks.daviswr.ZFS.ZPool', data=comp))

            # Root vDev components
            roots = pools[pool].get('vdev_tree', None)
            if roots is not None:
                log.debug('ZPool %s has children', comp['id'])
                root_rm = RelationshipMap(
                    compname='zpools/pool_{0}'.format(pool),
                    relname='zrootVDevs',
                    modname='ZenPacks.daviswr.ZFS.ZRootVDev')
                for key in roots.keys():
                    if (not key.startswith('children')
                            and not key.startswith('cache_')
                            and not key.startswith('spare_')):
                        del roots[key]
                for root in roots:
                    comp = dict()
                    children = list()
                    for key in roots[root]:
                        if key in dev_booleans:
                            comp[key] = (True
                                         if '1' == roots[root][key] else False)
                        elif key in ints:
                            comp[key] = int(roots[root][key])
                        elif key == 'type':
                            comp['VDevType'] = roots[root][key]
                        elif (key.startswith('children[')
                              or key.startswith('cache_')
                              or key.startswith('spare_')):
                            children.append(roots[root][key])
                        elif not key == 'name':
                            comp[key] = roots[root][key]
                    comp['pool'] = pool
                    if comp.get('whole_disk') and comp.get('title'):
                        match = re.match(disk_id_regex, comp['title']) \
                            or re.match(disk_id_basic_regex, comp['title'])
                        if match:
                            comp['title'] = match.groups()[0]
                    id_str = '{0}_{1}'.format(
                        pool,
                        comp.get('title', '').replace('-', '_'))
                    comp['id'] = self.prepId(id_str)
                    if comp.get('is_cache'):
                        modname = 'CacheDev'
                    elif comp.get('is_log'):
                        modname = 'LogDev'
                    elif comp.get('is_spare'):
                        modname = 'SpareDev'
                    else:
                        modname = 'RootVDev'
                    log.debug('Found %s: %s', modname, comp['id'])
                    root_rm.append(
                        ObjectMap(modname='ZenPacks.daviswr.ZFS.Z{0}'.format(
                            modname),
                                  data=comp))

                    # Store Dev components
                    if len(children) > 0:
                        log.debug('Root vDev %s has children', comp['id'])
                        child_rm = RelationshipMap(
                            compname='zpools/pool_{0}/zrootVDevs/{1}'.format(
                                pool, id_str),
                            relname='zstoreDevs',
                            modname='ZenPacks.daviswr.ZFS.ZStoreDev')
                        for child in children:
                            comp = dict()
                            for key in child:
                                if key in dev_booleans:
                                    comp[key] = (True if '1' == child[key] else
                                                 False)
                                elif key in ints:
                                    comp[key] = int(child[key])
                                elif key == 'type':
                                    comp['VDevType'] = child[key]
                                elif not key == 'name':
                                    comp[key] = child[key]
                            comp['pool'] = pool
                            if comp.get('whole_disk') and comp.get('title'):
                                match = re.match(disk_id_regex, comp['title'])\
                                    or re.match(disk_id_basic_regex, comp['title'])  # noqa
                                if match:
                                    comp['title'] = match.groups()[0]
                            id_str = '{0}_{1}'.format(
                                pool,
                                comp.get('title', '').replace('-', '_'))
                            comp['id'] = self.prepId(id_str)
                            log.debug('Found child vDev: %s', comp['id'])
                            child_rm.append(
                                ObjectMap(
                                    modname='ZenPacks.daviswr.ZFS.ZStoreDev',
                                    data=comp))
                        child_rm_list.append(child_rm)
                root_rm_list.append(root_rm)

        maps.append(pool_rm)
        maps.extend(root_rm_list)
        maps.extend(child_rm_list)

        log.debug('ZPool RelMap:\n%s', str(maps))

        return maps
    def run(self):
        with open('model.yaml', 'r') as f:
            self.model_config = yaml.safe_load(f)

        self.connect()

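        # Build ObjectMaps for each tier of the simulated model (global,
        # controller, compute, tenant, instance) from the YAML templates.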
        objmaps = []
        for modname, obj_attrs in self.get_model_template("Global"):
            objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for controller_num in range(1, self.options.controllers + 1):
            for modname, obj_attrs in self.get_model_template("Controller"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=controller_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for compute_num in range(1, self.options.computes + 1):
            for modname, obj_attrs in self.get_model_template("Compute"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=compute_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for tenant_num in range(3, self.options.tenants + 3):
            for modname, obj_attrs in self.get_model_template("Tenant"):
                self.talesEvalAttrs(
                    obj_attrs,
                    num=tenant_num,
                    device_name=self.options.device
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        compute_nums = range(1, self.options.computes + 1)
        tenant_nums = range(3, self.options.tenants + 3)

        for instance_num in range(1, self.options.instances + 1):
            for modname, obj_attrs in self.get_model_template("Instance"):
                tenant_num = tenant_nums[instance_num % self.options.tenants]
                compute_num = compute_nums[instance_num % self.options.computes]

                self.talesEvalAttrs(
                    obj_attrs,
                    num=instance_num,
                    device_name=self.options.device,
                    tenant_num=tenant_num,
                    compute_num=compute_num
                )
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        device = self.dmd.Devices.OpenStack.Infrastructure.findDevice(self.options.device)
        if not device:
            print "Creating OpenStackInfrastructure device %s" % self.options.device
            device = self.dmd.Devices.OpenStack.Infrastructure.createInstance(self.options.device)
        device.setPerformanceMonitor('localhost')

        for controller_num in range(1, self.options.controllers + 1):
            device_name = "%s_controller%d" % (self.options.device, controller_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(device_name)
            if not d:
                print "Creating controller device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        for compute_num in range(1, self.options.computes + 1):
            device_name = "%s_compute%d" % (self.options.device, compute_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(device_name)
            if not d:
                print "Creating compute device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        relmap = RelationshipMap(relname='components')
        for objmap in objmaps:
            relmap.append(objmap)

        endpoint_om = ObjectMap(
            modname='ZenPacks.zenoss.OpenStackInfrastructure.Endpoint',
            data=dict(
                set_maintain_proxydevices=True
            )
        )

        print "Applying datamaps (1/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        adm._applyDataMap(device, relmap)
        adm._applyDataMap(device, endpoint_om)

        print "Gathering network information"
        l3_agent_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNeutronAgent") if x.type == 'L3 agent']
        dhcp_agent_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNeutronAgent") if x.type == 'DHCP agent']
        all_network_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork")]
        all_router_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureRouter")]
        all_subnet_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureSubnet")]
        instance_network_ids = [x.id for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork") if x.ports() and len([y for y in x.ports() if y.instance()])]
        instance_subnet_ids = [y.id for y in set(chain.from_iterable([x.subnets() for x in device.getDeviceComponents(type="OpenStackInfrastructureNetwork") if x.ports() and len([y for y in x.ports() if y.instance()])]))]

        objmaps = []
        print "Adding L3 Agent Relationships"
        for agent_id in l3_agent_ids:
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                compname="components/%s" % agent_id,
                data=dict(
                    id=agent_id,
                    set_networks=all_network_ids,
                    set_routers=all_router_ids,
                    set_subnets=all_subnet_ids
                )))

        print "Adding DHCP agent Relationships"
        for agent_id in dhcp_agent_ids:
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                compname="components/%s" % agent_id,
                data=dict(
                    id=agent_id,
                    set_networks=instance_network_ids,
                    set_subnets=instance_subnet_ids
                )))

        print "Adding instance <-> hypervisor relationship"
        hypervisor_instances = defaultdict(list)
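        # e.g. with a hypothetical 2-compute deployment, instance 3 lands on
        # compute_nums[3 % 2] == 2, i.e. hypervisor-compute2.1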
        for instance_num in range(1, self.options.instances + 1):
            instance_id = "server-%d" % instance_num
            compute_num = compute_nums[instance_num % self.options.computes]
            hypervisor_id = "hypervisor-compute%d.1" % compute_num
            hypervisor_instances[hypervisor_id].append(instance_id)

        for hypervisor_id, instance_ids in hypervisor_instances.iteritems():
            objmaps.append(ObjectMap(
                modname="ZenPacks.zenoss.OpenStackInfrastructure.Hypervisor",
                compname="components/%s" % hypervisor_id,
                data=dict(
                    id=hypervisor_id,
                    set_instances=instance_ids
                )))

        print "Applying datamaps (2/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        for objmap in objmaps:
            adm._applyDataMap(device, objmap)

        print "Committing model changes."
        transaction.commit()
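# A minimal, self-contained sketch of the RelationshipMap/ObjectMap pattern that
# the modeler examples in this collection all follow. The relname, modname and
# component ids below are hypothetical placeholders, not taken from any of the
# ZenPacks shown here.
from Products.DataCollector.plugins.DataMaps import ObjectMap, RelationshipMap


def build_example_relmap(count=3):
    """Return a list with one RelationshipMap holding `count` hypothetical components."""
    rm = RelationshipMap(
        relname='exampleComponents',
        modname='ZenPacks.example.Pack.ExampleComponent')
    for idx in range(count):
        rm.append(ObjectMap(
            modname='ZenPacks.example.Pack.ExampleComponent',
            data={
                'id': 'example{0}'.format(idx),
                'title': 'Example {0}'.format(idx),
            }))
    # A modeler plugin's process() would return this list so that
    # ApplyDataMap can apply it to the device, as in the examples above.
    return [rm]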
Example #15
    def run(self):
        with open('model.yaml', 'r') as f:
            self.model_config = yaml.load(f)

        self.connect()

        objmaps = []
        for modname, obj_attrs in self.get_model_template("Global"):
            objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for controller_num in range(1, self.options.controllers + 1):
            for modname, obj_attrs in self.get_model_template("Controller"):
                self.talesEvalAttrs(obj_attrs,
                                    num=controller_num,
                                    device_name=self.options.device)
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for compute_num in range(1, self.options.computes + 1):
            for modname, obj_attrs in self.get_model_template("Compute"):
                self.talesEvalAttrs(obj_attrs,
                                    num=compute_num,
                                    device_name=self.options.device)
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        for tenant_num in range(3, self.options.tenants + 3):
            for modname, obj_attrs in self.get_model_template("Tenant"):
                self.talesEvalAttrs(obj_attrs,
                                    num=tenant_num,
                                    device_name=self.options.device)
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        compute_nums = range(1, self.options.computes + 1)
        tenant_nums = range(3, self.options.tenants + 3)

        for instance_num in range(1, self.options.instances + 1):
            for modname, obj_attrs in self.get_model_template("Instance"):
                tenant_num = tenant_nums[instance_num % self.options.tenants]
                compute_num = compute_nums[instance_num %
                                           self.options.computes]

                self.talesEvalAttrs(obj_attrs,
                                    num=instance_num,
                                    device_name=self.options.device,
                                    tenant_num=tenant_num,
                                    compute_num=compute_num)
                objmaps.append(ObjectMap(modname=modname, data=obj_attrs))

        device = self.dmd.Devices.OpenStack.Infrastructure.findDevice(
            self.options.device)
        if not device:
            print "Creating OpenStackInfrastructure device %s" % self.options.device
            device = self.dmd.Devices.OpenStack.Infrastructure.createInstance(
                self.options.device)
        device.setPerformanceMonitor('localhost')

        for controller_num in range(1, self.options.controllers + 1):
            device_name = "%s_controller%d" % (self.options.device,
                                               controller_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(
                device_name)
            if not d:
                print "Creating controller device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(
                    device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        for compute_num in range(1, self.options.computes + 1):
            device_name = "%s_compute%d" % (self.options.device, compute_num)
            d = self.dmd.Devices.Server.SSH.Linux.NovaHost.findDevice(
                device_name)
            if not d:
                print "Creating compute device %s" % device_name
                d = self.dmd.Devices.Server.SSH.Linux.NovaHost.createInstance(
                    device_name)
                d.setZenProperty('zIpServiceMapMaxPort', 32767)

        relmap = RelationshipMap(relname='components')
        for objmap in objmaps:
            relmap.append(objmap)

        endpoint_om = ObjectMap(
            modname='ZenPacks.zenoss.OpenStackInfrastructure.Endpoint',
            data=dict(set_maintain_proxydevices=True))

        print "Applying datamaps (1/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        adm._applyDataMap(device, relmap)
        adm._applyDataMap(device, endpoint_om)

        print "Gathering network information"
        l3_agent_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureNeutronAgent")
            if x.type == 'L3 agent'
        ]
        dhcp_agent_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureNeutronAgent")
            if x.type == 'DHCP agent'
        ]
        all_network_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureNetwork")
        ]
        all_router_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureRouter")
        ]
        all_subnet_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureSubnet")
        ]
        instance_network_ids = [
            x.id for x in device.getDeviceComponents(
                type="OpenStackInfrastructureNetwork")
            if x.ports() and len([y for y in x.ports() if y.instance()])
        ]
        instance_subnet_ids = [
            y.id for y in set(
                chain.from_iterable([
                    x.subnets() for x in device.getDeviceComponents(
                        type="OpenStackInfrastructureNetwork") if x.ports()
                    and len([y for y in x.ports() if y.instance()])
                ]))
        ]

        objmaps = []
        print "Adding L3 Agent Relationships"
        for agent_id in l3_agent_ids:
            objmaps.append(
                ObjectMap(
                    modname=
                    "ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                    compname="components/%s" % agent_id,
                    data=dict(id=agent_id,
                              set_networks=all_network_ids,
                              set_routers=all_router_ids,
                              set_subnets=all_subnet_ids)))

        print "Adding DHCP agent Relationships"
        for agent_id in dhcp_agent_ids:
            objmaps.append(
                ObjectMap(
                    modname=
                    "ZenPacks.zenoss.OpenStackInfrastructure.NeutronAgent",
                    compname="components/%s" % agent_id,
                    data=dict(id=agent_id,
                              set_networks=instance_network_ids,
                              set_subnets=instance_subnet_ids)))

        print "Adding instance <-> hypervisor relationship"
        hypervisor_instances = defaultdict(list)
        for instance_num in range(1, self.options.instances + 1):
            instance_id = "server-%d" % instance_num
            compute_num = compute_nums[instance_num % self.options.computes]
            hypervisor_id = "hypervisor-compute%d.1" % compute_num
            hypervisor_instances[hypervisor_id].append(instance_id)

        for hypervisor_id, instance_ids in hypervisor_instances.iteritems():
            objmaps.append(
                ObjectMap(modname=
                          "ZenPacks.zenoss.OpenStackInfrastructure.Hypervisor",
                          compname="components/%s" % hypervisor_id,
                          data=dict(id=hypervisor_id,
                                    set_instances=instance_ids)))

        print "Applying datamaps (2/2) (%d objects)" % len(objmaps)
        adm = ApplyDataMap()
        for objmap in objmaps:
            adm._applyDataMap(device, objmap)

        print "Committing model changes."
        transaction.commit()
    def process(self, device, results, log):
        log.info("Processing %s for device %s", self.name(), device.id)
        getdata, tabledata = results
        maps = []

        tempRelMap = RelationshipMap(
            relname='raritanTemperatureSensors',
            compname=self.compname,
            modname='ZenPacks.community.Raritan.RaritanTemperatureSensor')
        humidRelMap = RelationshipMap(
            relname='raritanHumiditySensors',
            compname=self.compname,
            modname='ZenPacks.community.Raritan.RaritanHumiditySensor')
        onOffRelMap = RelationshipMap(
            relname='raritanOnOffSensors',
            compname=self.compname,
            modname='ZenPacks.community.Raritan.RaritanOnOffSensor')

        for snmpindex, row in tabledata.get('externalSensor', {}).items():
            sensor_type = row.get('externalSensorType')
            name = row.get('externalSensorName')

            log.info('index: {} - type: {} - name: {}'.format(
                snmpindex, sensor_type, name))

            description = row.get('externalSensorDescription')
            title = name
            if description:
                title = '{} ({})'.format(title, description)

            # The next two sensor cases could potentially be merged into one
            #
            if sensor_type == 10:  # Temperature sensor
                log.debug('Found temp sensor:{}'.format(name))
                tempRelMap.append(
                    ObjectMap(
                        compname=self.compname,
                        modname=
                        'ZenPacks.community.Raritan.RaritanTemperatureSensor',
                        data={
                            'id':
                            self.prepId(name),
                            'title':
                            title,
                            'snmpindex':
                            snmpindex.strip('.'),
                            'serial':
                            row.get('externalSensorSerialNumber', ''),
                            'sensor_type':
                            row.get('externalSensorType', ''),
                            'sensor_units':
                            row.get('externalSensorUnits', ''),
                            'sensor_digits':
                            row.get('externalSensorDecimalDigits', ''),
                            'port':
                            row.get('externalSensorPort', ''),
                        }))
            elif sensor_type == 11:  # Humidity sensor
                log.debug('Found humid sensor:{}'.format(name))
                humidRelMap.append(
                    ObjectMap(
                        compname=self.compname,
                        modname=
                        'ZenPacks.community.Raritan.RaritanHumiditySensor',
                        data={
                            'id':
                            self.prepId(name),
                            'title':
                            title,
                            'snmpindex':
                            snmpindex.strip('.'),
                            'serial':
                            row.get('externalSensorSerialNumber', ''),
                            'sensor_type':
                            row.get('externalSensorType', ''),
                            'sensor_units':
                            row.get('externalSensorUnits', ''),
                            'sensor_digits':
                            row.get('externalSensorDecimalDigits', ''),
                            'port':
                            row.get('externalSensorPort', ''),
                        }))
            elif sensor_type == 14:  # On/off sensor
                log.debug('Found OnOff sensor:{}'.format(name))
                onOffRelMap.append(
                    ObjectMap(
                        compname=self.compname,
                        modname='ZenPacks.community.Raritan.RaritanOnOffSensor',
                        data={
                            'id':
                            self.prepId(name),
                            'title':
                            title,
                            'snmpindex':
                            snmpindex.strip('.'),
                            'serial':
                            row.get('externalSensorSerialNumber', ''),
                            'sensor_type':
                            row.get('externalSensorType', ''),
                            'sensor_units':
                            row.get('externalSensorUnits', ''),
                            'sensor_digits':
                            row.get('externalSensorDecimalDigits', ''),
                            'port':
                            row.get('externalSensorPort', ''),
                        }))
        maps.extend([tempRelMap, humidRelMap, onOffRelMap])

        return maps
    def processTblSupplies(self, tblSupplies, tblColors, log):

        # initialize separate maps for toners and other supplies
        # use RelationshipMap() because the relationship has to be specified
        # explicitly, since a single modeler provides more than one component type
        mapSupplies = RelationshipMap(modname='ZenPacks.TwoNMS.PrinterMIB.PrinterSupply', relname='printermibsupply')
        mapToners = RelationshipMap(modname='ZenPacks.TwoNMS.PrinterMIB.PrinterToner', relname='printermibtoner')


        # simplify the tblColors map to make the code easier to read
        colors = {}
        for cId, cInfo in tblColors.iteritems():
            colors[str(cId)] = cInfo['prtMarkerColorantValue'].split("\x00")[0]
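        # e.g. a hypothetical raw colorant value of 'black\x00' is trimmed to 'black'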
        log.debug("colors table = %s", colors)

        # go over each supply and classify it as a toner (ink cartridge) or other supply
        for supplyId, supplyData in tblSupplies.iteritems():

            # create a temp map first because we don't know yet what kind of supply we have
            mapTemp = self.objectMap(supplyData)
            mapTemp.id = self.prepId(supplyId)
            isToner = False

            # check if it's a toner or other supply, color toners have prtMarkerSuppliesColorantIndex > 0
            # translate the color id
            try:
                if mapTemp.prtMarkerSuppliesColorantIndex > 0:
                    isToner = True
                    # overwrite the index with the color value
                    if (colors[str(mapTemp.prtMarkerSuppliesColorantIndex)] != None):
                        mapTemp.prtMarkerSuppliesColorantValue = colors[str(mapTemp.prtMarkerSuppliesColorantIndex)]
                        mapTemp.rgbColorCode = self.rgbColorCodes[mapTemp.prtMarkerSuppliesColorantValue.lower()]
                    else:
                        mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
            except (AttributeError, KeyError):
                log.warn("AttributeErorr or KeyError occurred - Supply does not support the prtMarkerSuppliesColorantIndex oid")
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue
            except:
                log.warn("Unknown error occurred")
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC['na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue

            # translate the supply unit type id
            try:
                if (self.PrtMarkerSuppliesSupplyUnitTC[str(mapTemp.prtMarkerSuppliesSupplyUnitTC)] != None):
                    mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC[str(mapTemp.prtMarkerSuppliesSupplyUnitTC)]
            except AttributeError:
                log.warn("Supply does not support the prtMarkerSuppliesSupplyUnitTC oid")
                mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC['na']
                #continue

            # translate the supply type id
            try:
                if (self.PrtMarkerSuppliesTypeTC[str(mapTemp.prtMarkerSuppliesTypeTC)] != None):
                    mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC[str(mapTemp.prtMarkerSuppliesTypeTC)]
            except AttributeError:
                log.warn("Supply does not support the prtMarkerSuppliesTypeTC oid")
                mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC['na']
                #continue

            # add a percentage value of the usage
            try:
                mapTemp.usagepct = self.calculateUsagePct(mapTemp.prtMarkerSuppliesLevel, mapTemp.prtMarkerSuppliesMaxCapacity, log)
            except:
                mapTemp.usagepct = 'na'

            # add the temp map to the toner or supply map
            if (isToner == True):
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterToner"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New toner found: %s", mapTemp)
                mapToners.append(mapTemp)
            else:
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterSupply"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New supply found: %s", mapTemp)
                mapSupplies.append(mapTemp)

        return mapSupplies, mapToners
    def process(self, device, results, log):
        """Process results. Return iterable of datamaps or None."""

        maps = list()

        # ZoneMinder daemon (getVersion.json & configs.json)
        daemon = dict()

        # Expecting results to be a dict
        for key in ['url', 'version', 'apiversion']:
            daemon[key] = results.get(key)

        for item in results.get('configs', list()):
            config = item['Config']
            key = config['Name'].title().replace('_', '')
            value = config['Value']
            daemon[key] = value

        booleans = ['ZmOptControl', 'ZmOptFfmpeg', 'ZmOptUseEventnotification']
        for key in booleans:
            if key in daemon:
                daemon[key] = True if daemon[key] == '1' else False

        daemon['title'] = 'ZoneMinder'
        daemon['id'] = self.prepId(daemon['title'])

        rm = RelationshipMap(relname='zoneMinder',
                             modname='ZenPacks.daviswr.ZoneMinder.ZoneMinder')
        rm.append(
            ObjectMap(modname='ZenPacks.daviswr.ZoneMinder.ZoneMinder',
                      data=daemon))
        log.debug('%s ZoneMinder daemon:\n%s', device.id, rm)
        maps.append(rm)

        # Monitors
        rm = RelationshipMap(compname='zoneMinder/ZoneMinder',
                             relname='zmMonitors',
                             modname='ZenPacks.daviswr.ZoneMinder.ZMMonitor')

        ptz = dict()
        for item in results.get('controls', list()):
            control = item['Control']
            key = control['Id']
            value = '{0} {1}'.format(control['Name'], control['Type'])
            ptz[key] = value

        ignore_ids = getattr(device, 'zZoneMinderIgnoreMonitorId', list())
        ignore_names = getattr(device, 'zZoneMinderIgnoreMonitorName', '')
        ignore_host = getattr(device, 'zZoneMinderIgnoreMonitorHostname', '')

        for item in results.get('monitors', list()):
            monitor = item['Monitor']
            monitor_id = monitor.get('Id') \
                or (int(monitor.get('Sequence')) + 1)
            monitor_name = monitor.get('Name') or monitor_id
            monitor['id'] = self.prepId('zmMonitor{0}'.format(monitor_id))
            monitor['title'] = monitor_name

            if ignore_ids and monitor_id in ignore_ids:
                log.info(
                    '%s: Skipping monitor %s in zZoneMinderIgnoreMonitorId',
                    device.id, monitor_id)
                continue
            elif ignore_names and re.search(ignore_names, monitor_name):
                log.info('%s: Skipping %s in zZoneMinderIgnoreMonitorName',
                         device.id, monitor_name)
                continue

            # We may or may not have a hostname/IP, port, protocol,
            # path, or full URL. Some of these may have passwords.
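            # e.g. a hypothetical Path of 'rtsp://user:pass@cam1:8554/live' yields
            # protocol 'rtsp', host_string 'user:pass@cam1:8554' and url_path '/live'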
            full_url_regex = r'([A-Za-z]+):\/\/([^/]+)(\/.*)'
            path = monitor.get('Path', '')
            path_url_match = re.match(full_url_regex, path)
            if path_url_match:
                # Path attribute is a full URL with protocol, host, and port
                # so it's more believable than the individual attributes
                log.debug('%s: Path is full URL: %s', device.id, path)
                protocol = path_url_match.groups()[0]
                host_string = path_url_match.groups()[1]
                url_path = path_url_match.groups()[2]
            else:
                log.debug('%s: Path is NOT a full URL: %s', device.id, path)
                protocol = monitor.get('Protocol', '')
                host_string = monitor.get('Host', '')
                url_path = monitor.get('Path', '')

            if '@' in host_string:
                log.debug('%s: Credentials found in host: %s', device.id,
                          host_string)
                (credentials, host_string) = host_string.split('@')
                url_path = url_path.replace(credentials + '@', '')
                path = path.replace(credentials + '@', '')

            if ':' in host_string:
                log.debug('%s: Port found in host: %s', device.id, host_string)
                (host, port) = host_string.split(':')
            else:
                host = host_string
                port = monitor.get('Port', '')

            protocol_port = {
                'http': '80',
                'https': '443',
                'rtsp': '554',
            }

            # Invert the dictionary
            port_protocol = dict(
                (value, key) for key, value in protocol_port.items())
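            # i.e. {'80': 'http', '443': 'https', '554': 'rtsp'}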

            if not path_url_match:
                path = '{0}://{1}:{2}{3}'.format(protocol, host, port,
                                                 url_path)
                log.debug('%s: Assembled URL %s', device.id, path)

                if port:
                    if (not protocol
                            or protocol != port_protocol.get(port, protocol)):
                        protocol = port_protocol.get(port, protocol)
                        log.debug('%s: Fixing protocol: %s', device.id,
                                  protocol)
                elif protocol:
                    port = protocol_port.get(protocol, port)
                    log.debug('%s: Fixing port: %s', device.id, port)

            # Getting protocol from the full URL path
            elif port != protocol_port.get(protocol, port):
                port = protocol_port.get(protocol, port)

            url_pass_mid_regex = r'(\S+)passw?o?r?d?=[^_&?]+[_&?](\S+)'
            url_pass_end_regex = r'(\S+)passw?o?r?d?=[^_&?]+'
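            # e.g. a hypothetical 'http://cam/feed?user=admin&password=secret&res=high'
            # is rewritten below as 'http://cam/feed?user=admin&res=high'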
            url_pass_match = (re.match(url_pass_mid_regex, path)
                              or re.match(url_pass_end_regex, path))
            if url_pass_match:
                log.debug('%s: Remove password from URL %s', device.id, path)
                path = url_pass_match.groups()[0]
                if len(url_pass_match.groups()) > 1:
                    path += url_pass_match.groups()[1]

            monitor['Path'] = path
            monitor['Host'] = host
            monitor['Port'] = port
            monitor['Protocol'] = protocol.upper()

            ignore_match = re.search(ignore_host, host)
            if ignore_host and ignore_match:
                log.info('%s: Skipping %s in zZoneMinderIgnoreMonitorHostname',
                         device.id, host)
                continue

            monitor['MonitorType'] = monitor.get('Type')
            if 'Ffmpeg' == monitor['MonitorType']:
                monitor['MonitorType'] = 'FFmpeg'

            integers = [
                'ServerId',
                'Port',
                'Width',
                'Height',
            ]

            for key in integers:
                value = monitor.get(key, None)
                monitor[key] = int(value) if value else 0

            color_map = {
                '1': '8-bit grayscale',
                '2': '24-bit color',
                '3': '32-bit color',
                '4': '32-bit color',
            }

            monitor['Color'] = color_map.get(monitor.get('Colours', 0),
                                             'Unknown')

            if monitor['Width'] > 0 and monitor['Height'] > 0:
                monitor['Resolution'] = '{0}x{1}'.format(
                    monitor['Width'], monitor['Height'])
            else:
                monitor['Resolution'] = ''

            floats = [
                'MaxFPS',
                'AlarmMaxFPS',
            ]

            for key in floats:
                monitor[key] = float(monitor.get(key, 0.0)) \
                    if monitor.get(key, None) \
                    else 0.0

            booleans = [
                'Enabled',
                'Controllable',
            ]

            for key in booleans:
                monitor[key] = True if monitor.get(key, '0') == '1' else False

            if (monitor['Controllable']
                    and monitor.get('ControlId', '0') != '0'):
                monitor['ControlId'] = ptz.get(monitor['ControlId'])
            else:
                monitor['ControlId'] = 'None'

            rm.append(
                ObjectMap(modname='ZenPacks.daviswr.ZoneMinder.ZMMonitor',
                          data=monitor))
            log.debug('%s ZoneMinder monitor:\n%s', device.id, rm)
        maps.append(rm)

        # Storage Volumes
        rm = RelationshipMap(compname='zoneMinder/ZoneMinder',
                             relname='zmStorage',
                             modname='ZenPacks.daviswr.ZoneMinder.ZMStorage')
        ignore_ids = getattr(device, 'zZoneMinderIgnoreStorageId', list())
        ignore_names = getattr(device, 'zZoneMinderIgnoreStorageName', '')
        ignore_paths = getattr(device, 'zZoneMinderIgnoreStoragePath', '')

        volumes = results.get('volumes', dict())

        # Combine storage info from API with that scraped from Console
        for item in results.get('storage', list()):
            store = item['Storage']
            if store['Name'] in volumes:
                volumes[store['Name']].update(store)
            # Scraping failed
            else:
                volumes[store['Name']] = store

        for store_name in volumes:
            store = volumes[store_name]
            store_id = store.get('Id')
            store_path = store.get('Path')
            store['id'] = self.prepId('zmStorage_{0}'.format(store_name))
            store['title'] = store_name

            if ignore_ids and store_id in ignore_ids:
                log.info(
                    '%s: Skipping storage %s in zZoneMinderIgnoreStorageId',
                    device.id, store_id)
                continue
            elif ignore_names and re.search(ignore_names, store_name):
                log.info('%s: Skipping %s in zZoneMinderIgnoreStorageName',
                         device.id, store_name)
                continue
            elif ignore_paths and re.search(ignore_paths, store_path):
                log.info('%s: Skipping %s in zZoneMinderIgnoreStoragePath',
                         device.id, store_path)
                continue

            if 'total' in store:
                store['DiskSpace'] = store['total']
            else:
                # DiskSpace is actually space used by events, not capacity;
                # drop it safely in case the API record never provided it
                store.pop('DiskSpace', None)

            store['StorageType'] = store.get('Type', None)

            rm.append(
                ObjectMap(modname='ZenPacks.daviswr.ZoneMinder.ZMStorage',
                          data=store))
            log.debug('%s ZoneMinder storage:\n%s', device.id, rm)
        maps.append(rm)

        return maps
        rm = RelationshipMap()
        rm.compname = compname
        rm.relname = 'plexs'
        rm.modname = 'ZenPacks.CS.NetApp.CMode.Plex'
        rm.classname = 'Plex'

        for record in response['records']:
            om = ObjectMap()
            om.modname = 'ZenPacks.CS.NetApp.CMode.Plex'
            om.id = self.prepId(record['name'])
            om.plex_name = record['name']
            om.online = record['online']
            om.plex_state = record['state']
            om.pool = record['pool']
            om.resync = record['resync']['active']
            rm.append(om)

            # use a separate variable so the parent compname is not rewritten
            # on every loop iteration
            plex_compname = '{parent}/plexs/{id}'.format(parent=compname,
                                                         id=om.id)
            raidgrouprm, diskrm = self.raidgroups(record['raid_groups'],
                                                  plex_compname, log)

        returnValue((rm, raidgrouprm, diskrm))

    @inlineCallbacks
    def volumes(self, device, uuid, baseUrl, auth, compname, log):
        try:
            response = yield getPage(
                '{url}/storage/volumes?fields=aggregates,name,uuid,space.size,state,style,tiering,type,svm.name,snapshot_policy.name,nas.path,clone,space.available,space.used,space.over_provisioned,space.snapshot.reserve_percent,nas.security_style&return_records=true&return_timeout=15'
                .format(url=baseUrl, oid=uuid),
                headers=auth)
            response = json.loads(response)
Example #20
    def process(self, device, results, log):
        log.info(
            "Modeler %s processing data for device %s",
            self.name(),
            device.id
            )
        maps = list()

        pools = dict()

        get_regex = r'^(?P<ds>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'
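        # e.g. a hypothetical tab-separated 'zfs get' output line
        # 'tank/home\tused\t1234\t-' yields ds='tank/home', key='used', value='1234'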

        for line in results.splitlines():
            get_match = re.match(get_regex, line)

            if get_match:
                ds = get_match.group('ds')
                pool = ds.split('/')[0]
                key = get_match.group('key')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
                if ds not in pools[pool]:
                    pools[pool][ds] = dict()
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                elif key == 'type':
                    pools[pool][ds]['zDsType'] = value
                    continue
                pools[pool][ds][key] = value

        booleans = [
            'atime',
            'defer_destroy',
            'mounted',
            'nbmand',
            'overlay',
            'relatime',
            'setuid',
            'utf8only',
            'vscan',
            'zoned',
            ]

        floats = [
            'compressratio',
            'refcompressratio',
            ]

        ints = [
            'available',
            'copies',
            'filesystem_count',
            'filesystem_limit',
            'logicalreferenced',
            'logicalused',
            'quota',
            'recordsize',
            'referenced',
            'refquota',
            'refreservation',
            'reservation',
            'snapshot_count',
            'snapshot_limit',
            'used',
            'usedbychildren',
            'usedbydataset',
            'usedbyrefreservation',
            'usedbysnapshots',
            'userrefs',
            'volblocksize',
            'volsize',
            'written',
            ]

        times = [
            'creation',
            ]

        prefixes = {
            'filesystem': 'fs',
            'volume': 'vol',
            'snapshot': 'snap'
            }

        suffixes = {
            'filesystem': '',
            'volume': 'Vol',
            'snapshot': 'Snap'
            }
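        # e.g. a dataset of zDsType 'volume' gets an id prefixed with 'vol_' and is
        # modeled below as ZenPacks.daviswr.ZFS.ZFSVol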

        time_format = '%Y-%m-%d %H:%M:%S'

        rm = RelationshipMap(
            relname='zfsdatasets',
            modname='ZenPacks.daviswr.ZFS.ZFSDataset'
            )

        ignore_names_regex = getattr(device, 'zZFSDatasetIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZFSDatasetIgnoreNames set to %s', ignore_names_regex)
        ignore_types = getattr(device, 'zZFSDatasetIgnoreTypes', list())
        if ignore_types:
            log.info('zZFSDatasetIgnoreTypes set to %s', str(ignore_types))
        ignore_pools_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_pools_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_pools_regex)

        # Dataset components
        for pool in pools:
            if ignore_pools_regex and re.match(ignore_pools_regex, pool):
                log.debug('Skipping pool %s due to zZPoolIgnoreNames', pool)
                continue

            rm = RelationshipMap(
                compname='zpools/pool_{0}'.format(pool),
                relname='zfsDatasets',
                modname='ZenPacks.daviswr.ZFS.ZFSDataset'
                )

            datasets = pools[pool]
            for ds in datasets:
                if ignore_names_regex and re.match(ignore_names_regex, ds):
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreNames',
                        ds
                        )
                    continue
                elif ignore_types \
                        and datasets[ds].get('zDsType', '') in ignore_types:
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreTypes',
                        ds
                        )
                    continue

                comp = dict()
                for key in datasets[ds]:
                    if key in booleans:
                        comp[key] = True \
                            if ('on' == datasets[ds][key]
                                or 'yes' == datasets[ds][key]) \
                            else False
                    elif key in floats:
                        comp[key] = float(datasets[ds][key])
                    elif key in ints:
                        comp[key] = int(datasets[ds][key])
                    elif key in times:
                        comp[key] = time.strftime(
                            time_format,
                            time.localtime(int(datasets[ds][key]))
                            )
                    else:
                        comp[key] = datasets[ds][key]
                prefix = prefixes.get(comp.get('zDsType'), '')
                suffix = suffixes.get(comp.get('zDsType'), 'Dataset')
                # Pool name should already be part of the dataset name,
                # making it unique
                comp['id'] = self.prepId('{0}_{1}'.format(prefix, ds))
                comp['title'] = ds
                log.debug('Found ZFS %s: %s', comp.get('type', ''), comp['id'])
                mod = 'ZenPacks.daviswr.ZFS.ZFS{0}'.format(suffix)
                rm.append(ObjectMap(
                    modname=mod,
                    data=comp
                    ))
            maps.append(rm)

        log.debug(
            'ZFS RelMap:\n%s',
            str(maps)
            )

        return maps
                    if added_count > 0:
                        log.info("  Added %d new objectmaps to %s" % (added_count, key))

        # Apply the objmaps in the right order.
        componentsMap = RelationshipMap(relname="components")
        for i in (
            "tenants",
            "regions",
            "flavors",
            "images",
            "servers",
            "zones",
            "hosts",
            "hypervisors",
            "services",
            "networks",
            "subnets",
            "routers",
            "ports",
            "agents",
            "floatingips",
        ):
            for objmap in objmaps[i]:
                componentsMap.append(objmap)

        endpointObjMap = ObjectMap(
            modname="ZenPacks.zenoss.OpenStackInfrastructure.Endpoint", data=dict(set_maintain_proxydevices=True)
        )

        return (componentsMap, endpointObjMap)
    def process(self, device, results, log):
        log.info('Modeler %s processing data for device %s',
                self.name(), device.id)

        getdata, tabledata = results
        sensor_count = sum([getdata[x] for x in getdata if 'Count' in x])

        maps = []

        # device-specific data
        manufacturer = 'Geist Manufacturing, Inc.'
        os_name = '%s %s' % (getdata['productTitle'], getdata['productVersion'])
        maps.append(ObjectMap(data={
            'sensor_count': sensor_count,
            'title': getdata['productFriendlyName'],
            'productUrl': getdata['productUrl'],
            'setHWProductKey': MultiArgs(getdata['productHardware'], manufacturer),
            'setOSProductKey': MultiArgs(os_name, manufacturer),
            }))

        # Components: climate sensors
        rm = RelationshipMap(
                relname='geistClimateSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistClimateSensor',
                )
        for snmpindex, row in tabledata.get('climateTable', {}).items():
            serial = row.get('climateSerial')
            if not serial:
                log.warn('Skipping climate sensor with no serial')
                continue
            log.debug('Modeling climate sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['climateName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistClimateSensor',
                data=values
                ))
        maps.append(rm)

        # Components: temperature sensors
        rm = RelationshipMap(
                relname='geistTemperatureSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor',
                )
        for snmpindex, row in tabledata.get('tempSensorTable', {}).items():
            serial = row.get('tempSensorSerial')
            if not serial:
                log.warn('Skipping temperature sensor with no serial')
                continue
            log.debug('Modeling temperature sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['tempSensorName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistTemperatureSensor',
                data=values
                ))
        maps.append(rm)

        # Components: airflow sensors
        rm = RelationshipMap(
                relname='geistAirflowSensors',
                modname='ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor',
                )
        for snmpindex, row in tabledata.get('airFlowSensorTable', {}).items():
            serial = row.get('airFlowSensorSerial')
            if not serial:
                log.warn('Skipping airflow sensor with no serial')
                continue
            log.debug('Modeling airflow sensor %s', serial)
            
            values = {k: row[k] for k in row}
            values['id'] = self.prepId(serial)
            values['title'] = values['airFlowSensorName']
            values['snmpindex'] = snmpindex.strip('.')

            rm.append(ObjectMap(
                modname='ZenPacks.crosse.Geist.Monitor.GeistAirflowSensor',
                data=values
                ))
        maps.append(rm)

        return maps
    def processTblSupplies(self, tblSupplies, tblColors, log):

        # initialize separate maps for toners and other supplies
        # use RelationshipMap() because the relationship has to be specified
        # explicitly, since a single modeler provides more than one component type
        mapSupplies = RelationshipMap(
            modname='ZenPacks.TwoNMS.PrinterMIB.PrinterSupply',
            relname='printermibsupply')
        mapToners = RelationshipMap(
            modname='ZenPacks.TwoNMS.PrinterMIB.PrinterToner',
            relname='printermibtoner')

        # simplify the tblColors map to make the code easier to read
        colors = {}
        for cId, cInfo in tblColors.iteritems():
            colors[str(cId)] = cInfo['prtMarkerColorantValue'].split("\x00")[0]
        log.debug("colors table = %s", colors)

        # go over each supply and classify it as a toner (ink cartridge) or other supply
        for supplyId, supplyData in tblSupplies.iteritems():

            # create a temp map first because we don't know yet what kind of supply we have
            mapTemp = self.objectMap(supplyData)
            mapTemp.id = self.prepId(supplyId)
            isToner = False

            # check if it's a toner or other supply, color toners have prtMarkerSuppliesColorantIndex > 0
            # translate the color id
            try:
                if mapTemp.prtMarkerSuppliesColorantIndex > 0:
                    isToner = True
                    # overwrite the index with the color value
                    if (colors[str(mapTemp.prtMarkerSuppliesColorantIndex)] !=
                            None):
                        mapTemp.prtMarkerSuppliesColorantValue = colors[str(
                            mapTemp.prtMarkerSuppliesColorantIndex)]
                        mapTemp.rgbColorCode = self.rgbColorCodes[
                            mapTemp.prtMarkerSuppliesColorantValue.lower()]
                    else:
                        mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC[
                            'na']
            except (AttributeError, KeyError):
                log.warn(
                    "AttributeErorr or KeyError occurred - Supply does not support the prtMarkerSuppliesColorantIndex oid"
                )
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC[
                    'na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue
            except:
                log.warn("Unknown error occurred")
                mapTemp.prtMarkerSuppliesColorantValue = self.PrtConsoleColorTC[
                    'na']
                mapTemp.rgbColorCode = self.rgbColorCodes['na']
                #continue

            # translate the supply unit type id
            try:
                if (self.PrtMarkerSuppliesSupplyUnitTC[str(
                        mapTemp.prtMarkerSuppliesSupplyUnitTC)] != None):
                    mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC[
                        str(mapTemp.prtMarkerSuppliesSupplyUnitTC)]
            except AttributeError:
                log.warn(
                    "Supply does not support the prtMarkerSuppliesSupplyUnitTC oid"
                )
                mapTemp.prtMarkerSuppliesSupplyUnit = self.PrtMarkerSuppliesSupplyUnitTC[
                    'na']
                #continue

            # translate the supply type id
            try:
                if (self.PrtMarkerSuppliesTypeTC[str(
                        mapTemp.prtMarkerSuppliesTypeTC)] != None):
                    mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC[
                        str(mapTemp.prtMarkerSuppliesTypeTC)]
            except AttributeError:
                log.warn(
                    "Supply does not support the prtMarkerSuppliesTypeTC oid")
                mapTemp.prtMarkerSuppliesType = self.PrtMarkerSuppliesTypeTC[
                    'na']
                #continue

            # add a percentage value of the usage
            try:
                mapTemp.usagepct = self.calculateUsagePct(
                    mapTemp.prtMarkerSuppliesLevel,
                    mapTemp.prtMarkerSuppliesMaxCapacity, log)
            except:
                mapTemp.usagepct = 'na'

            # add the temp map to the toner or supply map
            if (isToner == True):
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterToner"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New toner found: %s", mapTemp)
                mapToners.append(mapTemp)
            else:
                mapTemp.modname = "ZenPacks.TwoNMS.PrinterMIB.PrinterSupply"
                mapTemp.supplyId = mapTemp.id
                mapTemp.snmpindex = mapTemp.id
                log.debug("New supply found: %s", mapTemp)
                mapSupplies.append(mapTemp)

        return mapSupplies, mapToners
Example #24
    def process(self, device, results, log):
        log.info('processing %s for device %s', self.name(), device.id)
        maps = list()
        """ Example output through 10.12

        caching:ReservedVolumeSpace = 25000000000
        caching:LogClientIdentity = yes
        caching:CacheLimit = 70000000000
        caching:ServerRoot = "/Library/Server"
        caching:ServerGUID = "02FE97F2-41F3-4CEE-9899-27976DB91A1A"
        caching:DataPath = "/Library/Server/Caching/Data"
        caching:LocalSubnetsOnly = yes
        caching:Port = 0
        caching:CacheLimit = 70000000000
        caching:StartupStatus = "OK"
        caching:RegistrationStatus = 1
        caching:CacheFree = 52754638336
        caching:PersonalCacheUsed = 0
        caching:TotalBytesDropped = 0
        caching:CacheStatus = "OK"
        caching:TotalBytesStoredFromOrigin = 419351941
        caching:state = "RUNNING"
        caching:Port = 49232
        caching:Peers:_array_index:0:address = "aaa.bbb.ccc.ddd"
        caching:Peers:_array_index:0:port = 49094
        caching:Peers:_array_index:0:details:capabilities:ur = yes
        caching:Peers:_array_index:0:details:capabilities:sc = yes
        caching:Peers:_array_index:0:details:capabilities:pc = no
        caching:Peers:_array_index:0:details:capabilities:im = no
        caching:Peers:_array_index:0:details:capabilities:ns = yes
        caching:Peers:_array_index:0:details:capabilities:query-parameters = yes  # noqa
        caching:Peers:_array_index:0:details:cache-size = 900000000000
        caching:Peers:_array_index:0:details:ac-power = yes
        caching:Peers:_array_index:0:details:is-portable = no
        caching:Peers:_array_index:0:details:local-network:_array_index:0:speed = 1000  # noqa
        caching:Peers:_array_index:0:details:local-network:_array_index:0:wired = yes  # noqa
        caching:Peers:_array_index:0:healthy = yes
        caching:Peers:_array_index:0:version = "161"
        caching:Peers:_array_index:0:friendly = yes
        caching:Peers:_array_index:0:guid = "9B9CDED4-F70C-4910-B7D4-11D1530AD34D"  # noqa
        caching:TotalBytesStoredFromPeers = 0
        caching:RestrictedMedia = no
        caching:CacheDetails:_array_index:0:BytesUsed = 0
        caching:CacheDetails:_array_index:0:LocalizedType = "Mac Software"
        caching:CacheDetails:_array_index:0:MediaType = "Mac Software"
        caching:CacheDetails:_array_index:0:Language = "en"
        caching:CacheDetails:_array_index:1:BytesUsed = 419351941
        caching:CacheDetails:_array_index:1:LocalizedType = "iOS Software"
        caching:CacheDetails:_array_index:1:MediaType = "iOS Software"
        caching:CacheDetails:_array_index:1:Language = "en"
        ...
        caching:PersonalCacheLimit = 70000000000
        caching:CacheUsed = 419351941
        caching:TotalBytesStored = 419351941
        caching:TotalBytesImported = 0
        caching:PersonalCacheFree = 52754638336
        caching:Active = yes
        caching:TotalBytesReturned = 476014159
        """

        # Parse results
        service = dict()
        caches = dict()
        peers = dict()
        lines = results.splitlines()

        # Legacy output
        if not results.startswith('{'):
            output = dict(line.split(' = ') for line in lines)
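            # e.g. the line 'caching:Port = 49232' from the sample output above
            # becomes the dict entry {'caching:Port': '49232'}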
            for key in output:
                if key.startswith('caching:CacheDetails:'):
                    short = key.replace('caching:CacheDetails:_array_index:',
                                        '')
                    idx = int(short.split(':')[0])
                    k = short.split(':')[1]
                    v = output.get(key).replace('"', '')
                    if idx not in caches:
                        caches[idx] = dict()
                    caches[idx].update({k: v})
                elif key.startswith('caching:Peers:'):
                    short = key.replace('caching:Peers:_array_index:', '')
                    short = short.replace('details:', '')
                    if ('capabilities' not in key
                            and 'local-network' not in key):
                        idx = int(short.split(':')[0])
                        k = short.split(':')[1]
                        v = output.get(key).replace('"', '')
                        if idx not in peers:
                            peers[idx] = dict()
                        peers[idx].update({k: v})
                else:
                    k = key.split(':')[1]
                    service.update({k: output.get(key).replace('"', '')})

        # JSON output
        else:
            for line in lines:
                output = json.loads(line)
                service.update(output.get('result', dict()))

                # Mimic structure of legacy output
                keys = service.get('CacheDetails', dict()).keys()
                for idx in range(0, len(keys)):
                    value = service.get('CacheDetails', dict()).get(keys[idx])
                    caches[idx] = {
                        'MediaType': keys[idx],
                        'BytesUsed': value,
                    }
                    if len(keys) - 1 == idx:
                        break

                # Settings output has an element named "Parents" as well
                if output.get('name', 'status') != 'settings':
                    peer_count = 0
                    for peer in service.get('Peers', list()):
                        peers[peer_count] = peer
                        peer_count += 1
                    for peer in service.get('Parents', list()):
                        peer['is-parent'] = True
                        peers[peer_count] = peer
                        peer_count += 1
                    for idx in peers:
                        # promote selected "details" keys to the top level
                        details = peers[idx].get('details', dict())
                        for attr in ('ac-power', 'cache-size', 'is-portable'):
                            if attr in details:
                                peers[idx][attr] = details[attr]

        # Prevent existing components from being removed if service is down
        if service.get('StartupStatus', '') != 'OK':
            return None

        # Caching Service
        booleans = [
            'Active',
            'AllowPersonalCaching',
            'LocalSubnetsOnly',
            'LogClientIdentity',
            'RestrictedMedia',
        ]

        for attr in booleans:
            if attr in service and not isinstance(service[attr], bool):
                service[attr] = True if 'yes' == service[attr] else False
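        # e.g. "caching:Active = yes" in the legacy output above ends up as
        # service['Active'] = True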

        integers = [
            'CacheFree',
            'CacheLimit',
            'CacheUsed',
            'Port',
            'ReservedVolumeSpace',
        ]

        for attr in integers:
            if attr in service and not isinstance(service[attr], int):
                service[attr] = int(service[attr])

        try:
            # Fixups for unconfigured Cache Limit and negative Cache Free
            cache_limit = service.get('CacheLimit', 0)
            cache_used = service.get('CacheUsed', 0)
            cache_free = service.get('CacheFree', 0)
            log.debug('CacheLimit is: %s', str(cache_limit))
            if cache_free < 0:
                log.debug('Negative CacheFree value: %s', str(cache_free))
                cache_limit = cache_used if cache_limit == 0 else cache_limit
                cache_avail = cache_limit - cache_used
                # CacheLimit - CacheUsed > space available on disk
                # so CacheFree value is negative
                cache_free = cache_avail + cache_free if cache_avail > 0 \
                    else 0
            else:
                cache_limit = cache_used + cache_free if cache_limit == 0 \
                    else cache_limit
                cache_avail = cache_limit - cache_used
                # Unsure what CacheFree of 10 MB means when there's
                # a 20 GB difference between CacheLimit and CacheUsed
                cache_free = cache_avail if cache_avail > cache_free \
                    else cache_free
            log.debug('New CacheLimit: %s', str(cache_limit))
            log.debug('New CacheFree: %s', str(cache_free))
            service['CacheLimit'] = cache_limit
            service['CacheFree'] = cache_free

        except Exception:
            log.exception('Error in CacheLimit & CacheFree fixup')
            # More realistic Cache Limit value if configured to "unlimited"
            if service.get('CacheLimit', 0) == 0:
                service['CacheLimit'] = (service.get('CacheUsed', 0)
                                         + service.get('CacheFree', 0))
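        # Worked illustration of the fixup above, with hypothetical numbers:
        # an "unlimited" cache reporting CacheLimit = 0, CacheUsed = 4 GB and
        # CacheFree = 50 GB ends up with CacheLimit = 54 GB and CacheFree
        # unchanged, giving utilization graphs a real ceiling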

        service['id'] = self.prepId('CachingService')
        service['title'] = service.get('DataPath', 'Content Caching')

        log.debug('Caching Service\n%s', service)

        rm = RelationshipMap(
            relname='contentCachingService',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService'
        )
        rm.append(
            ObjectMap(
                modname=
                'ZenPacks.daviswr.OSX.Server.Caching.ContentCachingService',  # noqa
                data=service))
        maps.append(rm)

        # Individual Cache components
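        # compname anchors these components under the ContentCachingService
        # object created above, whose id is prepId('CachingService')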
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCaches',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache')

        for idx in caches:
            cache = caches.get(idx)
            if 'BytesUsed' in cache:
                cache['BytesUsed'] = int(cache['BytesUsed'])
            cache['title'] = self.prepId(cache.get('MediaType', ''))
            cache['id'] = self.prepId(cache['title'])
            log.debug('Individual Cache: %s', cache)
            rm.append(
                ObjectMap(
                    modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCache',
                    data=cache))
        maps.append(rm)

        # Peer Server components
        rm = RelationshipMap(
            compname='contentCachingService/CachingService',
            relname='contentCachePeers',
            modname='ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer')

        peer_integers = [
            'cache-size',
            'port',
        ]
        peer_booleans = [
            'ac-power',
            'friendly',
            'healthy',
            'is-portable',
        ]

        for idx in peers:
            peer = peers.get(idx)
            for attr in peer_integers:
                if attr in peer and not isinstance(peer[attr], int):
                    peer[attr] = int(peer[attr])
            for attr in peer_booleans:
                if attr in peer and not isinstance(peer[attr], bool):
                    peer[attr] = True if 'yes' == peer[attr] else False
            peer['title'] = peer.get('address', peer.get('guid', ''))
            id_str = 'cachepeer_{0}'.format(
                peer.get('address', peer.get('guid', '')))
            peer['id'] = self.prepId(id_str)
            log.debug('Peer Caching Server: %s', peer)
            rm.append(
                ObjectMap(
                    modname=
                    'ZenPacks.daviswr.OSX.Server.Caching.ContentCachePeer',
                    data=peer))
        maps.append(rm)

        log.debug('%s RelMaps:\n%s', self.name(), str(maps))
        return maps
Example #25
0
    def process(self, device, results, log):
        """ Generates RelationshipMaps from Command output """
        log.info('Modeler %s processing data for device %s', self.name(),
                 device.id)
        maps = list()

        pools = dict()

        get_regex = r'^(?P<ds>\S+)\t(?P<key>\S+)\t(?P<value>\S+)\t\S+$'
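        # The pattern assumes tab-separated `zfs get` output, presumably from
        # something like `zfs get -Hp all`, e.g. (hypothetical dataset):
        #   tank/home<TAB>used<TAB>1234567890<TAB>local
        # i.e. dataset, property, value, source; only the first three are
        # captured, and parsable (-p) epoch values are expected for 'creation'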

        for line in results.splitlines():
            get_match = re.match(get_regex, line)

            if get_match:
                ds = get_match.group('ds')
                pool = ds.split('/')[0]
                key = get_match.group('key')
                value = get_match.group('value')
                if pool not in pools:
                    pools[pool] = dict()
                if ds not in pools[pool]:
                    pools[pool][ds] = dict()
                if value.endswith('%') or re.match(r'^\d+\.\d{2}x$', value):
                    value = value[:-1]
                elif value == '-':
                    value = None
                elif key == 'type':
                    pools[pool][ds]['zDsType'] = value
                    continue
                pools[pool][ds][key] = value

        booleans = [
            'atime',
            'defer_destroy',
            'mounted',
            'nbmand',
            'overlay',
            'relatime',
            'setuid',
            'utf8only',
            'vscan',
            'zoned',
        ]

        floats = [
            'compressratio',
            'refcompressratio',
        ]

        ints = [
            'available',
            'copies',
            'filesystem_count',
            'filesystem_limit',
            'logicalreferenced',
            'logicalused',
            'quota',
            'recordsize',
            'referenced',
            'refquota',
            'refreservation',
            'reservation',
            'snapshot_count',
            'snapshot_limit',
            'used',
            'usedbychildren',
            'usedbydataset',
            'usedbyrefreservation',
            'usedbysnapshots',
            'userrefs',
            'volblocksize',
            'volsize',
            'written',
        ]

        times = [
            'creation',
        ]

        prefixes = {'filesystem': 'fs', 'volume': 'vol', 'snapshot': 'snap'}

        suffixes = {'filesystem': '', 'volume': 'Vol', 'snapshot': 'Snap'}
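        # zDsType selects the component id prefix and the class-name suffix
        # appended to 'ZenPacks.daviswr.ZFS.ZFS' below; unknown types fall
        # back to no prefix and the 'Dataset' suffix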

        time_format = '%Y-%m-%d %H:%M:%S'

        rm = RelationshipMap(relname='zfsdatasets',
                             modname='ZenPacks.daviswr.ZFS.ZFSDataset')

        ignore_names_regex = getattr(device, 'zZFSDatasetIgnoreNames', '')
        if ignore_names_regex:
            log.info('zZFSDatasetIgnoreNames set to %s', ignore_names_regex)
        ignore_types = getattr(device, 'zZFSDatasetIgnoreTypes', list())
        if ignore_types:
            log.info('zZFSDatasetIgnoreTypes set to %s', str(ignore_types))
        ignore_pools_regex = getattr(device, 'zZPoolIgnoreNames', '')
        if ignore_pools_regex:
            log.info('zZPoolIgnoreNames set to %s', ignore_pools_regex)

        # Dataset components
        for pool in pools:
            if ignore_pools_regex and re.match(ignore_pools_regex, pool):
                log.debug('Skipping pool %s due to zZPoolIgnoreNames', pool)
                continue

            rm = RelationshipMap(compname='zpools/pool_{0}'.format(pool),
                                 relname='zfsDatasets',
                                 modname='ZenPacks.daviswr.ZFS.ZFSDataset')

            datasets = pools[pool]
            for ds in datasets:
                if ignore_names_regex and re.match(ignore_names_regex, ds):
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreNames',
                        ds)
                    continue
                elif (ignore_types
                      and datasets[ds].get('zDsType', '') in ignore_types):
                    log.debug(
                        'Skipping dataset %s due to zZFSDatasetIgnoreTypes',
                        ds)
                    continue

                comp = dict()
                for key in datasets[ds]:
                    if key in booleans:
                        comp[key] = (True if datasets[ds][key]
                                     in ['on', 'yes'] else False)
                    elif key in floats:
                        comp[key] = float(datasets[ds][key])
                    elif key in ints:
                        comp[key] = int(datasets[ds][key])
                    elif key in times:
                        comp[key] = time.strftime(
                            time_format,
                            time.localtime(int(datasets[ds][key])))
                    elif 'encryption' == key and 'on' == datasets[ds][key]:
                        # https://docs.oracle.com/cd/E53394_01/html/E54801/gkkih.html  # noqa
                        # The default encryption algorithm is aes-128-ccm when
                        # a file system's encryption value is on.
                        comp[key] = 'aes-128-ccm'
                    else:
                        comp[key] = datasets[ds][key]
                prefix = prefixes.get(comp.get('zDsType'), '')
                suffix = suffixes.get(comp.get('zDsType'), 'Dataset')
                # Pool name should already be part of the dataset name,
                # making it unique
                comp['id'] = self.prepId('{0}_{1}'.format(prefix, ds))
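                # e.g. a 'filesystem' dataset named 'tank/home' (hypothetical)
                # gets the id prepId('fs_tank/home')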
                comp['title'] = ds
                log.debug('Found ZFS %s: %s',
                          comp.get('zDsType', ''), comp['id'])
                mod = 'ZenPacks.daviswr.ZFS.ZFS{0}'.format(suffix)
                rm.append(ObjectMap(modname=mod, data=comp))
            maps.append(rm)

        log.debug('ZFS RelMap:\n%s', str(maps))

        return maps