Example #1
def volumes_rm(region_id, volumes):
    '''
    Return volumes RelationshipMap given region_id and a VolumeInfo
    ResultSet.
    '''
    volume_data = []
    for volume in volumes:
        if volume.attach_data.instance_id:
            instance_id = prepId(volume.attach_data.instance_id)
        else:
            instance_id = None

        volume_data.append({
            'id': prepId(volume.id),
            'title': name_or(volume.tags, volume.id),
            'volume_type': volume.type,
            'create_time': format_time(volume.create_time),
            'size': format_size(volume.size),  # Min:1GiB, Max:1TiB
            'iops': volume.iops,
            'status': volume.status,
            'attach_data_status': volume.attach_data.status,
            'attach_data_devicepath': volume.attach_data.device,
            'setInstanceId': instance_id,
            'setZoneId': volume.zone,
        })

    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='volumes',
        modname=MODULE_NAME['EC2Volume'],
        objmaps=volume_data)
Example #2
    def onSuccess(self, result, config):
        log.debug('Success - result is {}'.format(result))
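        # Map the junction health value to [measured event severity, status label].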
        map_status = {0: [0, 'OK'], 1: [3, 'unhealthy'], 2: [5, 'in failure']}
        items = result.get('items')
        data = self.new_data()
        for rproxy in items:
            for junction in rproxy.get('children', []):
                component = prepId('{}_{}'.format(rproxy['name'], junction['name']))
                value = int(junction['health'])
                data['values'][component]['jstatus_jstatus'] = value
                data['events'].append({
                    'device': config.id,
                    'component': component,
                    'severity': 2,
                    'measured_severity': map_status[value][0],
                    'eventKey': 'JStatus',
                    'eventClassKey': 'ISAMJStatusTest',
                    'summary': 'Junction {} - Status is {}'.format(junction['name'], map_status[value][1]),
                    'eventClass': '/Status/ISAMJunction',
                    'isamRP': prepId(rproxy['name']),
                    'isamJ': prepId(junction['name']),
                    'isamJS': None,
                })

        log.debug('JStatus data: {}'.format(data))
        return data
Example #3
    def add_maps(self, res, ds):
        """
        Check for added/removed regionservers and return a RelationshipMap if
        any changes took place. Otherwise return an ObjectMap which only
        clears the events of non-existing components.
        """
        # Check for removed/added region servers.
        dead_nodes = [prepId(dead_node_name(node)[0]) for node in self.dead]
        live_nodes = [prepId(node['name']) for node in self.live]
        nodes = set(dead_nodes + live_nodes)
        self.added = list(nodes.difference(set(ds.regionserver_ids)))
        self.removed = list(set(ds.regionserver_ids).difference(nodes))

        # Check for removed/added regions.
        regions = set(
            region.get('name') for node in self.live
            for region in node.get('Region'))
        change = regions.symmetric_difference(ds.region_ids)
        # Remodel Regions and RegionServers only if some of them
        # were added/removed.
        if self.added or self.removed or change:
            ds.id = ds.device
            result = {'status': res, 'conf': None}
            return HBaseCollector().process(ds, result, log)
        # If nothing changed, just clear events.
        return [ObjectMap({'getClearEvents': True})]
Example #4
def snapshots_rm(region_id, snapshots, region_volume_ids):
    '''
    Return snapshots RelationshipMap given region_id and a Snapshot
    ResultSet.
    '''
    snapshot_data = []
    for snapshot in snapshots:
        if snapshot.volume_id:
            if snapshot.volume_id in region_volume_ids:
                volume_id = prepId(snapshot.volume_id)
            else:
                volume_id = None
        else:
            volume_id = None
        snapshot_data.append({
            'id': prepId(snapshot.id),
            'title': name_or(snapshot.tags, snapshot.id),
            'description': snapshot.description,
            'size': format_size(snapshot.volume_size),
            'status': snapshot.status,
            'progress': snapshot.progress,
            'start_time': format_time(snapshot.start_time),
            'setVolumeId': volume_id,
        })

    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='snapshots',
        modname=MODULE_NAME['EC2Snapshot'],
        objmaps=snapshot_data)
Example #5
    def process(self, result):
        """
        Parses resulting data into datapoints.
        """
        data = json.loads(result)
        node_id, region_id = self.component.split(NAME_SPLITTER)
        res = {}

        for node in version_diff(data["LiveNodes"]):
            if node_id == prepId(node['name']):
                for region in node["Region"]:
                    if region_id == prepId(region['name']):
                        res = {
                            'read_requests': (0, 'N'),
                            'write_requests': (0, 'N'),
                            'number_of_stores': (0, 'N'),
                            'number_of_store_files': (0, 'N'),
                            'store_file_size_mb': (0, 'N'),
                            'store_file_index_size_mb': (0, 'N'),
                            'memstore_size_mb': (0, 'N'),
                            'current_compacted_kv': (0, 'N'),
                            'total_compacting_kv': (0, 'N'),
                        }
                        return sum_perf_metrics(res, region)
        return res
Example #6
def volumes_rm(region_id, volumes):
    '''
    Return volumes RelationshipMap given region_id and a VolumeInfo
    ResultSet.
    '''
    volume_data = []
    for volume in volumes:
        if volume.attach_data.instance_id:
            instance_id = prepId(volume.attach_data.instance_id)
        else:
            instance_id = None

        volume_data.append({
            'id': prepId(volume.id),
            'title': name_or(volume.tags, volume.id),
            'volume_type': volume.type,
            'create_time': volume.create_time,
            'size': volume.size / (1024**3),
            'iops': volume.iops,
            'status': volume.status,
            'attach_data_status': volume.attach_data.status,
            'attach_data_devicepath': volume.attach_data.device,
            'setInstanceId': instance_id,
            'setZoneId': volume.zone,
        })

    return RelationshipMap(compname='regions/%s' % region_id,
                           relname='volumes',
                           modname=MODULE_NAME['EC2Volume'],
                           objmaps=volume_data)
Example #7
def instances_rm(region_id, reservations):
    '''
    Return instances RelationshipMap given region_id and an InstanceInfo
    ResultSet.
    '''
    instance_data = []
    for instance in chain.from_iterable(r.instances for r in reservations):
        zone_id = prepId(instance.placement) if instance.placement else None
        subnet_id = prepId(instance.subnet_id) if instance.subnet_id else None

        instance_data.append({
            'id': prepId(instance.id),
            'title': name_or(instance.tags, instance.id),
            'instance_id': instance.id,
            'public_dns_name': instance.public_dns_name,
            'private_ip_address': instance.private_ip_address,
            'image_id': instance.image_id,
            'instance_type': instance.instance_type,
            'launch_time': instance.launch_time,
            'state': instance.state,
            'platform': getattr(instance, 'platform', ''),
            'detailed_monitoring': instance.monitored,
            'setZoneId': zone_id,
            'setVPCSubnetId': subnet_id,
        })

    return RelationshipMap(compname='regions/%s' % region_id,
                           relname='instances',
                           modname=MODULE_NAME['EC2Instance'],
                           objmaps=instance_data)
Example #8
def instances_rm(region_id, reservations):
    '''
    Return instances RelationshipMap given region_id and an InstanceInfo
    ResultSet.
    '''
    instance_data = []
    for instance in chain.from_iterable(r.instances for r in reservations):
        zone_id = prepId(instance.placement) if instance.placement else None
        subnet_id = prepId(instance.subnet_id) if instance.subnet_id else None

        instance_data.append({
            'id': prepId(instance.id),
            'title': name_or(instance.tags, instance.id),
            'instance_id': instance.id,
            'public_dns_name': instance.public_dns_name,
            'private_ip_address': instance.private_ip_address,
            'image_id': instance.image_id,
            'instance_type': instance.instance_type,
            'launch_time': instance.launch_time,
            'state': instance.state,
            'platform': getattr(instance, 'platform', ''),
            'detailed_monitoring': instance.monitored,
            'setZoneId': zone_id,
            'setVPCSubnetId': subnet_id,
            })

    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='instances',
        modname=MODULE_NAME['EC2Instance'],
        objmaps=instance_data)
Example #9
def get_instance_data(instance, image_ids):
    zone_id = prepId(instance.placement) if instance.placement else None
    subnet_id = prepId(instance.subnet_id) if instance.subnet_id else None

    if instance.image_id in image_ids:
        instance_image_id = instance.image_id
    else:
        instance_image_id = None

    return {
        'id': prepId(instance.id),
        'title': name_or(instance.tags, instance.id),
        'instance_id': instance.id,
        'tags': tags_string(instance.tags),
        'public_dns_name': instance.public_dns_name,
        'public_ip': instance.ip_address,
        'private_ip_address': instance.private_ip_address,
        'instance_type': instance.instance_type,
        'launch_time': format_time(instance.launch_time),
        'state': instance.state,
        'platform': getattr(instance, 'platform', ''),
        'detailed_monitoring': instance.monitored,
        'setZoneId': zone_id,
        'setImageId': instance_image_id,
        'setVPCSubnetId': subnet_id,
    }
Example #10
    def add_maps(self, res, ds):
        """
        Check for added/removed regionservers and return a RelationshipMap if
        any changes took place. Otherwise return an ObjectMap which only
        clears the events of non-existing components.
        """
        # Check for removed/added region servers.
        dead_nodes = [prepId(dead_node_name(node)[0]) for node in self.dead]
        live_nodes = [prepId(node['name']) for node in self.live]
        nodes = set(dead_nodes + live_nodes)
        self.added = list(nodes.difference(set(ds.regionserver_ids)))
        self.removed = list(set(ds.regionserver_ids).difference(nodes))

        # Check for removed/added regions.
        regions = set(region.get('name') for node in self.live
                      for region in node.get('Region'))
        change = regions.symmetric_difference(ds.region_ids)
        # Remodel Regions and RegionServers only if some of them
        # were added/removed.
        if self.added or self.removed or change:
            ds.id = ds.device
            result = {'status': res, 'conf': None}
            return HBaseCollector().process(ds, result, log)
        # If nothing changed, just clear events.
        return [ObjectMap({'getClearEvents': True})]
Example #11
    def onSuccess(self, results, config):

        data = self.new_data()
        ds0 = config.datasources[0]
        serviceinfo = results[results.keys()[0]]
        eventClass = ds0.eventClass if ds0.eventClass else "/Status"
        eventKey = ds0.eventKey if ds0.eventKey else "WindowsService"

        if serviceinfo[0].State != ds0.params['alertifnot']:

            evtmsg = 'Service Alert: {0} has changed to {1} state'.format(
                serviceinfo[0].Name,
                serviceinfo[0].State
            )

            data['events'].append({
                'eventClass': eventClass,
                'eventClassKey': 'WindowsServiceLog',
                'eventKey': eventKey,
                'severity': ds0.severity,
                'summary': evtmsg,
                'component': prepId(serviceinfo[0].Name),
                'device': config.id,
            })
        else:

            evtmsg = 'Service Recovered: {0} has changed to {1} state'.format(
                serviceinfo[0].Name,
                serviceinfo[0].State
            )

            data['events'].append({
                'eventClass': eventClass,
                'eventClassKey': 'WindowsServiceLog',
                'eventKey': eventKey,
                'severity': ZenEventClasses.Clear,
                'summary': evtmsg,
                'component': prepId(serviceinfo[0].Name),
                'device': config.id,
            })

        # Event to provide notification that check has completed
        data['events'].append({
            'eventClass': eventClass,
            'device': config.id,
            'summary': 'Windows Service Check: successful service collection',
            'severity': ZenEventClasses.Clear,
            'eventKey': 'WindowsServiceCollection',
            'eventClassKey': 'WindowsServiceLogSuccess',
        })

        return data
Example #12
    def testPrepId(self):
        """
        Tests for Utils.prepId()
        Legal values for this test were determined by running existing prepId 
        code.  Note that there are still some corner cases where illegal ids 
        (as defined by /opt/zenoss/lib/python/OFS/ObjectManager.checkValidId())
        can be produced by prepId().
        """
        tests = []

        # Ensure that no legal chars are converted
        prog = re.compile(r'[a-zA-Z0-9-_,.$\(\) ]')
        legals = ''.join(chr(c) for c in xrange(256) if prog.match(chr(c)))
        legals = 'X' + legals   # prevents leading space from being trimmed
        tests.append((legals, legals))

        # Ensure that all illegal chars are converted
        illegals = ''.join(chr(c) for c in xrange(256) if not prog.match(chr(c)))
        tests.append((illegals, "-"))

        # Test various combinations of legals, illegals, and spaces
        tests.extend((
                    ("A", "A"),
                    ("A::A", "A__A"),
                    ("A: :A", "A_ _A"),
                    ("A : A", "A _ A"),
                    ("A A", "A A"),
                    (":A:", "A"),
                    ("::A::", "A"),
                    (" A ", "A"),
                    (": A :", "A"),
                    (u"A\u0100A", "A_A"), # test a unicode character
                    ))

        # The following tests produce illegal ids
        tests.extend((
                    (".", "."),
                    ("..", ".."),
                    ("A:: ", "A__"),
                    (("X__", '-'), "X__"),
                    ("aq_A", "aq_A"),
                    ("REQUEST", "REQUEST"),
                    ))

        for args, expected in tests:
            if not isinstance(args, tuple):
                actual = prepId(args)
            else:
                actual = prepId(*args)
            self.assertEqual(actual, expected, 
                            "prepId('%s') is '%s', should be '%s'" %
                            (args, actual, expected))
Example #13
def _extract_cdp_lldp_maps(tabledata):
    oms = {}

    # CDP data
    for idx, data in tabledata.get("cdpCacheEntry", {}).items():
        idx = prepId("cdp_{}".format(idx))
        title = data.get('cdpCachePlatform', '')
        if idx in oms or not title:
            continue

        cdpCacheAddress = data.get('cdpCacheAddress')
        cdpCacheAddressType = data.get(
            'cdpCacheAddressType',
            CiscoNetworkProtocol.IPv4)

        ip_address = None

        if cdpCacheAddress:
            if cdpCacheAddressType == CiscoNetworkProtocol.IPv4:
                ip_address = asip(cdpCacheAddress)
            elif cdpCacheAddressType == CiscoNetworkProtocol.IPv6:
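                # The cached address is a raw octet string; convert it to an
                # integer and then to a compressed IPv6 address string.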
                ip_address = ipaddr.IPAddress(
                    int(cdpCacheAddress.encode('hex'), 16)).compressed

        oms[idx] = {
            'id': idx,
            'title': title,
            'description': data.get('cdpCacheSysName', ''),
            'device_port': data.get('cdpCacheDevicePort', ''),
            'ip_address': ip_address,
            'native_vlan': data.get('cdpCacheNativeVLAN', ''),
            'location': data.get('cdpCachePhysLocation', ''),
        }

    # LLDP data
    for idx, data in tabledata.get("lldpRemEntry", {}).items():
        idx = prepId("lldp_{}".format(idx))
        title = data.get('lldpRemSysName', '')
        if idx in oms or not title:
            continue

        oms[idx] = {
            'id': idx,
            'title': title,
            'description': data.get('lldpRemSysDesc', ''),
            'device_port': (
                data.get('lldpRemPortDesc', '') or
                data.get('lldpRemPortId', '')
            ),
        }
    return oms.values()
Example #14
    def onSuccess(self, result, config):
        log.debug('Success - result is {}'.format(result))
        data = self.new_data()

        # SBA status
        if result:
            data['values'][config.id]['health_status'] = 0
        else:
            data['values'][config.id]['health_status'] = 5

        # TODO: Check that the component exists
        for application in result:
            app_name = application['name']
            app_id = prepId(app_name)
            app_status = application['status']
            if app_status not in self.status_maps:
                log.error(
                    'Application Status not mapped: {}'.format(app_status))
            data['values'][app_id]['health_status'] = self.status_maps.get(
                app_status, 3)
            # Application Instances
            app_instances = application['instances']
            for instance in app_instances:
                instance_id = prepId(instance.get('id', ''))
                instance_status = instance['statusInfo']['status']
                if instance_status not in self.status_maps:
                    log.error(
                        'Application Instance Status not mapped: {}'.format(
                            instance_status))
                data['values'][instance_id][
                    'health_status'] = self.status_maps.get(
                        instance_status, 3)
                details = instance['statusInfo']['details']
                for component, c_data in details.items():
                    component_id = prepId('{}_{}_{}'.format(
                        app_name, instance_id, component))
                    if isinstance(c_data, dict):
                        component_status = c_data['status']
                    else:
                        component_status = 'Exception'
                    if component_status not in self.status_maps:
                        log.error(
                            'Application Component Status not mapped: {}'.
                            format(component_status))
                        log.error('Component Data: {}'.format(c_data))
                    value = self.status_maps.get(component_status, 3)
                    data['values'][component_id]['health_status'] = value

        return data
Example #15
    def onSuccess(self, results, config):

        data = self.new_data()
        ds0 = config.datasources[0]
        serviceinfo = results[results.keys()[0]]
        eventClass = ds0.eventClass if ds0.eventClass else "/Status"
        eventKey = ds0.eventKey if ds0.eventKey else "WindowsService"

        if serviceinfo[0].State != ds0.params['alertifnot']:

            evtmsg = 'Service Alert: {0} has changed to {1} state'.format(
                serviceinfo[0].Name, serviceinfo[0].State)

            data['events'].append({
                'eventClass': eventClass,
                'eventClassKey': 'WindowsServiceLog',
                'eventKey': eventKey,
                'severity': ds0.severity,
                'summary': evtmsg,
                'component': prepId(serviceinfo[0].Name),
                'device': config.id,
            })
        else:

            evtmsg = 'Service Recovered: {0} has changed to {1} state'.format(
                serviceinfo[0].Name, serviceinfo[0].State)

            data['events'].append({
                'eventClass': eventClass,
                'eventClassKey': 'WindowsServiceLog',
                'eventKey': eventKey,
                'severity': ZenEventClasses.Clear,
                'summary': evtmsg,
                'component': prepId(serviceinfo[0].Name),
                'device': config.id,
            })

        # Event to provide notification that check has completed
        data['events'].append({
            'eventClass': eventClass,
            'device': config.id,
            'summary': 'Windows Service Check: successful service collection',
            'severity': ZenEventClasses.Clear,
            'eventKey': eventKey,
            'eventClassKey': 'WindowsServiceLogSuccess',
        })

        return data
Example #16
def manage_addKPI(context, id, type='GAUGE'):
    """build a performance indicator object"""

    perfId = prepId(id)
    type = type.upper()
    perfIndicator = None

    # get KPI object from database if it already exists
    try:
       perfIndicator = context._getOb(perfId)
       return perfIndicator
    except:
       pass

    # call a proper KPI class
    # COUNTER to DERIVE conversion: there is a BBCApplicationKPICounter class, but resetting
    # counters led to a big amount of fake alerts, so we convert them to DERIVE with min value = 0
    if type == 'COUNTER' or type == 'DERIVE':
        perfIndicator = BBCApplicationKPIDerive(perfId)
    elif type == 'GAUGE':
        perfIndicator = BBCApplicationKPIGauge(perfId)
    elif type == 'ABSOLUTE':
        perfIndicator = BBCApplicationKPIAbsolute(perfId)
    
    if perfIndicator: context._setObject(perfId, perfIndicator)
    return perfIndicator
Example #17
 def get_events(self, result, ds):
     """
     Return a list of event dictionaries informing about the health
     of the region server.
     """
     data = json.loads(result)
     # Check for dead servers.
     dead_nodes = [
         prepId(dead_node_name(node)[0])
         for node in version_diff(data["DeadNodes"])
     ]
     # Send error or clear event.
     severity = ((self.component in dead_nodes) and ZenEventClasses.Error
                 or ZenEventClasses.Clear)
     return [{
         'component': self.component,
         'summary': "Region server '{0}' is dead".format(
             self.component.replace('_', ':')),
         'eventKey': 'hbase_regionserver_monitoring_error',
         'eventClass': '/Status',
         'severity': severity,
     }]
Example #18
    def getFrameworksRelMaps(self, frameworks):
        rel_maps = []
        obj_maps = []

        for name, data in frameworks.items():
            framework_id = prepId(name)
            obj_maps.append(
                ObjectMap(data=dict(
                    id=framework_id,
                    title=name,
                    cfName=name,
                    setCFDetection=data['detection'],
                )))

            rel_maps.extend(
                self.getRuntimesRelMaps(
                    data['runtimes'], 'cfFrameworks/{0}'.format(framework_id)))

            rel_maps.extend(
                self.getAppServersRelMaps(
                    data['appservers'],
                    'cfFrameworks/{0}'.format(framework_id)))

        return [
            RelationshipMap(relname='cfFrameworks',
                            modname='ZenPacks.zenoss.CloudFoundry.Framework',
                            objmaps=obj_maps)
        ] + rel_maps
Example #19
    def getAppsRelMaps(self, apps):
        obj_maps = []
        rel_maps = []

        for data in apps:
            app_id = prepId(data['name'])
            obj_maps.append(ObjectMap(data=dict(
                id=app_id,
                title=data['name'],
                cfName=data['name'],
                cfVersion=data['version'],
                cfState=data['state'],
                cfMetaCreated=data['meta']['created'],
                cfMetaVersion=data['meta']['version'],
                setCFURIs=data['uris'],
                setCFServices=data['services'],
                cfStagingModel=data['staging']['model'],
                cfStagingStack=data['staging']['stack'],
                modeled_instances=len(data['instances']),
                modeled_runningInstances=data['runningInstances'],
                modeled_resourcesMemory=data['resources']['memory'] * 1048576,
                modeled_resourcesDisk=data['resources']['disk'] * 1048576,
                modeled_resourcesFDS=data['resources']['fds']
            )))

            rel_maps.extend(self.getAppInstancesRelMaps(
                app_id, data['instances'], 'cfApps/{0}'.format(app_id)))

        return [RelationshipMap(
            relname='cfApps',
            modname='ZenPacks.zenoss.CloudFoundry.App',
            objmaps=obj_maps)] + rel_maps
Example #20
 def add_maps(self, result, ds):
     """
     Return a list of ObjectMaps with config properties updates
     for this regionserver and all its regions.
     """
     oms = []
     conf = ConfWrapper(result)
     oms.append(
         ObjectMap({
             "compname": "hbase_servers/{}".format(self.component),
             "modname": "Region Server conf",
             'handler_count': conf.handler_count,
             'memstore_upper_limit': conf.memstore_upper_limit,
             'memstore_lower_limit': conf.memstore_lower_limit,
             'logflush_interval': conf.logflush_interval
         }))
     # All the regions within the region server will have the same
     # configuration as set in the region server's conf file.
     for region in ds.region_ids:
         oms.append(ObjectMap({
             "compname": "hbase_servers/{}/regions/{}{}{}".format(
                 ds.component, ds.component, NAME_SPLITTER, prepId(region)),
             "modname": "Region conf",
             'memstore_flush_size': convToUnits(conf.memestore_flush_size),
             'max_file_size': convToUnits(conf.max_file_size),
         }))
     return oms
Example #21
    def onSuccess(self, result, config):
        log.debug('Success job - result is {}'.format(result))
        # TODO : cleanup job onSuccess

        data = self.new_data()

        ds_data = {}
        for success, ddata in result:
            if success:
                ds = ddata[0]
                metrics = json.loads(ddata[1])
                ds_data[ds] = metrics

        ds0 = config.datasources[0]
        componentID = prepId(ds0.component)
        applicationNameID = ds0.params['applicationNameID']
        tag = '{}_{}'.format(ds0.datasource, applicationNameID)
        jvm_data = ds_data.get(tag, '')
        if not jvm_data:
            # TODO: Add event: no data collected
            return data
        for point in ds0.points:
            if point.id in jvm_data:
                data['values'][componentID][point.id] = jvm_data[point.id]

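        # Derive memory-usage datapoints from the raw JVM metrics.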
        mem_used = jvm_data['mem'] - jvm_data['mem.free']
        data['values'][componentID]['mem.used'] = mem_used
        data['values'][componentID]['mem.used_percentage'] = float(mem_used) / float(jvm_data['mem']) * 100.0
        data['values'][componentID]['heap.used_percentage'] = float(jvm_data['heap.used']) / float(jvm_data['heap']) \
                                                              * 100.0
        log.debug('Success job - data is {}'.format(data))
        return data
Example #22
    def process(self, device, results, log):
        log.info('Modeler %s collecting queue info for ActiveMQ.' %
                 self.name())

        print device
        queues = []

        ipaddress = device.manageIp
        log.info('%s' % ipaddress)
        #user = device.zActiveMQUser
        #password = device.zActiveMQPassword
        user = "******"
        password = "******"

        conn = stomp.Connection([(ipaddress, 61613)], user, password)
        if conn:
            log.debug('Connection to %s successfully established' % ipaddress)
        listener = MyListener()
        conn.set_listener('', listener)
        listener.set_logger(log)
        log.info('Listener added')
        conn.start()
        conn.connect(wait=True)
        conn.subscribe(destination='/temp-queue/ActiveMQ.Queues',
                       ack='auto',
                       transformation="jms-map-json",
                       id="zenoss")

        log.info("Subscribed to /temp-queue/ActiveMQ.Queues")
        conn.send("",
                  destination='ActiveMQ.Statistics.Destination.>',
                  headers={'reply-to': '/temp-queue/ActiveMQ.Queues'})

        time.sleep(10)
        conn.unsubscribe(id="zenoss")
        conn.disconnect()
        log.info("Disconnected after timeout.")

        rm = self.relMap()
        rm.relname = "ActiveMQQueue"
        rm.modname = "ZenPacks.Eseye.ActiveMQ.ActiveMQQueue"

        queues = listener.get_queues()
        if len(queues) > 0:
            #RETURN QUEUES FOR ADDITION TO ZODB
            log.info('Queues: %d' % len(queues))
            for qid, qname in queues.items():
                om = self.objectMap()
                om.id = prepId(qid)
                om.title = qname
                om.classname = "ActiveMQQueue"
                for key, value in om.__dict__.items():
                    log.info("OM Key: %s, OM Value: %s" % (key, value))
                rm.append(om)
            log.info("RelationshipMap: %s", rm)
            return rm
        else:
            #ERROR
            log.info('No queues found!')
            return None
Example #23
 def process(self, device, results, log):
     """collect WBEM information from this device"""
     log.info('processing %s for device %s', self.name(), device.id)
     rm = self.relMap()
     instances = results["CIM_FileSystem"]
     if not instances: return
     skipfsnames = getattr(device, 'zFileSystemMapIgnoreNames', None)
     skipfstypes = getattr(device, 'zFileSystemMapIgnoreTypes', None)
     for instance in instances:
         try:
             if skipfsnames and re.search(skipfsnames, instance['mount']):
                 log.info("Skipping %s as it matches zFileSystemMapIgnoreNames.",
                     instance['mount'])
                 continue
             if skipfstypes and instance['type'] in skipfstypes:
                 log.info("Skipping %s (%s) as it matches zFileSystemMapIgnoreTypes.",
                     instance['mount'], instance['type'])
                 continue
             om = self.objectMap(instance)
             om.id = prepId(om.mount)
             if not om.totalBlocks or not om.blockSize: continue
             om.totalBlocks = om.totalBlocks / om.blockSize
         except AttributeError:
             continue
         rm.append(om)
     return rm
Example #24
def port_update(device, dmd, evt):
    evt.summary = event_summary("Port", evt)

    objmap = neutron_objmap(evt, "Port")
    _apply_neutron_traits(evt, objmap, 'port')
    _apply_trait_rel(evt, objmap, 'trait_network_id', 'network')

    # If device_owner is part of compute, then add device_id as set_instance
    if 'compute' in evt.trait_device_owner and evt.trait_device_id:
        _apply_trait_rel(evt, objmap, 'trait_device_id', 'server')

    if hasattr(evt, 'trait_device_id'):
        port_instance = get_port_instance(evt.trait_device_owner,
                                          evt.trait_device_id)
        setattr(objmap, 'set_instance', port_instance)

    # get subnets and fixed_ips
    if hasattr(evt, 'trait_fixed_ips'):
        port_fips = ast.literal_eval(evt.trait_fixed_ips)
        _subnets = get_subnets_from_fixedips(port_fips)
        port_subnets = [prepId('subnet-{}'.format(x)) for x in _subnets]
        port_fixedips = get_port_fixedips(port_fips)
        setattr(objmap, 'set_subnets', port_subnets)
        setattr(objmap, 'fixed_ip_list', port_fixedips)

    return [objmap]
Example #25
    def getSystemServicesRelMaps(self, services):
        obj_maps = []

        for type, type_data in services.items():
            for name, name_data in type_data.items():
                for version, data in name_data.items():
                    obj_maps.append(
                        ObjectMap(data=dict(
                            id=prepId(data['id']),
                            title=name,
                            cfId=data['id'],
                            cfName=name,
                            cfVersion=data['version'],
                            cfDescription=data['description'],
                            cfVendor=data['vendor'],
                            cfType=type,
                            setCFTiers=data['tiers'],
                        )))

        return [
            RelationshipMap(
                relname='cfSystemServices',
                modname='ZenPacks.zenoss.CloudFoundry.SystemService',
                objmaps=obj_maps)
        ]
Example #26
    def process(self, result):
        """
        Parses resulting data into datapoints.
        """
        data = json.loads(result)

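        # Find this component among the live nodes and aggregate its per-region metrics.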
        for node in version_diff(data["LiveNodes"]):
            if self.component == prepId(node['name']):
                res = {
                    'requests_per_second': (node['requests'], 'N'),
                    'used_heap_mb': (node['heapSizeMB'], 'N'),
                    'max_heap_mb': (node['maxHeapSizeMB'], 'N'),
                    'regions': (len(node['Region']), 'N'),
                    'read_requests': (0, 'N'),
                    'write_requests': (0, 'N'),
                    'number_of_stores': (0, 'N'),
                    'number_of_store_files': (0, 'N'),
                    'store_file_size_mb': (0, 'N'),
                    'store_file_index_size_mb': (0, 'N'),
                    'memstore_size_mb': (0, 'N'),
                    'current_compacted_kv': (0, 'N'),
                    'total_compacting_kv': (0, 'N'),
                }
                for region in node["Region"]:
                    res = sum_perf_metrics(res, region)
                return res
        return {}
Example #27
 def add_maps(self, result, ds):
     """
     Return a list of ObjectMaps with config properties updates
     for this regionserver and all its regions.
     """
     oms = []
     conf = ConfWrapper(result)
     oms.append(ObjectMap({
         "compname": "hbase_servers/{}".format(self.component),
         "modname": "Region Server conf",
         'handler_count': conf.handler_count,
         'memstore_upper_limit': conf.memstore_upper_limit,
         'memstore_lower_limit': conf.memstore_lower_limit,
         'logflush_interval': conf.logflush_interval
     }))
     # All the regions within the region server will have the same
     # configuration as set in the region server's conf file.
     for region in ds.region_ids:
         oms.append(ObjectMap({
             "compname": "hbase_servers/{}/regions/{}{}{}".format(
                 ds.component, ds.component, NAME_SPLITTER, prepId(region)),
             "modname": "Region conf",
             'memstore_flush_size': convToUnits(conf.memestore_flush_size),
             'max_file_size': convToUnits(conf.max_file_size)
         }))
     return oms
Example #28
    def getAppsRelMaps(self, apps):
        obj_maps = []
        rel_maps = []

        for data in apps:
            app_id = prepId(data['name'])
            obj_maps.append(
                ObjectMap(data=dict(
                    id=app_id,
                    title=data['name'],
                    cfName=data['name'],
                    cfVersion=data['version'],
                    cfState=data['state'],
                    cfMetaCreated=data['meta']['created'],
                    cfMetaVersion=data['meta']['version'],
                    setCFURIs=data['uris'],
                    setCFServices=data['services'],
                    cfStagingModel=data['staging']['model'],
                    cfStagingStack=data['staging']['stack'],
                    modeled_instances=len(data['instances']),
                    modeled_runningInstances=data['runningInstances'],
                    modeled_resourcesMemory=data['resources']['memory'] *
                    1048576,
                    modeled_resourcesDisk=data['resources']['disk'] * 1048576,
                    modeled_resourcesFDS=data['resources']['fds'])))

            rel_maps.extend(
                self.getAppInstancesRelMaps(app_id, data['instances'],
                                            'cfApps/{0}'.format(app_id)))

        return [
            RelationshipMap(relname='cfApps',
                            modname='ZenPacks.zenoss.CloudFoundry.App',
                            objmaps=obj_maps)
        ] + rel_maps
Example #29
    def getProvisionedServicesRelMaps(self, services):
        obj_maps = []

        for data in services:
            obj_maps.append(
                ObjectMap(data=dict(
                    id=prepId(data['name']),
                    title=data['name'],
                    cfName=data['name'],
                    cfVersion=data['version'],
                    cfVendor=data['vendor'],
                    cfType=data['type'],
                    cfTier=data['tier'],
                    cfMetaCreated=data['meta']['created'],
                    cfMetaUpdated=data['meta']['updated'],
                    cfMetaVersion=data['meta']['version'],
                    setCFMetaTags=data['meta']['tags'],
                    setCFProperties=data['properties'],
                )))

        return [
            RelationshipMap(
                relname='cfProvisionedServices',
                modname='ZenPacks.zenoss.CloudFoundry.ProvisionedService',
                objmaps=obj_maps)
        ]
Example #30
 def ManagerInfo(self,dataDict):
     """ Map MQ data to component attributes """
     name = "%s_%s" % (self.baseid, dataDict['QMNAME'])
     info = {'id': prepId(name),
             'managerName': dataDict['QMNAME'],
             'managerStatus': dataDict['STATUS']}
     info['monitor'] = self.getStatus(dataDict['STATUS'])
     info['manager'] = dataDict['QMNAME']
     return info
Example #31
 def filesystem(self):
     """Return filesystem mounting this disk."""
     try:
         # Assumes all FileSystem ids are prepId(mount). Currently they are.
         return self.device().os.filesystems._getOb(prepId(self.mount))
     except Exception:
         pass
Example #32
 def filesystem(self):
     """Return filesystem mounting this logical volume."""
     try:
         # Assumes all FileSystem ids are prepId(mount). Currently they are.
         return self.device().os.filesystems._getOb(prepId(self.mountpoint))
     except Exception:
         pass
Example #33
 def apply_pattern(self, value, pattern):
     if pattern:
         m = re.search(pattern, value)
         if m:
             return prepId(m.group(1).upper())
     else:
         return value
Example #34
    def getAppInstancesRelMaps(self, appId, instances, compname):
        obj_maps = []

        for data in instances:
            instance_id = prepId(str(data['index']))
            stats = data['stats']['stats']
            obj_maps.append(ObjectMap(data=dict(
                id='{0}_{1}'.format(appId, instance_id),
                title=instance_id,
                cfIndex=data['index'],
                cfState=data['state'],
                cfSince=data['since'],
                cfHost=stats['host'],
                cfPort=stats['port'],
                cfCores=stats['cores'],
                modeled_quotaMemory=stats['mem_quota'],
                modeled_quotaDisk=stats['disk_quota'],
                modeled_usageCPU=stats['usage']['cpu'],
                modeled_usageMemory=stats['usage']['mem'] * 1024,
                modeled_usageDisk=stats['usage']['disk'] * 1024,
            )))

        return [RelationshipMap(
            compname=compname,
            relname='cfAppInstances',
            modname='ZenPacks.zenoss.CloudFoundry.AppInstance',
            objmaps=obj_maps)]
Example #35
    def get_conditions(self, xml, componentclass):
        devicemap = get_devicemap()
        xml_attrs = devicemap.get(componentclass)
        components = self.parsexml(xml, xml_attrs.get('xml_obj_filter'))
        results = {}
        for component in components:
            id = prepId(component.get(xml_attrs.get('xml_obj_id')))
            relation = self.apply_pattern(
                component.get(xml_attrs.get('xml_obj_relation'), ''),
                xml_attrs.get('xml_obj_relation_pattern'),
                )

            hrea = component.get(xml_attrs.get('health-reason'), '')
            hrec = component.get(xml_attrs.get('health-recommendation'), '')
            props = {
                'compname': xml_attrs.get('compname', '') + relation,
                'modname': xml_attrs.get('modname'),
                'hrea': component.get('health-reason'),
                'hrec': component.get('health-recommendation'),
                'data': {
                    'id': id,
                    'relname': xml_attrs.get('relname'),
                },
            }
            for cond in xml_attrs.get('xml_obj_conditions'):
                props['data'].update({cond: component.get(cond)})

            results[id] = props

        return results
Example #36
class ClusterNodes(PythonDataSourcePlugin):

    """ClusterNodes data source plugin."""

    @classmethod
    def config_key(cls, datasource, context):
        return (
            context.device().id,
            datasource.getCycleTime(context),
            'netapp-clusternodes',
        )

    @classmethod
    def params(cls, datasource, context):
        return {
            'ip': context.device().manageIp,
            'api': context.zNetAppAPI,
            'un': context.zNetAppUser,
            'pw': context.zNetAppPassword,
        }

    @inlineCallbacks
    def collect(self, config):
        data = self.new_data()

        params = config.datasources[0].params
        baseUrl = params['api']
        if not baseUrl:
            if not params['ip']:
                log.error("Please fill in zNetAppAPI property")
                returnValue(None)
            baseUrl = 'https://{ip}/api'.format(ip=params['ip'])

        un = params['un']
        pw = params['pw']
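        # Build an HTTP Basic Authorization header from the configured credentials.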
        if un and pw:
            basic = base64.encodestring('{un}:{pw}'.format(un=un, pw=pw))
            auth = {'Authorization': 'Basic {b}'.format(b=basic.strip())}
        else:
            auth = {}
            log.info(
                'Please consider using zNetAppUser and zNetAppPassword for authorization')

        try:
            response = yield getPage('{url}/cluster/nodes?fields=uptime&return_records=true&return_timeout=15'.format(url=baseUrl), headers=auth)
            response = json.loads(response)
        except Exception, e:
            log.error(e)
            returnValue(None)

        for datasource in config.datasources:
            for record in response['records']:
                if datasource.component == prepId(record['uuid']):
                    for datapoint_id in (x.id for x in datasource.points):
                        if datapoint_id == 'uptime':
                            value = int(record['uptime'])
                            dpname = '_'.join((datasource.datasource, datapoint_id))
                            data['values'][datasource.component][dpname] = (value, 'N')

        returnValue(data)
Example #37
    def process(self, device, results, log):
        log.info('Collecting Software Installed for device %s' % device.id)
        rm = self.relMap()

        #split on "Location" to get discrete packages
        for package in results.split('Location:'):
            om = self.objectMap()
            lines = package.split('\n')
            version = manufacturer = ''
            #second line is the software package
            om.id = prepId(lines[1].split(':')[0].strip())
            for line in package.split('\n'):
                key = value = ''
                if re.search(':', line):
                    key, value = line.split(':',1)
                    key = key.strip()
                if key == "Version":
                    version = value.strip()
                #the assumption is made that Modified Date is an appropriate
                #equivalent to Install date
                if key == "Last Modified":
                    om.setInstallDate = parse_date(value.strip())
                if key == "Get Info String":
                    manufacturer = parse_manufacturer(value.strip())
            if not manufacturer:
                manufacturer = "Unknown"
            om.setProductKey = MultiArgs(om.id,manufacturer)
            if om.id:
                rm.append(om)

        log.debug(rm)
        return rm
Example #38
    def process(self, result):
        """
        Parses resulting data into datapoints.
        """
        data = json.loads(result)

        for node in version_diff(data["LiveNodes"]):
            if self.component == prepId(node['name']):
                res = {
                    'requests_per_second': (node['requests'], 'N'),
                    'used_heap_mb': (node['heapSizeMB'], 'N'),
                    'max_heap_mb': (node['maxHeapSizeMB'], 'N'),
                    'regions': (len(node['Region']), 'N'),
                    'read_requests': (0, 'N'),
                    'write_requests': (0, 'N'),
                    'number_of_stores': (0, 'N'),
                    'number_of_store_files': (0, 'N'),
                    'store_file_size_mb': (0, 'N'),
                    'store_file_index_size_mb': (0, 'N'),
                    'memstore_size_mb': (0, 'N'),
                    'current_compacted_kv': (0, 'N'),
                    'total_compacting_kv': (0, 'N'),
                }
                for region in node["Region"]:
                    res = sum_perf_metrics(res, region)
                return res
        return {}
Example #39
    def getFileMap(self, device, files_string, dirRegex, dirMatch, compname, log):
        #log.debug('files_string is %s , dirRegex is %s , compname is %s ' % (files_string, dirRegex, compname))
        file_maps = []
        for file in files_string.split('\n'):
            # Split out the filename part and the directory part
            f = file.split('/')[-1]
            d = '/'.join(file.split('/')[:-1])
            # Only consider creating a file map if the directory matches the dirMatch parameter
            if d == dirMatch:
                for k, v in dirRegex.items():
                    if d == k:                # got directory match
                        if re.search( v, f):   # check the regex
                            # Got a regex match against filename f
                            file_id = prepId(f)
                            # Don't want to inherit compname or modname from plugin as we want to set this explicitly
                            # Use ObjectMap rather than om=self.objectMap()
                            file_maps.append(ObjectMap(data = {
                                'id': file_id,
                                'fileName' : f,
                                'fileDirName' : d,
                                'fileRegex' : v,
                                }))
                            log.info('Found dir %s and file %s match' % (d, f))
                            # Get out of for k, v in dirRegex.items(): loop - don't care if matches on >1 regex
                            break

        # Return file_maps relationship map with compname passed as parameter to this method
        # Again - don't want to inherit relname, modname or compname for this relationship as we want to set them explicitly
        # Use RelationshipMap rather than rm=self.relMap()
        return RelationshipMap(
            compname = compname,
            relname = 'files',
            modname = 'ZenPacks.community.DirFile.File',
            objmaps = file_maps)
Example #40
    def getQueueRelMap(self, queues_string, compname):
        object_maps = []
        for queue_string in queues_string.split('\n'):
            if not queue_string.strip():
                continue

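            # Each queue line carries four whitespace-separated fields:
            # name, durable, auto_delete and arguments.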
            name, durable, auto_delete, arguments = \
                re.split(r'\s+', queue_string)

            if re.search(r'true', durable, re.I):
                durable = True
            else:
                durable = False

            if re.search(r'true', auto_delete, re.I):
                auto_delete = True
            else:
                auto_delete = False

            object_maps.append(
                ObjectMap(
                    data={
                        'id': prepId(name),
                        'title': name,
                        'durable': durable,
                        'auto_delete': auto_delete,
                        'arguments': arguments,
                    }))

        return RelationshipMap(
            compname=compname,
            relname='rabbitmq_queues',
            modname='ZenPacks.zenoss.RabbitMQ.RabbitMQQueue',
            objmaps=object_maps)
Example #41
    def getAppInstancesRelMaps(self, appId, instances, compname):
        obj_maps = []

        for data in instances:
            instance_id = prepId(str(data['index']))
            stats = data['stats']['stats']
            obj_maps.append(
                ObjectMap(data=dict(
                    id='{0}_{1}'.format(appId, instance_id),
                    title=instance_id,
                    cfIndex=data['index'],
                    cfState=data['state'],
                    cfSince=data['since'],
                    cfHost=stats['host'],
                    cfPort=stats['port'],
                    cfCores=stats['cores'],
                    modeled_quotaMemory=stats['mem_quota'],
                    modeled_quotaDisk=stats['disk_quota'],
                    modeled_usageCPU=stats['usage']['cpu'],
                    modeled_usageMemory=stats['usage']['mem'] * 1024,
                    modeled_usageDisk=stats['usage']['disk'] * 1024,
                )))

        return [
            RelationshipMap(compname=compname,
                            relname='cfAppInstances',
                            modname='ZenPacks.zenoss.CloudFoundry.AppInstance',
                            objmaps=obj_maps)
        ]
Example #42
    def parse_result(self, dsconfs, result):

        if result.exit_code != 0:
            counters = [dsconf.params['resource'] for dsconf in dsconfs]
            log.info(
                'Non-zero exit code ({0}) for counters, {1}, on {2}'.format(
                    result.exit_code, counters, dsconfs[0].device))
            return
        # Parse values
        stdout = parse_stdout(result, check_stderr=True)
        if stdout:
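            # stdout is expected to hold the seven whitespace-separated
            # cluster service fields unpacked below.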
            name, iscoregroup, ownernode, state, description, nodeid,\
                priority = stdout
            dsconf0 = dsconfs[0]

            compObject = ObjectMap()
            compObject.id = prepId(nodeid)
            compObject.title = name
            compObject.coregroup = iscoregroup
            compObject.ownernode = ownernode
            compObject.state = state
            compObject.description = description
            compObject.priority = priority
            compObject.compname = dsconf0.params['contextcompname']
            compObject.modname = dsconf0.params['contextmodname']
            compObject.relname = dsconf0.params['contextrelname']

            for dsconf in dsconfs:
                value = (name, state, compObject)
                timestamp = int(time.mktime(time.localtime()))
                yield dsconf, value, timestamp
        else:
            log.debug('Error in parsing cluster service data')
Example #43
 def add_maps(self, res, ds):
     """
     Check for added/removed tables and return a RelationshipMap if
     any changes took place. Otherwise return empty list.
     """
     try:
         res = json.loads(res)
     except ValueError:
         log.error('Error parsing collected data for {} monitoring template'
                   .format(ds.template))
         res = []
     if not res:
         return []
     tables_update = set(table['name'] for table in res.get('table'))
     self.added = list(tables_update.difference(set(ds.table_ids)))
     self.removed = list(set(ds.table_ids).difference(tables_update))
     if self.added or self.removed:
         tables_oms = []
         for table in tables_update:
             tables_oms.append(ObjectMap({
                 'id': prepId(table),
                 'title': table
             }))
         return [RelationshipMap(
             relname='hbase_tables',
             modname=MODULE_NAME['HBaseTable'],
             objmaps=tables_oms)]
     return []
Example #44
 def collect(self, device, log):
     ''''''
     output = []
     log.info("collecting %s for %s." % (self.name(), device.id))
     self.scan = JavaAppScan(device.manageIp, device.zJavaAppPortRange, device.zJmxUsername, device.zJmxPassword,
                             device.zJolokiaProxyHost, device.zJolokiaProxyPort, device.zJavaAppScanTimeout)
     self.scan.evalPorts()
     for jmx in self.scan.portdict.values():
         name = "%s_%s" % (self.baseid, str(jmx.port))
         info = {'id': prepId(name),
                 'port': jmx.port,
                 'auth': jmx.auth,
                 'isWorking' : jmx.connected,
                 'protocol': jmx.protocol,
                 'parameters': None,
                 'user': None,
                 'password': None,
                 'javaversion': jmx.javaversion,
                 'vendorname': jmx.vendorname,
                 'vendorproduct': jmx.vendorproduct,
                 }
         if jmx.connected is True and jmx.auth is True:
                 info['user'] = jmx.user
                 info['password'] = jmx.password
         output.append(info)
     return output
Example #45
 def add_maps(self, res, ds):
     """
     Check for added/removed tables and return a RelationshipMap if
     any changes took place. Otherwise return empty list.
     """
     try:
         res = json.loads(res)
     except ValueError:
         log.error(
             'Error parsing collected data for {} monitoring template'.
             format(ds.template))
         res = []
     if not res:
         return []
     tables_update = set(table['name'] for table in res.get('table'))
     self.added = list(tables_update.difference(set(ds.table_ids)))
     self.removed = list(set(ds.table_ids).difference(tables_update))
     if self.added or self.removed:
         tables_oms = []
         for table in tables_update:
             tables_oms.append(
                 ObjectMap({
                     'id': prepId(table),
                     'title': table
                 }))
         return [
             RelationshipMap(relname='hbase_tables',
                             modname=MODULE_NAME['HBaseTable'],
                             objmaps=tables_oms)
         ]
     return []
Example #46
def images_rm(region_id, images):
    '''
    Return images RelationshipMap given region_id and an ImageInfo
    ResultSet.
    '''
    image_data = []
    for image in images:
        image_data.append({
            'id': prepId(image.id),
            'title': image.name if image.name else image.id,
            'location': image.location,
            'state': image.state,
            'owner_id': image.owner_id,
            'architecture': image.architecture,
            # 'platform': getattr(image, 'platform', ''),
            'image_type': image.type,
            'kernel_id': image.kernel_id,
            'ramdisk_id': image.ramdisk_id,
            'description': image.description,
            'block_device_mapping': block_device(image.block_device_mapping),
            'root_device_type': image.root_device_type,
            'root_device_name': image.root_device_name,
            'virtualization_type': image.virtualization_type,
            'hypervisor': image.hypervisor,
        })

    return RelationshipMap(
        compname='regions/%s' % region_id,
        relname='images',
        modname=MODULE_NAME['EC2Image'],
        objmaps=image_data
    )
Example #47
    def parse_result(self, dsconfs, result):

        if result.exit_code != 0:
            counters = [dsconf.params['resource'] for dsconf in dsconfs]
            log.info(
                'Non-zero exit code ({0}) for counters, {1}, on {2}'
                .format(
                    result.exit_code, counters, dsconfs[0].device))
            return
        # Parse values
        stdout = parse_stdout(result, check_stderr=True)
        if stdout:
            name, iscoregroup, ownernode, state, description, nodeid,\
                priority = stdout
            dsconf0 = dsconfs[0]

            compObject = ObjectMap()
            compObject.id = prepId(nodeid)
            compObject.title = name
            compObject.coregroup = iscoregroup
            compObject.ownernode = ownernode
            compObject.state = state
            compObject.description = description
            compObject.priority = priority
            compObject.compname = dsconf0.params['contextcompname']
            compObject.modname = dsconf0.params['contextmodname']
            compObject.relname = dsconf0.params['contextrelname']

            for dsconf in dsconfs:
                value = (name, state, compObject)
                timestamp = int(time.mktime(time.localtime()))
                yield dsconf, value, timestamp
        else:
            log.debug('Error in parsing cluster service data')
示例#48
0
    def onSuccess(self, result, config):
        log.debug('Success job - result is {}'.format(result))
        # TODO : cleanup job onSuccess
        data = self.new_data()
        ds_data = {}
        for success, ddata in result:
            if success:
                ds = ddata[0]
                metrics = json.loads(ddata[1])
                ds_data[ds] = metrics

        ds0 = config.datasources[0]
        componentID = prepId(ds0.component)
        applicationNameID = ds0.params['applicationNameID']
        tag = '{}_{}'.format(ds0.datasource, applicationNameID)
        orders_data = ds_data.get(tag, '')

        total_check = 0
        total_metrics = 0
        for order in orders_data:
            order_status = str(order['status'])
            order_value = float(order['count'])
            data['values'][componentID][order_status.lower()] = order_value
            if order_status != 'TOTAL':
                total_check += order_value
            else:
                total_metrics = order_value
        data['values'][componentID]['total_check'] = total_metrics - total_check

        log.debug('Success job - data is {}'.format(data))
        return data
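A small worked example of the counting above, with made-up order buckets; the status names and counts are purely illustrative:

    orders_data = [
        {'status': 'COMPLETED', 'count': 40},
        {'status': 'FAILED', 'count': 5},
        {'status': 'TOTAL', 'count': 50},
    ]
    # datapoints written: completed=40.0, failed=5.0, total=50.0
    # the 'total_check' datapoint ends up as 50.0 - (40.0 + 5.0) = 5.0,
    # i.e. whatever the TOTAL row reports beyond the sum of the listed statuses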
示例#49
0
def manage_addIpRouteEntry(context, dest, routemask, nexthopid, interface,
                           routeproto, routetype, userCreated=None, REQUEST=None):
    """
    Make an IpRouteEntry from the ZMI
    """
    if not routemask:
        routemask = 0
    else:
        routemask = int(routemask)
    dest = '%s/%s' % (dest, routemask)
    id = prepId(dest)
    d = IpRouteEntry(id)
    context._setObject(id, d)
    d = context._getOb(id)
    d.setTarget(dest)
    d.setNextHopIp(nexthopid)
    d.setInterfaceName(interface)
    if userCreated: d.setUserCreateFlag()
    d.routeproto = routeproto
    d.routetype = routetype
    d.routemask = routemask
    
    if REQUEST is not None:
        REQUEST['RESPONSE'].redirect(context.absolute_url_path()
                                     +'/manage_main') 
示例#50
0
    def getQueueRelMap(self, queues_string, compname):
        object_maps = []
        for queue_string in queues_string.split('\n'):
            if not queue_string.strip():
                continue

            name, durable, auto_delete, arguments = \
                re.split(r'\s+', queue_string)

            if re.search(r'true', durable, re.I):
                durable = True
            else:
                durable = False

            if re.search(r'true', auto_delete, re.I):
                auto_delete = True
            else:
                auto_delete = False

            object_maps.append(ObjectMap(data={
                'id': prepId(name),
                'title': name,
                'durable': durable,
                'auto_delete': auto_delete,
                'arguments': arguments,
                }))

        return RelationshipMap(
            compname=compname,
            relname='rabbitmq_queues',
            modname='ZenPacks.zenoss.RabbitMQ.RabbitMQQueue',
            objmaps=object_maps)
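A minimal usage sketch for the parser above; the input format (whitespace-separated name, durable, auto_delete, arguments columns, e.g. from rabbitmqctl list_queues) and the `modeler` instance are assumptions for illustration:

    # one queue per line; columns must match the re.split() unpacking above
    sample = 'celery\ttrue\tfalse\t[]\nevents\tfalse\ttrue\t[]\n'
    rel_map = modeler.getQueueRelMap(sample, compname='rabbitmq_nodes/rabbit_node0')
    # rel_map should carry two ObjectMaps with ids prepId('celery') and prepId('events'),
    # durable=True/auto_delete=False for the first and the reverse for the second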
示例#51
0
def manage_addIpRouteEntry(context,
                           dest,
                           routemask,
                           nexthopid,
                           interface,
                           routeproto,
                           routetype,
                           userCreated=None,
                           REQUEST=None):
    """
    Make an IpRouteEntry from the ZMI
    """
    if not routemask:
        routemask = 0
    else:
        routemask = int(routemask)
    dest = '%s/%s' % (dest, routemask)
    id = prepId(dest)
    d = IpRouteEntry(id)
    context._setObject(id, d)
    d = context._getOb(id)
    d.setTarget(dest)
    d.setNextHopIp(nexthopid)
    d.setInterfaceName(interface)
    if userCreated: d.setUserCreateFlag()
    d.routeproto = routeproto
    d.routetype = routetype
    d.routemask = routemask

    if REQUEST is not None:
        REQUEST['RESPONSE'].redirect(context.absolute_url_path() +
                                     '/manage_main')
示例#52
0
    def onSuccess(self, result, config):
        log.debug('Success - result is {}'.format(result))
        data = self.new_data()
        now_time = time.time()  # Time in GMT, as on device
        current_window_start = now_time - now_time % isam_cycle
        prev_window_start = current_window_start - isam_cycle
        cycletime = config.datasources[0].cycletime
        for rproxy in result:
            component = prepId(rproxy['instance'])
            # records could be a dictionary, not a list ???
            # records holds values collected for each window of 10 minutes
            # the current window sees its value raising during the current interval of 10 minutes
            # this means that the current window has its value reset every 10 minutes
            records = rproxy['records']
            if records == 0:
                data['values'][component]['rpthroughput_requests'] = 0
            elif len(records) == 1:
                log.error('onSuccess: records not a list: {}'.format(records))
            else:
                for poll in records:
                    poll_time = float(poll['t'])
                    if poll_time == prev_window_start:
                        # Divide value by cycletime and multiply by 60 to get number of requests per minute
                        data['values'][component]['rpthroughput_requests'] = (
                            float(poll['e']) / cycletime * 60,
                            current_window_start)
                        break

        log.debug('RPThroughput data: {}'.format(data))
        return data
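To make the window arithmetic above concrete, a sketch with assumed numbers (isam_cycle of 600 seconds, i.e. the 10-minute window mentioned in the comments, and a 300-second cycletime):

    isam_cycle = 600            # assumed 10-minute reporting window
    cycletime = 300             # assumed datasource cycle time, in seconds
    now_time = 1700000950.0     # example timestamp
    current_window_start = now_time - now_time % isam_cycle   # 1700000400.0
    prev_window_start = current_window_start - isam_cycle     # 1699999800.0
    # a record for the previous, completed window that counted 150 requests:
    poll = {'t': prev_window_start, 'e': 150}
    requests_per_minute = float(poll['e']) / cycletime * 60   # 30.0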
示例#53
0
    def onSuccess(self, result, config):
        log.debug('Success - result is {}'.format(result))

        data = self.new_data()
        jvm_name = config.datasources[0].component
        component = prepId(jvm_name)
        if 'jvm_memory' in result and result['jvm_memory']['http_code'] < 300:
            jvm_memory_values = result['jvm_memory']['body']['value']
            heap_values = jvm_memory_values['HeapMemoryUsage']
            data['values'][component]['heap_committed'] = heap_values[
                'committed']
            data['values'][component]['heap_max'] = heap_values['max']
            data['values'][component]['heap_used'] = heap_values['used']
            data['values'][component]['heap_used_percent'] = round(
                float(heap_values['used']) / heap_values['max'] * 100, 2)
            nonheap_values = jvm_memory_values['NonHeapMemoryUsage']
            data['values'][component]['nonheap_committed'] = nonheap_values[
                'committed']
            data['values'][component]['nonheap_used'] = nonheap_values['used']

        for k, v in result.items():
            if k.startswith('jvm_memorypool_'):
                values = v['body']['value']  # TODO: use get
                data['values'][component]['{}_used'.format(k)] = values['used']
                data['values'][component]['{}_committed'.format(
                    k)] = values['committed']
                data['values'][component]['{}_max'.format(k)] = values['max']
        log.debug('ActiveMQJVM onSuccess data: {}'.format(data))
        return data
示例#54
0
def id_from_ref(ref):
    '''
    Return a component id given a XenAPI OpaqueRef.
    '''
    if not ref or ref == 'OpaqueRef:NULL':
        return None

    return prepId(ref.split(':', 1)[1])
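For illustration, a few hypothetical calls (the OpaqueRef value is made up); splitting on the first ':' drops the 'OpaqueRef' prefix and prepId sanitises the remainder:

    id_from_ref(None)                               # -> None
    id_from_ref('OpaqueRef:NULL')                   # -> None
    id_from_ref('OpaqueRef:1c83d9aa-b805-4546')     # -> prepId('1c83d9aa-b805-4546')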
示例#55
0
    def interfaceByDesc(self, device, desc):
        try:
            interface = device.os.interfaces._getOb(prepId(desc))
        except Exception:
            # catch all, bad, but lazy
            log.warn("can't find interface %s on device %s" % (desc, device))
            return desc
        return interface
示例#56
0
def make_id(evt, prefix, try_traits=[]):
    """Return a valid id in "<prefix>-<raw_id>" format"""
    for traitname in try_traits:
        raw_id = evt.get(traitname, None)
        if raw_id is not None:
            return prepId("{0}-{1}".format(prefix, raw_id))

    # unable to find a valid component ID in this event.
    return None
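A quick sketch of how make_id behaves, using a plain dict in place of the event and made-up trait names:

    evt = {'trait_resource_id': 'vm-42'}
    make_id(evt, 'instance', try_traits=['trait_resource_id'])   # -> prepId('instance-vm-42')
    make_id(evt, 'instance', try_traits=['no_such_trait'])       # -> None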
示例#57
0
def manage_addVolume(context, id, userCreated, REQUEST=None):
    svid = prepId(id)
    sv = DellEqualLogicVolume(svid)
    context._setObject(svid, sv)
    sv = context._getOb(svid)
    if userCreated: sv.setUserCreatedFlag()
    if REQUEST is not None:
        REQUEST['RESPONSE'].redirect(context.absolute_url()+'/manage_main')
    return sv