def _buildMetric(self, context, dp, cf, extraRpn="", format=""):
        datasource = dp.datasource()
        dsId = datasource.id
        info = IInfo(dp)

        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        rateOptions = info.getRateOptions()
        tags = self._buildTagsFromContextAndMetric(context, dsId)
        metricname = dp.name()
        key = self._get_key_from_tags(tags)
        search = _devname_pattern.match(key)
        if search:
            metricname = metrics.ensure_prefix(context.getMetricMetadata(),
                                               metricname)
        name = context.getResourceKey() + "|" + dp.name()
        metric = dict(metric=metricname,
                      aggregator=agg,
                      tags=tags,
                      rate=info.rate,
                      name=name)
        combined_metric = [metric]
        if rateOptions:
            metric['rateOptions'] = rateOptions
        if extraRpn:
            metric['emit'] = 'false'
            metric['name'] = "{}-raw".format(dp.name())
            new_metric = dict(expression="rpn:{}-raw,{}".format(
                dp.name(), extraRpn),
                              format=format,
                              name=name)
            combined_metric.append(new_metric)
        else:
            metric['format'] = format
        return combined_metric

    def getMetrics(self,
                   uuid,
                   dpNames,
                   cf='AVERAGE',
                   rate=False,
                   downsample="1h-avg",
                   start=None,
                   end=None,
                   deviceId=None,
                   returnSet="EXACT"):
        metrics = []
        if isinstance(dpNames, basestring):
            dpNames = [dpNames]
        for dpName in dpNames:
            # TODO find callers
            name = ensure_prefix(deviceId, dpName)
            metrics.append(
                dict(metric=name,
                     aggregator=self._aggMapping.get(cf.lower(), cf.lower()),
                     rpn='',
                     rate=rate,
                     format='%.2lf',
                     tags=dict(contextUUID=[uuid]),
                     name='%s_%s' % (uuid, dpName)))

        request = dict(returnset=returnSet,
                       start=start,
                       end=end,
                       downsample=downsample,
                       metrics=metrics)
        body = FileBodyProducer(StringIO(json.dumps(request)))
        d = self.agent.request('POST', self._metric_url, self._headers, body)
        return d
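For context, a minimal sketch of how this getMetrics variant might be driven, assuming a facade instance whose agent, _metric_url and _headers are already wired up (all caller names below are hypothetical); the method returns a Twisted Deferred that fires with the HTTP response:

    # Hypothetical caller; 'facade' is an instance of the class above.
    d = facade.getMetrics(
        uuid='<context-uuid>',            # becomes the contextUUID tag
        dpNames=['ssCpuIdle_ssCpuIdle'],  # a single name is also accepted
        cf='AVERAGE',
        downsample='1h-avg',
        deviceId='mydevice',              # used to prefix the metric name
    )
    d.addCallback(handleMetricResponse)   # hypothetical response handler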
Example #3
    def _buildMetric(self, context, dp, cf, extraRpn="", format=""):
        datasource = dp.datasource()
        dsId = datasource.id
        info = IInfo(dp)

        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        rateOptions = info.getRateOptions()
        tags = self._buildTagsFromContextAndMetric(context, dsId)
        metricname = dp.name()
        key = self._get_key_from_tags(tags)
        search = _devname_pattern.match(key)
        if search:
            prefix = search.groups()[0]
            metricname = metrics.ensure_prefix(prefix, metricname)
        metric = dict(
            metric=metricname,
            aggregator=agg,
            rpn=extraRpn,
            format=format,
            tags=tags,
            rate=info.rate,
            name=context.getResourceKey() + "|" + dp.name()
        )
        if rateOptions:
            metric['rateOptions'] = rateOptions
        return metric

    def getMetrics(self, uuid, dpNames, cf='AVERAGE', rate=False, downsample="1h-avg", start=None, end=None,
                   deviceId=None, returnSet="EXACT"):
        metrics = []
        if isinstance(dpNames, basestring):
            dpNames = [dpNames]
        for dpName in dpNames:
            # TODO find callers
            name = ensure_prefix(deviceId, dpName)
            metrics.append(dict(
                metric=name,
                aggregator=self._aggMapping.get(cf.lower(), cf.lower()),
                rpn='',
                rate=rate,
                format='%.2lf',
                tags=dict(contextUUID=[uuid]),
                name='%s_%s' % (uuid, dpName)
            ))

        request = dict(
            returnset=returnSet,
            start=start,
            end=end,
            downsample=downsample,
            metrics=metrics
        )
        body = FileBodyProducer(StringIO(json.dumps(request)))
        d = self.agent.request('POST', self._metric_url, self._headers, body)
        return d
Example #5
    def _buildMetric(self, context, dp, cf, extraRpn="", format=""):
        datasource = dp.datasource()
        dsId = datasource.id
        info = IInfo(dp)

        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        rateOptions = info.getRateOptions()
        tags = self._buildTagsFromContextAndMetric(context, dsId)
        metricname = dp.name()
        key = self._get_key_from_tags(tags)
        search = _devname_pattern.match(key)
        if search:
            prefix = search.groups()[0]
            metricname = metrics.ensure_prefix(prefix, metricname)
        metric = dict(metric=metricname,
                      aggregator=agg,
                      rpn=extraRpn,
                      format=format,
                      tags=tags,
                      rate=info.rate,
                      name=context.getResourceKey() + "|" + dp.name())
        if rateOptions:
            metric['rateOptions'] = rateOptions
        return metric
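For orientation, the dict this variant returns might look roughly like the following for a CPU datapoint; every value below is illustrative, not real output:

    # Illustrative return value (all values are placeholders):
    {
        'metric': 'mydevice/ssCpuIdle_ssCpuIdle',  # prefixed when the key matches _devname_pattern
        'aggregator': 'avg',                       # via AGGREGATION_MAPPING
        'rpn': '',                                 # extraRpn, applied server-side
        'format': '%.2lf',
        'tags': {'key': ['devices/mydevice']},     # shape assumed; built by _buildTagsFromContextAndMetric
        'rate': False,
        'name': 'Devices/mydevice|ssCpuIdle_ssCpuIdle',
    }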
Example #6
    def writeMetricWithMetadata(self,
                                metric,
                                value,
                                metricType,
                                timestamp='N',
                                min='U',
                                max='U',
                                threshEventData={},
                                metadata=None):

        metadata = metadata or {}
        try:
            key = metadata['contextKey']
            contextId = metadata['contextId']
            deviceId = metadata['deviceId']
            contextUUID = metadata['contextUUID']
            # metadata is guaranteed non-empty here; the lookups above
            # would have raised KeyError otherwise.
            metric_name = metrics.ensure_prefix(metadata, metric)
        except KeyError as e:
            raise Exception("Missing necessary metadata: %s" % e.message)
        deviceUUID = metadata.get('deviceUUID')
        return self.writeMetric(key, metric_name, value, metricType, contextId,
                                timestamp, min, max, threshEventData, deviceId,
                                contextUUID, deviceUUID)
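A sketch of the metadata this method expects: the four keys read in the try block are mandatory (a missing one raises), deviceUUID is optional; the publisher instance and all field values below are hypothetical:

    metadata = {
        'contextKey': 'Devices/mydevice',   # required
        'contextId': 'mydevice',            # required
        'deviceId': 'mydevice',             # required
        'contextUUID': '<context-uuid>',    # required
        'deviceUUID': '<device-uuid>',      # optional
    }
    publisher.writeMetricWithMetadata('ssCpuIdle_ssCpuIdle', 97.0, 'GAUGE',
                                      metadata=metadata)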
Example #7
    def _readLastValue(self, uuid, datasource, datapoint, rra='AVERAGE', rate=False, ago=300, targetConfig={}):
        from Products.ZenUtils.metrics import ensure_prefix

        metrics = []
        if targetConfig.get('device'):
            deviceId = targetConfig['device']['id']
        else:
            deviceId = targetConfig.get('id')
        if not deviceId:
            return None
        name = ensure_prefix(deviceId, datasource + "_" + datapoint)
        log.debug("should not need to fetch metric: %s %s_%s", name, uuid, datapoint)

    def _buildWildCardMetrics(self, device, metricName, cf='avg', isRate=False, format="%.2lf"):
        """
        Build a metric request that matches every context under a device
        via a wildcard contextUUID tag; this method only works with
        device-level (device.id-prefixed) metric names.
        """
        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        metric = dict(
            metric=ensure_prefix(device.id, metricName),
            aggregator=agg,
            format=format,
            tags={'contextUUID': ['*']},
            rate=isRate,
            name=device.getResourceKey() + metricName
        )
        return metric
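A hypothetical call: because the contextUUID tag is the wildcard '*', a single request matches the series for every context that shares the device-prefixed metric name:

    # 'facade' stands in for the object defining the method above.
    metric = facade._buildWildCardMetrics(device, 'ssCpuIdle_ssCpuIdle',
                                          cf='avg', isRate=True)
    assert metric['tags'] == {'contextUUID': ['*']}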
Example #9
    def _buildWildCardMetrics(self,
                              device,
                              metricName,
                              cf="avg",
                              isRate=False,
                              format="%.2lf"):
        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        metric = dict(
            metric=ensure_prefix(device.id, metricName),
            aggregator=agg,
            format=format,
            tags={"contextUUID": ["*"]},
            rate=isRate,
            name=device.getResourceKey() + metricName,
        )
        return metric
Example #10
    def batchFetchMetrics(self, datasources):
        log.debug("Batch Fetching metrics from central query")
        from Products.ZenUtils.metrics import ensure_prefix
        from collections import defaultdict

        sourcetypes = defaultdict(int)

        dsPoints = set()

        for ds in datasources:
            for dp in ds.points:
                dsdpID = "%s/%s" % (dp.metadata["contextUUID"], dp.dpName)
                if dsdpID in dsPoints:
                    log.debug("already found in ds points %s", dsdpID)
                else:
                    dsPoints.add(dsdpID)
        metrics = {}
        for datasource in datasources:
            for dsname, datapoint, rra, rate in datasource.params['targetDatapoints']:
                for targetConfig in datasource.params['targets']:
                    targetValue = targetConfig.get(self._targetKey, None)
                    uuid = targetValue
                    # Filter out target datapoints that match a datasource
                    # datapoint. Target datapoints are what a datasource is
                    # made up of; if a target point is also a datasource
                    # datapoint we don't have to query for it, since it will
                    # be calculated.
                    filterKey = "%s/%s_%s" % (targetConfig.get("uuid", None), dsname, datapoint)
                    if filterKey in dsPoints:
                        log.debug("skipping target datapoint %s, since also a datasource datapoint", filterKey)
                        continue
                    cachekey = self._getKey(dsname, datapoint, rra, targetValue)
                    if not targetConfig.get('device'):
                        deviceId = targetConfig.get('id')
                    else:
                        deviceId = targetConfig['device']['id']
                    name = ensure_prefix(deviceId, dsname + "_" + datapoint)
                    dsclassname = datasource.params['datasourceClassName']
                    sourcetypes[dsclassname] += 1
                    self._insert_key(metrics, name, rra, rate, uuid, cachekey)
        if not len(metrics):
            return

        end, start = self._get_end_and_start()
        chunkSize = 1000
        yield self.fetchChunks(chunkSize, end, start, metrics.values(), sourcetypes)

    def _buildWildCardMetrics(self,
                              device,
                              metricName,
                              cf='avg',
                              isRate=False,
                              format="%.2lf"):
        """
        Build a metric request that matches every context under a device
        via a wildcard contextUUID tag; this method only works with
        device-level (device.id-prefixed) metric names.
        """
        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        metric = dict(metric=ensure_prefix(device.id, metricName),
                      aggregator=agg,
                      format=format,
                      tags={'contextUUID': ['*']},
                      rate=isRate,
                      name=device.getResourceKey() + metricName)
        return metric
Example #12
    def writeMetric(self, contextKey, metric, value, metricType, contextId,
                    timestamp='N', min='U', max='U',
                    threshEventData={}, deviceId=None, contextUUID=None,
                    deviceUUID=None):

        """
        Writes the metric to the metric publisher.
        @param contextKey: This is who the metric applies to. This is usually
                            the return value of rrdPath() for a component or
                            device.
        @param metric: the name of the metric, we expect it to be of the form datasource_datapoint
        @param value: the value of the metric
        @param metricType: type of the metric (e.g. 'COUNTER', 'GAUGE', 'DERIVE' etc)
        @param contextId: used for the threshold events, the id of who this metric is for
        @param timestamp: defaults to time.time() if not specified, the time the metric occurred
        @param min: used in the derive the min value for the metric
        @param max: used in the derive the max value for the metric
        @param threshEventData: extra data put into threshold events
        @param deviceId: the id of the device for this metric
        @return: a deferred that fires when the metric gets published
        """
        timestamp = int(time.time()) if timestamp == 'N' else timestamp
        tags = {
            'contextUUID': contextUUID,
            'key': contextKey
        }
        metric_name = metric
        if deviceId:
            tags['device'] = deviceId
            metric_name = metrics.ensure_prefix(deviceId, metric_name)

        # write the raw metric to Redis
        self._metric_writer.write_metric(metric_name, value, timestamp, tags)

        # compute (and cache) a rate for COUNTER/DERIVE
        if metricType in {'COUNTER', 'DERIVE'}:
            dkey = "%s:%s" % (contextUUID, metric)
            value = self._derivative_tracker.derivative(
                dkey, (int(value), timestamp), min, max)

        # check for threshold breaches and send events when needed
        if value is not None:
            self._threshold_notifier.notify(contextUUID, contextId, metric,
                    timestamp, value, threshEventData)
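A minimal sketch of a direct call, assuming a publisher instance whose _metric_writer, _derivative_tracker and _threshold_notifier are already wired up (the instance name and values are hypothetical):

    # Publishes one GAUGE sample stamped with the current time ('N').
    publisher.writeMetric(
        contextKey='Devices/mydevice',
        metric='ssCpuIdle_ssCpuIdle',
        value=97.0,
        metricType='GAUGE',
        contextId='mydevice',
        deviceId='mydevice',            # enables the device-id prefix
        contextUUID='<context-uuid>',
    )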
Example #13
    def writeMetricWithMetadata(
            self,
            metric,
            value,
            metricType,
            timestamp='N',
            min='U',
            max='U',
            threshEventData=None,
            metadata=None,
            extraTags=None):

        metadata = metadata or {}
        try:
            key = metadata['contextKey']
            contextId = metadata['contextId']
            deviceId = metadata['deviceId']
            contextUUID = metadata['contextUUID']
            # metadata is guaranteed non-empty here; the lookups above
            # would have raised KeyError otherwise.
            metric_name = metrics.ensure_prefix(metadata, metric)
        except KeyError as e:
            raise Exception("Missing necessary metadata: %s" % e.message)

        return self.writeMetric(
            key,
            metric_name,
            value,
            metricType,
            contextId,
            timestamp=timestamp,
            min=min,
            max=max,
            threshEventData=threshEventData,
            deviceId=deviceId,
            contextUUID=contextUUID,
            deviceUUID=metadata.get('deviceUUID'),
            extraTags=extraTags)
Example #14
    def _buildMetric(self, context, dp, cf, extraRpn="", format=""):
        datasource = dp.datasource()
        dsId = datasource.id
        info = IInfo(dp)

        # find out our aggregation function
        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        rateOptions = info.getRateOptions()
        tags = self._buildTagsFromContextAndMetric(context, dsId)
        metricname = dp.name()
        key = self._get_key_from_tags(tags)
        search = _devname_pattern.match(key)
        if search:
            metricname = metrics.ensure_prefix(context.getMetricMetadata(), metricname)
        name = context.getResourceKey() + "|" + dp.name()
        metric = dict(
            metric=metricname,
            aggregator=agg,
            tags=tags,
            rate=info.rate,
            name=name
        )
        combined_metric = [metric]
        if rateOptions:
            metric['rateOptions'] = rateOptions
        if extraRpn:
            metric['emit'] = 'false'
            metric['name'] = "{}-raw".format(dp.name())
            new_metric = dict(
                expression="rpn:{}-raw,{}".format(dp.name(), extraRpn),
                format=format,
                name=name
            )
            combined_metric.append(new_metric)
        else:
            metric['format'] = format
        return combined_metric
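When extraRpn is set, this variant returns two requests: the raw series (with emit='false', so it is not returned to the caller) plus a derived RPN expression over it. For extraRpn="100,/" the result would look roughly like this; all values are illustrative:

    # Illustrative combined_metric for extraRpn="100,/":
    [
        {'metric': 'mydevice/ssCpuIdle_ssCpuIdle',
         'aggregator': 'avg',
         'tags': {'key': ['devices/mydevice']},   # shape assumed
         'rate': False,
         'emit': 'false',                         # raw series is hidden
         'name': 'ssCpuIdle_ssCpuIdle-raw'},
        {'expression': 'rpn:ssCpuIdle_ssCpuIdle-raw,100,/',
         'format': '%.2lf',
         'name': 'Devices/mydevice|ssCpuIdle_ssCpuIdle'},
    ]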
Example #15
    def getMetricsForContexts(self, contexts, metricNames, start=None,
                              end=None, format="%.2lf", cf="avg",
                              downsample=None, timeout=10, isRate=False):
        """
        getMetricsForContexts returns the metrics for all the contexts given
        :param contexts: the device or component with metrics, only needs to have the getMetricMetadata method
        :param metricNames: names of the metrics to be fetched for each context
        :param start: timestamp
        :param end: timestamp
        :param format:
        :param cf:
        :param downsample: eg. 5m-avg, no downsample by default
        :param timeout: timeout for request
        :param isRate: if the metric should be returned as a rate, False by default
        :returns map with keys of resource key (FQN of context) to map that contains metric name keys to list of
        datapoints, datapoints is a map with 'timestamp' and 'value' keys.
        """
        # "series" is the fully qualified metric name (with device/component
        # prefix). This map tracks all the different contexts (device or
        # component) that share the same series name.
        seriesToCtxKey = defaultdict(list)

        # each series name maps back to the original metricName requested
        seriesToMetricNames = {}
        for ctx in contexts:
            mData = ctx.getMetricMetadata()
            for mName in metricNames:
                seriesName = metrics.ensure_prefix(mData, mName)
                seriesToCtxKey[seriesName].append(mData['contextKey'])
                seriesToMetricNames[seriesName] = mName

        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        metricRequests = []

        if downsample is not None:
            downsample = self._formatDownsample(downsample)

        for seriesName, ctxKeys in seriesToCtxKey.items():
            contextKeys = ctxKeys
            # If we have a bunch, just do a wildcard.
            if len(ctxKeys) > 200:
                contextKeys = ['*']

            metricReq = dict(
                metric=seriesName,
                aggregator=agg,
                format=format,
                tags={'key': contextKeys},
                rate=isRate,
                downsample=downsample
            )
            metricRequests.append(metricReq)

        # Only EXACT result sets are supported for now.
        returnSet = 'EXACT'
        start, end = self._defaultStartAndEndTime(start, end, returnSet)
        request = self._buildRequest([], metricRequests, start, end, returnSet, downsample=None)
        request['queries'] = request['metrics']
        del request['metrics']

        content = self._metrics_connection.request(WILDCARD_URL_PATH,
                                                   request,
                                                   timeout=timeout)
        if content is None:
            return {}

        # check for bad status and log what happened
        for status in content['statuses']:
            if status['status'] == u'ERROR' and u'No such name' not in status['message']:
                log.error(status['message'])

        results = {}
        for row in content['series']:
            mName = seriesToMetricNames[row['metric']]
            key = row['tags']['key']
            if key not in results:
                results[key] = defaultdict(list)
            for ts, value in row.get('datapoints', []):
                if value is not None and value != u'NaN':
                    results[key][mName].append(dict(timestamp=ts, value=value))

        return results
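A sketch of driving this method, assuming facade provides it and dev is a device or component exposing getMetricMetadata (names and values are hypothetical); the result shape follows the docstring above:

    results = facade.getMetricsForContexts([dev], ['ssCpuIdle_ssCpuIdle'],
                                           downsample='5m-avg')
    # results == {
    #     'Devices/mydevice': {
    #         'ssCpuIdle_ssCpuIdle': [
    #             {'timestamp': 1500000000, 'value': 97.0},
    #         ],
    #     },
    # }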
Example #16
    def metric(self):
        return metrics.ensure_prefix(self._context.device().id,
                                     self._object.dpName)
Example #17
    def metric(self):
        return metrics.ensure_prefix(self._context.getMetricMetadata(),
                                     self._object.dpName)

    def batchFetchMetrics(self, datasources):
        self.log.debug("Performing batch fetch of Central Query metrics")
        from Products.ZenUtils.metrics import ensure_prefix

        processed_ds_count = defaultdict(int)
        dsPoints = set()

        for ds in datasources:
            for dp in ds.points:
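                # NOTE: metaDataPrefix is assumed to be a module-level flag
                # (not shown in this snippet) that switches naming from
                # "<uuid>/<datapoint>" keys to metadata-based prefixes.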
                if metaDataPrefix:
                    dsdpID = ensure_prefix(dp.metadata, dp.dpName)
                else:
                    dsdpID = "{uuid}/{datapoint}".format(
                        uuid=dp.metadata["contextUUID"],
                        datapoint=dp.dpName)

                if dsdpID not in dsPoints:
                    dsPoints.add(dsdpID)

        metrics = {}
        max_cycletime = 0
        for datasource in datasources:
            try:
                if datasource.cycletime > max_cycletime:
                    max_cycletime = datasource.cycletime
            except AttributeError:
                pass

            for dsname, datapoint, rra, rate, targets in datasource.params['targetDatapoints']:
                for targetConfig in targets:
                    targetValue = targetConfig.get(self._targetKey, None)
                    uuid = targetValue
                    # Filter out target datapoints that match a datasource
                    # datapoint. Target datapoints are what a datasource is
                    # made up of; if a target point is also a datasource
                    # datapoint we don't have to query for it, since it will
                    # be calculated.
                    # 'rrdpath' in the target config is a JSON string of
                    # metric metadata.
                    metricMeta = json.loads(targetConfig.get('rrdpath', "{}"))
                    dpName = "%s_%s" % (dsname, datapoint)
                    if metaDataPrefix:
                        filterKey = ensure_prefix(metricMeta, dpName)
                    else:
                        filterKey = "{uuid}/{ds}_{dp}".format(
                            uuid=targetConfig.get("uuid", None),
                            ds=dsname,
                            dp=datapoint)

                    if filterKey in dsPoints:
                        continue

                    cachekey = self._getKey(dsname, datapoint, rra, targetValue)
                    if metaDataPrefix:
                        name = ensure_prefix(metricMeta, dpName)
                    else:
                        if not targetConfig.get('device'):
                            deviceId = targetConfig.get('id')
                        else:
                            deviceId = targetConfig['device']['id']
                        name = ensure_prefix(deviceId, dsname + "_" + datapoint)

                    ds_class_name = datasource.params['datasourceClassName']
                    processed_ds_count[ds_class_name] += 1

                    self._insert_key(metrics, name, rra, rate, uuid, cachekey)

        self.log.debug(
            "Processed datasources to batch fetch: %s", processed_ds_count)

        if not len(metrics):
            self.log.debug("No metrics, skipping the batch fetch")
            returnValue(None)

        end, start = self._get_end_and_start(ago=(max_cycletime or 3600) * 5)

        yield self.fetchChunks(metrics.values(), start, end)
Example #19
    def metric(self):
        return metrics.ensure_prefix(self._context.getMetricMetadata(),
                                     self._object.dpName)

    def getMetricsForContexts(self,
                              contexts,
                              metricNames,
                              start=None,
                              end=None,
                              format="%.2lf",
                              cf="avg",
                              downsample=None,
                              timeout=10,
                              isRate=False):
        """
        getMetricsForContexts returns the metrics for all the contexts given
        :param contexts: the device or component with metrics, only needs to have the getMetricMetadata method
        :param metricNames: names of the metrics to be fetched for each context
        :param start: timestamp
        :param end: timestamp
        :param format:
        :param cf:
        :param downsample: eg. 5m-avg, no downsample by default
        :param timeout: timeout for request
        :param isRate: if the metric should be returned as a rate, False by default
        :returns map with keys of resource key (FQN of context) to map that contains metric name keys to list of
        datapoints, datapoints is a map with 'timestamp' and 'value' keys.
        """
        # "series" is the fully qualified metric name (with device/component
        # prefix). This map tracks all the different contexts (device or
        # component) that share the same series name.
        seriesToCtxKey = defaultdict(list)

        # each series name maps back to the original metricName requested
        seriesToMetricNames = {}
        for ctx in contexts:
            mData = ctx.getMetricMetadata()
            for mName in metricNames:
                seriesName = metrics.ensure_prefix(mData, mName)
                seriesToCtxKey[seriesName].append(mData['contextKey'])
                seriesToMetricNames[seriesName] = mName

        agg = AGGREGATION_MAPPING.get(cf.lower(), cf.lower())
        metricRequests = []

        if downsample is not None:
            downsample = self._formatDownsample(downsample)

        for seriesName, ctxKeys in seriesToCtxKey.items():
            contextKeys = ctxKeys
            # If we have a bunch, just do a wildcard.
            if len(ctxKeys) > 200:
                contextKeys = ['*']

            metricReq = dict(metric=seriesName,
                             aggregator=agg,
                             format=format,
                             tags={'key': contextKeys},
                             rate=isRate,
                             downsample=downsample)
            metricRequests.append(metricReq)

        # Only EXACT result sets are supported for now.
        returnSet = 'EXACT'
        start, end = self._defaultStartAndEndTime(start, end, returnSet)
        request = self._buildRequest([],
                                     metricRequests,
                                     start,
                                     end,
                                     returnSet,
                                     downsample=None)
        request['queries'] = request['metrics']
        del request['metrics']

        content = self._metrics_connection.request(WILDCARD_URL_PATH,
                                                   request,
                                                   timeout=timeout)
        if content is None:
            return {}

        # check for bad status and log what happened
        for status in content['statuses']:
            if status['status'] == u'ERROR' and u'No such name' not in status[
                    'message']:
                log.error(status['message'])

        results = {}
        for row in content['series']:
            mName = seriesToMetricNames[row['metric']]
            key = row['tags']['key']
            if key not in results:
                results[key] = defaultdict(list)
            for ts, value in row.get('datapoints', []):
                if value is not None and value != u'NaN':
                    results[key][mName].append(dict(timestamp=ts, value=value))

        return results
Example #21
    def metric(self):
        return metrics.ensure_prefix(self._context.device().id,
                                     self._object.dpName)
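Taken together, the examples show two calling conventions for ensure_prefix; a minimal sketch of both follows, with illustrative arguments (the real signature lives in Products.ZenUtils.metrics):

    from Products.ZenUtils.metrics import ensure_prefix

    # Older convention: prefix with a device id string.
    name = ensure_prefix('mydevice', 'ssCpuIdle_ssCpuIdle')

    # Newer convention: prefix from the metric metadata dict returned by
    # getMetricMetadata() on a device or component.
    name = ensure_prefix(context.getMetricMetadata(), 'ssCpuIdle_ssCpuIdle')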