Example #1
def generate_latest(self, target, group):
    output = []
    for metric in self.collect(target, group):
        output.append('# HELP {0} {1}'.format(
            metric.name,
            metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for sample in metric.samples:
            name = sample.name
            labels = sample.labels
            value = sample.value
            if labels:
                labelstr = '{{{0}}}'.format(','.join(
                    '{0}="{1}"'.format(
                        k,
                        v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                    for k, v in sorted(labels.items())))
            else:
                labelstr = ''
            output.append('{0}{1} {2}\n'.format(
                name, labelstr, floatToGoString(value)))
    return ''.join(output).encode('utf-8')
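Every example here leans on floatToGoString from prometheus_client.utils, which renders floats the way the Go-based Prometheus server prints them. As a rough sketch (not the library's actual implementation, which also switches large positive values to exponential notation), an equivalent helper could look like:

import math

def float_to_go_string(value):
    # Simplified stand-in for prometheus_client.utils.floatToGoString:
    # only the special values differ from plain repr().
    d = float(value)
    if d == math.inf:
        return '+Inf'
    if d == -math.inf:
        return '-Inf'
    if math.isnan(d):
        return 'NaN'
    return repr(d)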
Example #2
def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.'''
    output = []
    for metric in registry.collect():
        output.append('# HELP {0} {1}'.format(
            metric.name, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for sample in metric.samples:
            # Depending on the prometheus_client version, a sample may be a
            # 3-tuple (name, labels, value), a 4-tuple that adds a timestamp,
            # or a 5-tuple that also adds an exemplar.
            if len(sample) == 3:
                name, labels, value = sample
                timestamp = None
            elif len(sample) == 4:
                name, labels, value, timestamp = sample
            else:
                name, labels, value, timestamp, exemplar = sample
            if labels:
                labelstr = '{{{0}}}'.format(','.join(
                    ['{0}="{1}"'.format(
                     k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                     for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            output.append('{0}{1} {2}{3}\n'.format(name, labelstr, utils.floatToGoString(value),
                                                   ' %s' % timestamp if timestamp else ''))
    return ''.join(output).encode('utf-8')
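For comparison, the stock generate_latest shipped with prometheus_client can be exercised like this (CollectorRegistry, Counter, and generate_latest are the library's real names; the metric itself is invented):

from prometheus_client import CollectorRegistry, Counter, generate_latest

registry = CollectorRegistry()
requests = Counter('demo_requests', 'Total demo requests.', registry=registry)
requests.inc()

# Prints the text exposition format, including a line like:
#   demo_requests_total 1.0
print(generate_latest(registry).decode('utf-8'))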
Example #3
    def _expose_histogram(name, value, labels_keys, labels_values):
        # value is (per-bin counts, running sum); Prometheus buckets are
        # cumulative, so pair np.cumsum of the counts with the upper bounds
        # BINS[1:] (np and BINS are assumed to be defined at module level).
        vals, sumv = value
        buckets = [[floatToGoString(b), v] for v, b in zip(np.cumsum(vals), BINS[1:])]

        metric = HistogramMetricFamily(name, "", labels=labels_keys)
        metric.add_metric(labels_values, buckets, sum_value=sumv)
        return metric
Example #4
def get_sample(self, sample, metric):
    return {
        "sample_name": sample.name,
        "labels": sample.labels,
        "value": floatToGoString(sample.value),
        "timestamp": sample.timestamp,
        "exemplar": self.get_exemplar(sample, metric)
    }
Example #5
def add_histogram_metrics(metric, labels, upper_bounds, values, sum_value):
    """Add a sample of values to histogram metrics with labels"""
    if values[0] != "n/a":
        buckets = []
        acc = 0
        # convert the point values to cumulative values for adding to histogram
        for index, bound in enumerate(upper_bounds):
            acc += float(values[index])
            buckets.append([floatToGoString(bound), acc])

        metric.add_metric(labels=labels, buckets=buckets, sum_value=sum_value)
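A hypothetical call, assuming the function above and floatToGoString are in scope (HistogramMetricFamily is the real prometheus_client class; the metric name, bounds, and values are invented). Note that values arrives as strings, which is why the function checks for "n/a" and converts with float():

from prometheus_client.core import HistogramMetricFamily

metric = HistogramMetricFamily('demo_latency_seconds', 'Demo latency.',
                               labels=['service'])
add_histogram_metrics(metric,
                      labels=['api'],
                      upper_bounds=[0.1, 0.5, float('inf')],
                      values=['3', '5', '2'],  # point counts per bucket
                      sum_value=4.2)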
Example #6
def get_exemplar(sample, metric):
    if not sample.exemplar:
        return {}
    elif (metric.type not in ('histogram', 'gaugehistogram')
            or not sample.name.endswith('_bucket')):
        raise ValueError("Metric {} has exemplars, but is not a "
                         "histogram bucket".format(metric.name))
    return {
        "labels": sample.exemplar.labels,
        "value": floatToGoString(sample.exemplar.value),
        "timestamp": sample.exemplar.timestamp
    }
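The shape this function expects can be reproduced with the library's Sample and Exemplar namedtuples and the Metric class (available in recent prometheus_client versions); the trace ID and numbers are invented, and get_exemplar plus floatToGoString are assumed to be in scope:

from prometheus_client.core import Metric
from prometheus_client.samples import Exemplar, Sample

metric = Metric('demo_latency_seconds', 'Demo latency.', 'histogram')
sample = Sample('demo_latency_seconds_bucket', {'le': '0.5'}, 42.0, None,
                Exemplar({'trace_id': 'abc123'}, 0.37, 1600000000.0))

# -> {'labels': {'trace_id': 'abc123'}, 'value': '0.37', 'timestamp': 1600000000.0}
print(get_exemplar(sample, metric))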
Example #7
def sample_line(line) -> str:
    if line.labels:
        labelstr = '{{{0}}}'.format(','.join(
            '{0}="{1}"'.format(
                k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
            for k, v in sorted(line.labels.items())))
    else:
        labelstr = ''
    timestamp = ''
    if line.timestamp is not None:
        # Convert to milliseconds.
        timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))
    return '{0}{1} {2}{3}\n'.format(
        line.name, labelstr, floatToGoString(line.value), timestamp)
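Since sample_line only reads the name, labels, value, and timestamp attributes, the library's Sample namedtuple works directly as input (the values are invented):

from prometheus_client.samples import Sample

line = Sample('demo_requests_total', {'method': 'get'}, 1027.0, 1600000000.0)

# -> demo_requests_total{method="get"} 1027.0 1600000000000
print(sample_line(line), end='')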
Example #8
def _metric_init(self):
    self._buckets = []
    self._created = RedisValueClass('gauge', self._name,
                                    self._name + '_created',
                                    self._labelnames, self._labelvalues)
    self._created.setnx(time.time())
    bucket_labelnames = self._labelnames + ('le',)
    self._count = RedisValueClass(self._type, self._name,
                                  self._name + '_count', self._labelnames,
                                  self._labelvalues)
    self._sum = RedisValueClass(self._type, self._name,
                                self._name + '_sum', self._labelnames,
                                self._labelvalues)
    for b in self._upper_bounds:
        self._buckets.append(
            RedisValueClass(self._type, self._name, self._name + '_bucket',
                            bucket_labelnames,
                            self._labelvalues + (floatToGoString(b),)))
Example #9
def generate_latest_with_timestamps(registry, timestamp):
    '''Returns the metrics from the registry in latest text format as a string.

    Based on the original prometheus_client generate_latest, but with support
    for setting a timestamp on every sample line.'''
    output = []
    for metric in registry.collect():
        output.append('# HELP {0} {1}'.format(
            metric.name, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
        output.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        # Assumes 3-tuple samples (name, labels, value), i.e. an older
        # prometheus_client; newer versions yield Sample namedtuples with
        # more fields.
        for name, labels, value in metric.samples:
            if labels:
                labelstr = '{{{0}}}'.format(','.join(
                    ['{0}="{1}"'.format(
                     k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
                     for k, v in sorted(labels.items())]))
            else:
                labelstr = ''
            output.append('{0}{1} {2} {3}\n'.format(name, labelstr, floatToGoString(value), timestamp))
    return ''.join(output).encode('utf-8')
Example #10
    def collect(self):
        """
        collect method collects the command output from device and
        return the metrics
        """
        self._device.enable_test_commands()
        output = self._device.exec('vppctl "show histogram verbose"')
        rows = self._parser.ParseText(output)

        if not rows:
            return []

        histogram_metrics = HistogramMetricFamily(
            "epc_vppctl_performance",
            "vppctl performance metrics.",
            labels=["dataset", "process", "index"])

        for row in rows:
            buckets = []
            sample_acc = 0
            for b_symbol, bucket, b_unit, samples in zip(
                    row[FIELD_SYMBOL], row[FIELD_BUCKET],
                    row[FIELD_BUCKET_UNIT], row[FIELD_BUCKET_SAMPLES]):
                if b_symbol == '>':
                    bucket = INF
                elif b_symbol == '<':
                    bucket = float(bucket) - 0.1
                else:
                    bucket = float(bucket)
                sample_acc += float(samples)
                buckets.append([
                    floatToGoString(bucket * _TIME_MULTIPLIER[b_unit]),
                    sample_acc
                ])

            histogram_metrics.add_metric(
                labels=[
                    row[FIELD_DATA_SET], row[FIELD_PROCESS], row[FIELD_INDEX]
                ],
                buckets=buckets,
                sum_value=(float(row[FIELD_TOTAL_DT]) *
                           _TIME_MULTIPLIER[row[FIELD_TOTAL_DT_UNIT]]))

        return [histogram_metrics]
Example #11
    def collect(self):
        start = time.time()

        if start - self._cache_updated_at <= self.cache_ttl:
            log.info('Returning cached result from %s',
                     datetime.fromtimestamp(self._cache_updated_at))
            return self._cache_value

        # Use a separate instance for each scrape request, to prevent
        # race conditions with simultaneous scrapes.
        metrics = {key: value.clone() for key, value in self.METRICS.items()}

        log.info('Retrieving data from Bugsnag API')
        for organization in self._paginate('/user/organizations'):
            for project in self._paginate('/organizations/%s/projects' %
                                          organization['id']):
                by_stage = collections.defaultdict(collections.Counter)
                for error in self._paginate(
                        '/projects/%s/errors' % project['id'], **{
                            'filters[error.status][][type]': 'eq',
                            'filters[error.status][][value]': 'open',
                            'sort': 'unsorted',
                        }):
                    for stage in error['release_stages']:
                        count = by_stage[stage]
                        value = error['events']
                        for bucket in self.buckets:
                            if value <= bucket:
                                count[bucket] += 1
                        count[INF] += 1
                        count['sum'] += value

                for stage, counts in by_stage.items():
                    sum_value = counts.pop('sum')
                    buckets = [(floatToGoString(x), counts[x])
                               for x in sorted(counts.keys())]
                    metrics['events'].add_metric((project['name'], stage),
                                                 buckets, sum_value)

        stop = time.time()
        metrics['scrape_duration'].add_metric((), stop - start)
        self._cache_value = metrics.values()
        self._cache_updated_at = stop
        return self._cache_value
Example #12
    def collect(self):
        start = time.time()

        if start - self._cache_updated_at <= self.cache_ttl:
            log.info('Returning cached result from %s',
                     datetime.fromtimestamp(self._cache_updated_at))
            return self._cache_value

        # Use a separate instance for each scrape request, to prevent
        # race conditions with simultaneous scrapes.
        metrics = {key: value.clone() for key, value in self.METRICS.items()}

        log.info('Retrieving data from Audisto API')

        # We assume crawls are ordered reverse chronologically
        seen = set()
        for crawl in self._request('/crawls/'):
            if crawl['status']['value'] != 'Finished':
                continue

            service = crawl['settings']['starting_point']
            if service in seen:  # We only look at the latest crawl
                continue
            seen.add(service)

            report = self._request('/crawls/%s/report' % crawl['id'])
            metrics['http_requests_total'].add_metric(
                [service], report['counters']['pages_crawled'])

            for status in self.HTTP_STATUS:
                data = self._request('/crawls/%s/report/httpstatus/%s' %
                                     (crawl['id'], status),
                                     chunk=0,
                                     chunksize=0)
                metrics['http_requests_total'].add_metric(
                    [service, str(status)], data['total'])
            for data in report['summary_indexable']:
                metrics['http_requests_total'].add_metric(
                    [service, str(600 + data['value']['id'])],
                    data['aggregated'])
            for data in report['summary_duplicate_content']:
                metrics['http_requests_total'].add_metric(
                    [service, str(700 + data['source']['id'])], data['total'])

            buckets = []
            for item in report['summary_response_times']:
                title = item['range']['value']
                title = title.replace(' ms', '')
                if title.startswith('>'):
                    continue
                low, high = title.split('-')
                buckets.append(int(high))

            count = collections.Counter()
            for item in report['summary_response_times']:
                title = item['range']['value']
                title = title.replace(' ms', '')
                if title.startswith('>'):
                    value = INF
                else:
                    low, high = title.split('-')
                    value = int(high)
                for bucket in buckets:
                    if value <= bucket:
                        count[bucket] += item['count']
                # The +Inf bucket counts every observation, so increment it
                # once per item, outside the bucket loop (as in Example #11).
                count[INF] += item['count']

            buckets = [(floatToGoString(x), count[x])
                       for x in sorted(count.keys())]
            # We already get bucketed values, so we don't have a total sum.
            metrics['response_time'].add_metric([service],
                                                buckets,
                                                sum_value=0)

        stop = time.time()
        metrics['scrape_duration'].add_metric((), stop - start)
        self._cache_value = metrics.values()
        self._cache_updated_at = stop
        return self._cache_value
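The bucketing idiom shared by the last two collectors is worth isolating: Prometheus buckets are cumulative, and the +Inf bucket must equal the total observation count, which is why the count[INF] increment belongs outside the inner bucket loop. A self-contained illustration:

import collections

INF = float('inf')

buckets = [1, 5, 10]
count = collections.Counter()
for value in (0.5, 3, 42):
    for bucket in buckets:
        if value <= bucket:
            count[bucket] += 1
    count[INF] += 1  # once per observation, after the bucket loop

# -> [(1, 1), (5, 2), (10, 2), (inf, 3)]
print(sorted(count.items()))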