Example #1
def test_summary(self):
    s = Summary("ss", "A summary", ["a", "b"], registry=self.registry)
    s.labels("c", "d").observe(17)
    self.assertEqual(
        b'# HELP ss A summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n',
        generate_latest(self.registry),
    )
Example #2
class TestSummary(unittest.TestCase):
    def setUp(self):
        self.registry = CollectorRegistry()
        self.summary = Summary('s', 'help', registry=self.registry)

    def test_summary(self):
        self.assertEqual(0, self.registry.get_sample_value('s_count'))
        self.assertEqual(0, self.registry.get_sample_value('s_sum'))
        self.summary.observe(10)
        self.assertEqual(1, self.registry.get_sample_value('s_count'))
        self.assertEqual(10, self.registry.get_sample_value('s_sum'))

    def test_function_decorator(self):
        self.assertEqual(0, self.registry.get_sample_value('s_count'))

        @self.summary.time()
        def f():
            pass

        f()
        self.assertEqual(1, self.registry.get_sample_value('s_count'))

    def test_block_decorator(self):
        self.assertEqual(0, self.registry.get_sample_value('s_count'))
        with self.summary.time():
            pass
        self.assertEqual(1, self.registry.get_sample_value('s_count'))
Example #3
    def notify_success(self, source, hostname, filename, stats):
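        # Collect this backup's stats into a fresh registry and push them to the
        # Pushgateway under the source's job id, authenticating with HTTP basic auth.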
        registry = CollectorRegistry()

        s = Summary('backup_size', 'Size of backup file in bytes', registry=registry)
        s.observe(stats.size)
        s = Summary('backup_dumptime', 'Time taken to dump and compress/encrypt backup in seconds', registry=registry)
        s.observe(stats.dumptime)
        s = Summary('backup_uploadtime', 'Time taken to upload backup in seconds', registry=registry)
        s.observe(stats.uploadtime)
        if stats.retained_copies is not None:
            g = Gauge('backup_retained_copies', 'Number of retained backups found on destination', registry=registry)
            g.set(stats.retained_copies)
        g = Gauge('backup_timestamp', 'Time backup completed as seconds-since-the-epoch', registry=registry)
        g.set_to_current_time()

        def auth_handler(url, method, timeout, headers, data):
            return basic_auth_handler(url, method, timeout, headers, data, self.username, self.password)

        push_to_gateway(self.url, job=source.id, registry=registry, handler=auth_handler)

        logging.info("Pushed metrics for job '%s' to gateway (%s)" % (source.id, self.url))
Example #4
import json
import signal
from datetime import datetime, timedelta
from functools import partial
from time import time

from confluent_kafka import KafkaError
from prometheus_client import Info, Summary, start_http_server

from . import process
from .mq import consume, msgs, produce
from .utils import config, metrics, puptoo_logging
from .utils.puptoo_logging import threadctx

CONSUMER_WAIT_TIME = Summary("puptoo_consumer_wait_time",
                             "Time spent waiting on consumer iteration")
CONSUMER_ASSIGNMENTS = Info("puptoo_consumer_assignments",
                            "List of partitions assigned to the consumer")

logger = puptoo_logging.initialize_logging()


def start_prometheus():
    start_http_server(config.PROMETHEUS_PORT)


def get_extra(account="unknown", request_id="unknown"):
    threadctx.request_id = request_id
    threadctx.account = account
    return {"account": account, "request_id": request_id}
Example #5
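# Walk a source's downstream nodes and attach Prometheus metrics to each one:
# Counters for inputs/outputs/errors, plus optional Summary/Histogram metrics for
# execution time and data size, by wrapping each node's update() method.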
def _evolve(source, name, time_execution_metrics=True, data_size_metrics=True):
    for c in source.downstreams:
        c.total_before = 0
        c.total_after = 0
        c.total_error = 0
        if hasattr(c, 'kwargs'):
            c.ignore_error = c.kwargs.pop('ignore_error', False)
        else:
            c.ignore_error = False

        if hasattr(c, 'func'):
            func_name = c.func.__name__
            c.got_func = True

        else:
            func_name = (str(c).replace('<',
                                        '').replace('>',
                                                    '').replace('; ', '_'))
            c.got_func = False

        c.is_dask = '.dask.' in str(type(c))

        f = f'{name}__{func_name}'
        c.name = f

        if c.got_func:

            c.total_before = Counter(f'total_before_{f}', f'total input {f}')
            c.total_after = Counter(f'total_after_{f}', f'total output {f}')
            c.total_error = Counter(f'total_error_{f}', f'total error {f}')
            c.time_execution_metrics = time_execution_metrics
            if c.time_execution_metrics:
                c.total_time = Summary(f'total_time_{f}',
                                       f'summary of time execution {f}')
                c.total_time_histogram = Histogram(
                    f'total_time_histogram_{f}',
                    f'histogram of time execution {f}',
                )

            c.data_size_metrics = data_size_metrics
            if c.data_size_metrics:
                c.data_size_before = Summary(
                    f'data_size_before_{f}',
                    f'summary of input data size {f} (KB)',
                )
                c.data_size_after = Summary(
                    f'data_size_after_{f}',
                    f'summary of output data size {f} (KB)',
                )
                c.data_size_before_histogram = Histogram(
                    f'data_size_before_histogram_{f}',
                    f'histogram of input data size {f} (KB)',
                )
                c.data_size_after_histogram = Histogram(
                    f'data_size_after_histogram_{f}',
                    f'histogram of output data size {f} (KB)',
                )

            if c.is_dask:

                def additional(self, x, who=None):
                    client = default_client()

                    result = client.submit(self.func, x, *self.args,
                                           **self.kwargs)
                    return self._emit(result)

            else:

                def additional(self, x, who=None):
                    self.total_before.inc()
                    if self.data_size_metrics:
                        self.data_size_before.observe(sys.getsizeof(x) / 1000)
                        self.data_size_before_histogram.observe(
                            sys.getsizeof(x) / 1000)

                    before = time()
                    try:
                        result = self.func(x)
                        after = time() - before
                    except Exception as e:
                        logger.exception(e)
                        if self.ignore_error:
                            self.total_error.inc()
                        else:
                            raise
                    else:
                        self.total_after.inc()

                        if self.time_execution_metrics:
                            self.total_time.observe(after)
                            self.total_time_histogram.observe(after)

                        if self.data_size_metrics:
                            self.data_size_after.observe(
                                sys.getsizeof(x) / 1000)
                            self.data_size_after_histogram.observe(
                                sys.getsizeof(x) / 1000)

                        return self._emit(result)

            c.update = functools.partial(additional, c)
        c = _evolve(
            c,
            name=f,
            time_execution_metrics=time_execution_metrics,
            data_size_metrics=data_size_metrics,
        )

    return source
Example #6
#!/usr/bin/env python
import yaml

from argparse import ArgumentParser
from dataclasses import dataclass
from dns.resolver import Resolver, NoNameservers
from prometheus_client import make_wsgi_app, Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from wsgiref.simple_server import make_server

parser = ArgumentParser()
parser.add_argument('-c',
                    '--config',
                    default='/etc/dnsttl_reporter/config.yml')

COLLECT_TIME = Summary('dnsrecordttl_collect_time',
                       'Time spent collecting ttl records')

RESOLVERS = set()
QUERIES = set()


def parse_config(config_file):
    with open(config_file) as f:
        config = yaml.safe_load(f)

    RESOLVERS.update(make_resolver(r) for r in config['resolvers'])
    QUERIES.update((tuple(q) for q in config['queries']))


@dataclass
class QueryResult:
Example #7
import re
import time
import requests
import argparse
from pprint import pprint

import os
from sys import exit
from prometheus_client import Summary, make_wsgi_app
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from wsgiref.simple_server import make_server

DEBUG = int(os.environ.get('DEBUG', '0'))

COLLECTION_TIME = Summary('jenkins_collector_collect_seconds',
                          'Time spent to collect metrics from Jenkins')


class JenkinsCollector(object):
    # The build statuses we want to export about.
    statuses = [
        "lastBuild", "lastCompletedBuild", "lastFailedBuild",
        "lastStableBuild", "lastSuccessfulBuild", "lastUnstableBuild",
        "lastUnsuccessfulBuild"
    ]

    def __init__(self, target, user, password, insecure):
        self._target = target.rstrip("/")
        self._user = user
        self._password = password
        self._insecure = insecure
Example #8
import datetime
from cgi_models import Miner
import time
from loguru import logger
from prometheus_client import start_http_server, Summary
import confuse

# Metrics summary
antminer_uptime_seconds = Summary('antminer_uptime_seconds',
                                  'General uptime of miner in seconds')

# you need at least 6 seconds per miner, increase this number if monitoring 16 miners or more
SECONDS_4_CHECKS = 95


def is_str_canbe_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(8000)
    # Generate some requests.
    while True:
        try:
            config = confuse.Configuration('antminer_exporter', __name__)
            config.set_file('antminer_exporter/config.yaml')
Example #9
    await asyncio.wait([
        read_stream(p.stdout, lambda x: print(x.decode(), end='')),
        read_stream(p.stderr,
                    lambda x: print(x.decode(), end='', file=sys.stderr))
    ])

    return await p.wait()


registry = CollectorRegistry()

errors = Counter('failures_total',
                 'Number of errors thrown by subprocess',
                 registry=registry)
duration = Summary('duration_seconds',
                   'How long the subprocess took to run',
                   registry=registry)
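# Both metrics instrument runner() below: @duration.time() records its runtime
# in the Summary and @errors.count_exceptions() counts any exception it raises.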


@duration.time()
@errors.count_exceptions()
def runner() -> int:
    loop = asyncio.get_event_loop()
    res = loop.run_until_complete(runner_async())
    loop.close()
    return res


res = 255
try:
    res = runner()
Example #10
import logging

from prometheus_client import Summary
from prometheus_client import Histogram
from prometheus_async.aio import time

logger = logging.getLogger(__name__)

#
# Utility
#

expect_ok = asutil.expect_ok_closure(exception.CommandError)

git = git_provider.git_provider()

REQ_TIME = Summary("clone_req_time", "time spent with clone endpoint")
REQ_HISTOGRAM_TIME = Histogram(
    "clone_req_histogram",
    "Histogram for clone endpoint",
    buckets=[1, 10, 60, 120, 300, 600, 900, 1200, 1500, 1800, 3600])


async def push_sync_changes(work_dir,
                            ref,
                            remote="origin",
                            origin_remote="origin"):
    """ This function is used when we want to sync a repository with another one
        It assumes that you have already set the remote to be the 'other' repository

        If the ref is a tag, the tag is pushed to the remote
        If the ref is a branch, the branch, and its changes, are pushed to the remote
Example #11
from prometheus_client import start_http_server, Gauge, Summary, Counter
import time
import argparse
from pyawair.auth import *
from pyawair.conn import *
from pyawair.data import *
import pyawair
import traceback

REQUEST_TIME = Summary('awair_equest_processing_seconds',
                       'Time spent processing request', ['method'])

awair_device_api_usage_time = REQUEST_TIME.labels(method="retrieve_api_usage")
awair_device_data_time = REQUEST_TIME.labels(method="retrieve_data_usage")
RESPONSE_CODE = Counter('awair_reponse_code', 'HTTP Response Codes',
                        ['http_code'])
FAILURE_COUNT = Counter('awair_failure_count', 'AWAIR API FAILURES',
                        ['method'])
AWAIR_SCORE = Gauge("awair_device_score", "Awair score of device", ['device'])
AWAIR_TEMP = Gauge("awair_device_temp", "Awair temp of device", ['device'])
AWAIR_HUMID = Gauge("awair_device_humid", "Awair humidity of device",
                    ['device'])
AWAIR_CO2 = Gauge("awair_device_co2", "Awair co2 level of device", ['device'])
AWAIR_VOC = Gauge("awair_device_voc", "Awair voc of device", ['device'])
AWAIR_PM25 = Gauge("awair_device_pm25", "Awair pm25 of device", ['device'])
AWAIR_USAGE = Gauge("awair_device_api_usage", "Api usage of device",
                    ['device', 'scope'])


def get_data_usage(auth, id, type, base_url, data_url, args=''):
    """
        request_time = datetime.now().strftime('%d.%m.%Y %H:%M:%S')
        actual_response = fn(*args, **kwargs)
        # modify this to log exactly what you need:
        if 'ready' not in request.url:
            logger.info('%s %s %s %s' % (request.remote_addr, request.method,
                                         request.url, response.status))
        return actual_response

    return _log_to_logger


app = Bottle()
app.install(log_to_logger)

# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('gitlabservice_request_processing_seconds',
                       'Time spent processing request')
REQUEST_TIME.observe(4.7)
#IN_PROGRESS = Gauge("gitlabservice_inprogress_requests", "help")
REQUESTS = Counter('gitlabservice_http_requests_total',
                   'Description of counter', ['method', 'endpoint'])
INFO = Info('gitlabservice_version', 'Description of info')
INFO.info({'version': '1.1', 'port': '3001'})


def enable_cors(fn):
    def _enable_cors(*args, **kwargs):
        # set CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers[
            'Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
        response.headers[
Example #13
import bottle
from bottle import response
bottle.BaseRequest.MEMFILE_MAX = 1600000000

import prometheus_client
from prometheus_client import Gauge, Summary

import reststore
from reststore import config

proxy_requests = False


# prometheus metrics state
request_summary = Summary(
    'reststore_api_request_duration_seconds',
    'Time spent processing api request',
    ['resource', 'method']
)
request_timer = lambda *x: request_summary.labels(*x).time()

file_count_gauge = Gauge(
    'reststore_stored_files',
    'Number of files in reststore',
    ['store']
)
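# Replace the gauge's sample collection so scrapes report live file counts from
# _counts() rather than a value tracked in this process.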
file_count_gauge._samples = lambda: _counts()

file_size_summary = Summary(
    'reststore_file_size_bytes',
    'Size of files stored/fetched in bytes',
    ['store', 'direction']
Example #14
def test_summary(self):
    s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry)
    s.labels('c', 'd').observe(17)
    self.assertEqual(b'# HELP ss A summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n', generate_latest(self.registry))
Example #15
def setUp(self):
    self.registry = CollectorRegistry()
    self.summary = Summary('s', 'help', registry=self.registry)
Example #16
class Operations(llfuse.Operations):
    """This is the main interface with llfuse.

    The methods on this object are called by llfuse threads to service FUSE
    events to query and read from the file system.

    llfuse has its own global lock which is acquired before calling a request handler,
    so request handlers do not run concurrently unless the lock is explicitly released
    using 'with llfuse.lock_released:'

    """

    fuse_time = Summary('arvmount_fuse_operations_seconds',
                        'Time spent during FUSE operations',
                        labelnames=['op'])
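    # One labelled child per FUSE operation; the handlers below are decorated
    # with these timers (e.g. @read_time.time()) so every call is observed.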
    read_time = fuse_time.labels(op='read')
    write_time = fuse_time.labels(op='write')
    destroy_time = fuse_time.labels(op='destroy')
    on_event_time = fuse_time.labels(op='on_event')
    getattr_time = fuse_time.labels(op='getattr')
    setattr_time = fuse_time.labels(op='setattr')
    lookup_time = fuse_time.labels(op='lookup')
    forget_time = fuse_time.labels(op='forget')
    open_time = fuse_time.labels(op='open')
    release_time = fuse_time.labels(op='release')
    opendir_time = fuse_time.labels(op='opendir')
    readdir_time = fuse_time.labels(op='readdir')
    statfs_time = fuse_time.labels(op='statfs')
    create_time = fuse_time.labels(op='create')
    mkdir_time = fuse_time.labels(op='mkdir')
    unlink_time = fuse_time.labels(op='unlink')
    rmdir_time = fuse_time.labels(op='rmdir')
    rename_time = fuse_time.labels(op='rename')
    flush_time = fuse_time.labels(op='flush')

    def __init__(self,
                 uid,
                 gid,
                 api_client,
                 encoding="utf-8",
                 inode_cache=None,
                 num_retries=4,
                 enable_write=False):
        super(Operations, self).__init__()

        self._api_client = api_client

        if not inode_cache:
            inode_cache = InodeCache(cap=256 * 1024 * 1024)
        self.inodes = Inodes(inode_cache, encoding=encoding)
        self.uid = uid
        self.gid = gid
        self.enable_write = enable_write

        # dict of inode to filehandle
        self._filehandles = {}
        self._filehandles_counter = itertools.count(0)

        # Other threads that need to wait until the fuse driver
        # is fully initialized should wait() on this event object.
        self.initlock = threading.Event()

        # If we get overlapping shutdown events (e.g., fusermount -u
        # -z and operations.destroy()) llfuse calls forget() on inodes
        # that have already been deleted. To avoid this, we make
        # forget() a no-op if called after destroy().
        self._shutdown_started = threading.Event()

        self.num_retries = num_retries

        self.read_counter = arvados.keep.Counter()
        self.write_counter = arvados.keep.Counter()
        self.read_ops_counter = arvados.keep.Counter()
        self.write_ops_counter = arvados.keep.Counter()

        self.events = None

    def init(self):
        # Allow threads that are waiting for the driver to be finished
        # initializing to continue
        self.initlock.set()

    def metric_samples(self):
        return self.fuse_time.collect()[0].samples

    def metric_op_names(self):
        ops = []
        for cur_op in [
                sample.labels['op'] for sample in self.metric_samples()
        ]:
            if cur_op not in ops:
                ops.append(cur_op)
        return ops

    def metric_value(self, opname, metric):
        op_value = [
            sample.value for sample in self.metric_samples()
            if sample.name == metric and sample.labels['op'] == opname
        ]
        return op_value[0] if len(op_value) == 1 else None

    def metric_sum_func(self, opname):
        return lambda: self.metric_value(
            opname, "arvmount_fuse_operations_seconds_sum")

    def metric_count_func(self, opname):
        return lambda: int(
            self.metric_value(opname, "arvmount_fuse_operations_seconds_count")
        )

    @destroy_time.time()
    @catch_exceptions
    def destroy(self):
        self._shutdown_started.set()
        if self.events:
            self.events.close()
            self.events = None

        # Different versions of llfuse require and forbid us to
        # acquire the lock here. See #8345#note-37, #10805#note-9.
        if LLFUSE_VERSION_0 and llfuse.lock.acquire():
            # llfuse < 0.42
            self.inodes.clear()
            llfuse.lock.release()
        else:
            # llfuse >= 0.42
            self.inodes.clear()

    def access(self, inode, mode, ctx):
        return True

    def listen_for_events(self):
        self.events = arvados.events.subscribe(
            self._api_client,
            [["event_type", "in", ["create", "update", "delete"]]],
            self.on_event)

    @on_event_time.time()
    @catch_exceptions
    def on_event(self, ev):
        if 'event_type' not in ev or ev["event_type"] not in ("create",
                                                              "update",
                                                              "delete"):
            return
        with llfuse.lock:
            properties = ev.get("properties") or {}
            old_attrs = properties.get("old_attributes") or {}
            new_attrs = properties.get("new_attributes") or {}

            for item in self.inodes.inode_cache.find_by_uuid(
                    ev["object_uuid"]):
                item.invalidate()
                if ev.get("object_kind") == "arvados#collection":
                    pdh = new_attrs.get("portable_data_hash")
                    # new_attributes.modified_at currently lacks
                    # subsecond precision (see #6347) so use event_at
                    # which should always be the same.
                    stamp = ev.get("event_at")
                    if (stamp and pdh and item.writable()
                            and item.collection is not None
                            and item.collection.modified()
                            and new_attrs.get("is_trashed") is not True):
                        item.update(to_record_version=(stamp, pdh))

            oldowner = old_attrs.get("owner_uuid")
            newowner = ev.get("object_owner_uuid")
            for parent in (self.inodes.inode_cache.find_by_uuid(oldowner) +
                           self.inodes.inode_cache.find_by_uuid(newowner)):
                parent.child_event(ev)

    @getattr_time.time()
    @catch_exceptions
    def getattr(self, inode, ctx=None):
        if inode not in self.inodes:
            raise llfuse.FUSEError(errno.ENOENT)

        e = self.inodes[inode]

        entry = llfuse.EntryAttributes()
        entry.st_ino = inode
        entry.generation = 0
        entry.entry_timeout = 0
        entry.attr_timeout = e.time_to_next_poll() if e.allow_attr_cache else 0

        entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
        if isinstance(e, Directory):
            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
        else:
            entry.st_mode |= stat.S_IFREG
            if isinstance(e, FuseArvadosFile):
                entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH

        if self.enable_write and e.writable():
            entry.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH

        entry.st_nlink = 1
        entry.st_uid = self.uid
        entry.st_gid = self.gid
        entry.st_rdev = 0

        entry.st_size = e.size()

        entry.st_blksize = 512
        entry.st_blocks = (entry.st_size // 512) + 1
        if hasattr(entry, 'st_atime_ns'):
            # llfuse >= 0.42
            entry.st_atime_ns = int(e.atime() * 1000000000)
            entry.st_mtime_ns = int(e.mtime() * 1000000000)
            entry.st_ctime_ns = int(e.mtime() * 1000000000)
        else:
            # llfuse < 0.42
            entry.st_atime = int(e.atime())
            entry.st_mtime = int(e.mtime())
            entry.st_ctime = int(e.mtime())

        return entry

    @setattr_time.time()
    @catch_exceptions
    def setattr(self, inode, attr, fields=None, fh=None, ctx=None):
        entry = self.getattr(inode)

        if fh is not None and fh in self._filehandles:
            handle = self._filehandles[fh]
            e = handle.obj
        else:
            e = self.inodes[inode]

        if fields is None:
            # llfuse < 0.42
            update_size = attr.st_size is not None
        else:
            # llfuse >= 0.42
            update_size = fields.update_size
        if update_size and isinstance(e, FuseArvadosFile):
            with llfuse.lock_released:
                e.arvfile.truncate(attr.st_size)
                entry.st_size = e.arvfile.size()

        return entry

    @lookup_time.time()
    @catch_exceptions
    def lookup(self, parent_inode, name, ctx=None):
        name = str(name, self.inodes.encoding)
        inode = None

        if name == '.':
            inode = parent_inode
        else:
            if parent_inode in self.inodes:
                p = self.inodes[parent_inode]
                self.inodes.touch(p)
                if name == '..':
                    inode = p.parent_inode
                elif isinstance(p, Directory) and name in p:
                    inode = p[name].inode

        if inode is not None:
            _logger.debug(
                "arv-mount lookup: parent_inode %i name '%s' inode %i",
                parent_inode, name, inode)
            self.inodes[inode].inc_ref()
            return self.getattr(inode)
        else:
            _logger.debug(
                "arv-mount lookup: parent_inode %i name '%s' not found",
                parent_inode, name)
            raise llfuse.FUSEError(errno.ENOENT)

    @forget_time.time()
    @catch_exceptions
    def forget(self, inodes):
        if self._shutdown_started.is_set():
            return
        for inode, nlookup in inodes:
            ent = self.inodes[inode]
            _logger.debug("arv-mount forget: inode %i nlookup %i ref_count %i",
                          inode, nlookup, ent.ref_count)
            if ent.dec_ref(nlookup) == 0 and ent.dead:
                self.inodes.del_entry(ent)

    @open_time.time()
    @catch_exceptions
    def open(self, inode, flags, ctx=None):
        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if isinstance(p, Directory):
            raise llfuse.FUSEError(errno.EISDIR)

        if ((flags & os.O_WRONLY) or (flags & os.O_RDWR)) and not p.writable():
            raise llfuse.FUSEError(errno.EPERM)

        fh = next(self._filehandles_counter)
        self._filehandles[fh] = FileHandle(fh, p)
        self.inodes.touch(p)

        # Normally, we will have received an "update" event if the
        # parent collection is stale here. However, even if the parent
        # collection hasn't changed, the manifest might have been
        # fetched so long ago that the signatures on the data block
        # locators have expired. Calling checkupdate() on all
        # ancestors ensures the signatures will be refreshed if
        # necessary.
        while p.parent_inode in self.inodes:
            if p == self.inodes[p.parent_inode]:
                break
            p = self.inodes[p.parent_inode]
            self.inodes.touch(p)
            p.checkupdate()

        _logger.debug("arv-mount open inode %i flags %x fh %i", inode, flags,
                      fh)

        return fh

    @read_time.time()
    @catch_exceptions
    def read(self, fh, off, size):
        _logger.debug("arv-mount read fh %i off %i size %i", fh, off, size)
        self.read_ops_counter.add(1)

        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        self.inodes.touch(handle.obj)

        r = handle.obj.readfrom(off, size, self.num_retries)
        if r:
            self.read_counter.add(len(r))
        return r

    @write_time.time()
    @catch_exceptions
    def write(self, fh, off, buf):
        _logger.debug("arv-mount write %i %i %i", fh, off, len(buf))
        self.write_ops_counter.add(1)

        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        if not handle.obj.writable():
            raise llfuse.FUSEError(errno.EPERM)

        self.inodes.touch(handle.obj)

        w = handle.obj.writeto(off, buf, self.num_retries)
        if w:
            self.write_counter.add(w)
        return w

    @release_time.time()
    @catch_exceptions
    def release(self, fh):
        if fh in self._filehandles:
            _logger.debug("arv-mount release fh %i", fh)
            try:
                self._filehandles[fh].flush()
            except Exception:
                raise
            finally:
                self._filehandles[fh].release()
                del self._filehandles[fh]
        self.inodes.inode_cache.cap_cache()

    def releasedir(self, fh):
        self.release(fh)

    @opendir_time.time()
    @catch_exceptions
    def opendir(self, inode, ctx=None):
        _logger.debug("arv-mount opendir: inode %i", inode)

        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if not isinstance(p, Directory):
            raise llfuse.FUSEError(errno.ENOTDIR)

        fh = next(self._filehandles_counter)
        if p.parent_inode in self.inodes:
            parent = self.inodes[p.parent_inode]
        else:
            raise llfuse.FUSEError(errno.EIO)

        # update atime
        self.inodes.touch(p)
        self._filehandles[fh] = DirectoryHandle(
            fh, p, [('.', p), ('..', parent)] + listitems(p))
        return fh

    @readdir_time.time()
    @catch_exceptions
    def readdir(self, fh, off):
        _logger.debug("arv-mount readdir: fh %i off %i", fh, off)

        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        e = off
        while e < len(handle.entries):
            if handle.entries[e][1].inode in self.inodes:
                yield (handle.entries[e][0].encode(self.inodes.encoding),
                       self.getattr(handle.entries[e][1].inode), e + 1)
            e += 1

    @statfs_time.time()
    @catch_exceptions
    def statfs(self, ctx=None):
        st = llfuse.StatvfsData()
        st.f_bsize = 128 * 1024
        st.f_blocks = 0
        st.f_files = 0

        st.f_bfree = 0
        st.f_bavail = 0

        st.f_ffree = 0
        st.f_favail = 0

        st.f_frsize = 0
        return st

    def _check_writable(self, inode_parent):
        if not self.enable_write:
            raise llfuse.FUSEError(errno.EROFS)

        if inode_parent in self.inodes:
            p = self.inodes[inode_parent]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if not isinstance(p, Directory):
            raise llfuse.FUSEError(errno.ENOTDIR)

        if not p.writable():
            raise llfuse.FUSEError(errno.EPERM)

        return p

    @create_time.time()
    @catch_exceptions
    def create(self, inode_parent, name, mode, flags, ctx=None):
        name = name.decode(encoding=self.inodes.encoding)
        _logger.debug("arv-mount create: parent_inode %i '%s' %o",
                      inode_parent, name, mode)

        p = self._check_writable(inode_parent)
        p.create(name)

        # The file entry should have been implicitly created by callback.
        f = p[name]
        fh = next(self._filehandles_counter)
        self._filehandles[fh] = FileHandle(fh, f)
        self.inodes.touch(p)

        f.inc_ref()
        return (fh, self.getattr(f.inode))

    @mkdir_time.time()
    @catch_exceptions
    def mkdir(self, inode_parent, name, mode, ctx=None):
        name = name.decode(encoding=self.inodes.encoding)
        _logger.debug("arv-mount mkdir: parent_inode %i '%s' %o", inode_parent,
                      name, mode)

        p = self._check_writable(inode_parent)
        p.mkdir(name)

        # The dir entry should have been implicitly created by callback.
        d = p[name]

        d.inc_ref()
        return self.getattr(d.inode)

    @unlink_time.time()
    @catch_exceptions
    def unlink(self, inode_parent, name, ctx=None):
        name = name.decode(encoding=self.inodes.encoding)
        _logger.debug("arv-mount unlink: parent_inode %i '%s'", inode_parent,
                      name)
        p = self._check_writable(inode_parent)
        p.unlink(name)

    @rmdir_time.time()
    @catch_exceptions
    def rmdir(self, inode_parent, name, ctx=None):
        name = name.decode(encoding=self.inodes.encoding)
        _logger.debug("arv-mount rmdir: parent_inode %i '%s'", inode_parent,
                      name)
        p = self._check_writable(inode_parent)
        p.rmdir(name)

    @rename_time.time()
    @catch_exceptions
    def rename(self,
               inode_parent_old,
               name_old,
               inode_parent_new,
               name_new,
               ctx=None):
        name_old = name_old.decode(encoding=self.inodes.encoding)
        name_new = name_new.decode(encoding=self.inodes.encoding)
        _logger.debug(
            "arv-mount rename: old_parent_inode %i '%s' new_parent_inode %i '%s'",
            inode_parent_old, name_old, inode_parent_new, name_new)
        src = self._check_writable(inode_parent_old)
        dest = self._check_writable(inode_parent_new)
        dest.rename(name_old, name_new, src)

    @flush_time.time()
    @catch_exceptions
    def flush(self, fh):
        if fh in self._filehandles:
            self._filehandles[fh].flush()

    def fsync(self, fh, datasync):
        self.flush(fh)

    def fsyncdir(self, fh, datasync):
        self.flush(fh)
Example #17
# -*- coding: utf-8 -*-
from prometheus_client import Summary

story_request = Summary('asyncy_engine_http_run_story_processing_seconds',
                        'Time spent processing story run requests',
                        ['app_id', 'story_name'])

story_run_success = Summary('asyncy_engine_success_seconds',
                            'Time spent executing a story (successfully)',
                            ['app_id', 'story_name'])

story_run_failure = Summary('asyncy_engine_failure_seconds',
                            'Time spent executing a story (failed)',
                            ['app_id', 'story_name'])

story_run_total = Summary('asyncy_engine_total_seconds',
                          'Time spent executing a story (total)',
                          ['app_id', 'story_name'])

container_exec_seconds_total = Summary(
    'asyncy_engine_container_exec_seconds',
    'Time spent executing commands in containers',
    ['app_id', 'story_name', 'service'])

container_start_seconds_total = Summary(
    'asyncy_engine_container_start_seconds',
    'Time spent starting containers',
    ['app_id', 'story_name', 'service'])
Example #18
import json
import time
import argparse
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from prometheus_client import Summary
from prometheus_client.core import GaugeMetricFamily
from dateutil.parser import parse

COLLECTION_TIME = Summary('gitlab_job_collector_collect_seconds',
                          'Time spent to collect metrics from Gitlab')

# metrics:
#
# gitlab_job_last_id{'GitRepo','Branch','Scope'}: number
# gitlab_job_last_created_timestamp{'GitRepo','Branch','Scope'}: unix timestamp
# gitlab_job_last_finished_timestamp{'GitRepo','Branch','Scope'}: unix timestamp
# gitlab_job_last_starting_duration_seconds{'GitRepo','Branch','Scope'}: seconds
# gitlab_job_last_running_duration_seconds{'GitRepo','Branch','Scope'}: seconds
# gitlab_job_last_total_duration_seconds{'GitRepo','Branch','Scope'}: seconds

def parse_args():
    parser = argparse.ArgumentParser(
        description='gitlab job exporter args using config file'
    )

    parser.add_argument(
        '-c', '--config',
        required=True,
        help='gitlab job exporter config file using json format',
    )
Example #19
import re

from huey import RedisHuey
from huey.consumer import EVENT_FINISHED, EVENT_STARTED, EVENT_ERROR_TASK
from prometheus_client import Summary, Counter
from huey_exporter.RedisEnqueuedEventHuey import EVENT_ENQUEUED

# Create a metric to track time spent and requests made.
ENQUEUED_COUNTER = Counter('huey_enqueued_tasks', 'Huey Tasks enqueued', ['queue_name', 'task_name'])
STARTED_COUNTER = Counter('huey_started_tasks', 'Huey Tasks started', ['queue_name', 'task_name'])
FINISHED_COUNTER = Counter('huey_finished_tasks', 'Huey Tasks Finished', ['queue_name', 'task_name'])
ERROR_COUNTER = Counter('huey_error_tasks', 'Huey Task Errors', ['queue_name', 'task_name'])
TASK_DURATION_SUMMARY = Summary('huey_task_duration_seconds', 'Time spent processing tasks', ['queue_name', 'task_name'])


class EventQueue:

    prefix = 'queue_task_'

    def __init__(self, name, connection_pool):
        self.name = name
        self.clean_name = self.clean_queue_name(self.name)
        self.huey = RedisHuey(self.name, connection_pool=connection_pool)
        self.event_handlers = {
            EVENT_FINISHED: self.event_finished,
            EVENT_ENQUEUED: self.event_enqueued,
            EVENT_STARTED: self.event_started,
            EVENT_ERROR_TASK: self.event_error,
        }

    def listen(self):
Example #20
from prometheus_client import Summary

# Summaries:
http_api_latency_total = Summary(
    "bg_http_api_latency_seconds",
    "Total number of seconds each API endpoint is taking to respond.",
    ["method", "route", "status"],
)
Example #21
from flask import Flask, flash, render_template, redirect, url_for, request, session
from module.database import Database
from prometheus_client import start_http_server, Counter, Summary

call_metric = Counter('opsschool_monitor_flask_main_count',
                      'Number of visits to main', ["service", "endpoint"])
time_metric = Summary('opsschool_monitor_flask_request_processing_seconds',
                      'Time spent processing request', ["method"])

app = Flask(__name__)
app.secret_key = "mys3cr3tk3y"
db = Database()

hello_world_timer = time_metric.labels(method="hello_world")
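# The labelled child works as a decorator: every hello_world() call below is
# timed and observed under method="hello_world".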


@hello_world_timer.time()
def hello_world():
    call_metric.labels(service='opsschool_flask', endpoint='main').inc(1)
    return 'Hey, we have a hello world!'


@app.route('/')
def index():
    data = db.read(None)

    return render_template('index.html', data=data)


@app.route('/add/')
def add():
Example #22
from prometheus_client import start_http_server, Summary
from random import random
from time import sleep

summary = Summary('request_processing_seconds',
                  'time spent processing request')


@summary.time()
def process_request(t):
    sleep(t)


start_http_server(7999)
while True:
    process_request(random())
Example #23
        print("PROFILE_DIR '{}' is invalid, not enabling profiling".format(
            PROFILE_DIR))
        PROFILE_DIR = None
"""
Master Fleet Runner

Instantiates the specified number of Bjoern WSGI server processes,
each taking orders on their own unix socket and passing requests to
the respective WSGI app (rest, notify or metrics).

"""

# metrics
if PROMETHEUS:
    REQUEST_TIME = Summary('kopano_mfr_request_processing_seconds',
                           'Time spent processing request',
                           ['method', 'endpoint'])
    EXCEPTION_COUNT = Counter('kopano_mfr_total_unhandled_exceptions',
                              'Total number of unhandled exceptions')
    MEMORY_GAUGE = Gauge('kopano_mfr_virtual_memory_bytes',
                         'Virtual memory size in bytes', ['worker'])
    CPUTIME_GAUGE = Gauge('kopano_mfr_cpu_seconds_total',
                          'Total user and system CPU time spent in seconds',
                          ['worker'])


def error_handler(ex, req, resp, params, with_metrics):
    if not isinstance(ex, (falcon.HTTPError, falcon.HTTPStatus)):
        if with_metrics:
            if PROMETHEUS:
                EXCEPTION_COUNT.inc()
Example #24
import re
import time
import requests
import argparse
from pprint import pprint

import os
from sys import exit
from prometheus_client import start_http_server, Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY

from azure_status import AzureStatus

DEBUG = int(os.environ.get('DEBUG', '0'))

COLLECTION_TIME = Summary('azure_cloud_status_collector_collect_seconds',
                          'Time spent to collect metrics from Azure Status')
STATUSES = {'good': 0, 'warning': -1, 'error': 2}


class AzureStatusCollector(object):
    def collect(self):
        start = time.time()

        # Request data from Azure Status
        status = self._request_data()

        for region_section in status[1]:
            for category in status[1][region_section]:
                for service in status[1][region_section][category]:
                    for region in status[1][region_section][category][service]:
                        metric_name = "azure_status_{}_{}_status".format(
Example #25
from flask import Flask, request, render_template
from src.predict import prediction
import time
from prometheus_client import start_http_server
from prometheus_client import Counter
from prometheus_client import Summary

REQUESTS = Counter('flask_twitter_webapp_calls_total', 'Number of calls')
LATENCY = Summary('flask_twitter_webapp_latency_seconds',
                  'Time needed for a request for the home page')
LATENCY_SEARCH = Summary('flask_twitter_webapp_search_latency_seconds',
                         'Time needed for a search request')

app = Flask(__name__)


def get_tweet(sentence, start):

    tweets_block = ""
    if sentence:
        res, score = prediction(sentence, "data/tweets.csv", "model/d2v.model")
        for index in range(len(res)):
            tweets_block += "<tr id=" + str(index) + " > <td>" + str(
                index) + "</td> <td>" + res[index] + "</td> </tr>"

    LATENCY_SEARCH.observe(time.time() - start)
    return render_template('index.html', tweets=tweets_block)


@app.route('/', methods=['GET', 'POST'])
def index():
Example #26
from datetime import datetime, timedelta
from prometheus_client import Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from aliyunsdkcore.client import AcsClient
from aliyunsdkcms.request.v20180308 import QueryMetricLastRequest
from aliyunsdkrds.request.v20140815 import DescribeDBInstancePerformanceRequest
from ratelimiter import RateLimiter

from aliyun_exporter.info_provider import InfoProvider

rds_performance = 'rds_performance'
special_projects = {
    rds_performance: lambda collector : RDSPerformanceCollector(collector),
}

requestSummary = Summary('cloudmonitor_request_latency_seconds', 'CloudMonitor request latency', ['project'])
requestFailedSummary = Summary('cloudmonitor_failed_request_latency_seconds', 'CloudMonitor failed request latency', ['project'])

class CollectorConfig(object):
    def __init__(self,
                 pool_size=10,
                 rate_limit=10,
                 credential=None,
                 metrics=None,
                 info_metrics=None,
                 ):
        # if metrics is None:
        # raise Exception('Metrics config must be set.')

        self.credential = credential
        self.metrics = metrics
Example #27
import logging
import typing as t

import psutil
from prometheus_client import Gauge, Summary

logger = logging.getLogger(__name__)

COLLECT_TIME = Summary('airflow_collecting_stats_seconds',
                       'Time spent processing collecting stats')


class ProcessMetrics(t.NamedTuple):
    dag: str
    operator: str
    exec_date: str
    is_local: bool
    is_raw: bool

    mem_rss: int
    mem_vms: int
    mem_shared: int
    mem_text: int
    mem_data: int
    mem_lib: int
    mem_uss: int
    mem_pss: int
    mem_swap: int

    # cpu_num: int
    cpu_percent: float
Example #28
#!/usr/bin/env python3
import requests
import os
import sys
import time
import logging

from dataclasses import dataclass
from prometheus_client import Counter, Gauge, Summary
from prometheus_client import CollectorRegistry, push_to_gateway
from pythonjsonlogger import jsonlogger

PROMREG = CollectorRegistry()

EXTRACT_TIME = Summary('record_extraction_seconds',
                       'Time spent extracting an individual record',
                       registry=PROMREG)
DURATION = Gauge('mining_duration_seconds',
                 'Time spent mining data',
                 registry=PROMREG)
DATA_DURATION = Gauge('data_download_seconds',
                      'Time spent downloading public data',
                      registry=PROMREG)
RECORD_COUNT = Gauge('record_count_total',
                     'total records found',
                     registry=PROMREG)
TELEMETRY_COUNT = Gauge('telemetry_push_attempts_total',
                        'total attempts to push vehicle telemetry',
                        registry=PROMREG)
TELEMETRY_DURATION = Summary(
    'telemetry_server_push_seconds',
Example #29
from prometheus_client import Counter, Summary

from app import utils as utils
from app.models import Category, Language

logger = utils.setup_logger('routes_logger')
latency_summary = Summary('request_latency_seconds', 'Length of request')
failures_counter = Counter('my_failures', 'Number of exceptions raised')


def unauthorized_response():
    message = "The email or password you submitted is incorrect " \
              "or your account is not allowed api access"
    payload = {'errors': {"unauthorized": {"message": message}}}
    return utils.standardize_response(payload=payload, status_code=401)


def get_attributes(json):
    languages_list = Language.query.all()
    categories_list = Category.query.all()

    language_dict = {l.key(): l for l in languages_list}
    category_dict = {c.key(): c for c in categories_list}

    langs = []
    for lang in json.get('languages') or []:
        language = language_dict.get(lang)
        if not language:
            language = Language(name=lang)
        langs.append(language)
    categ = category_dict.get(json.get('category'),
Example #30
# This is a skeleton server (unfinished).
# The mtd_exporter queries Prometheus on every scrape, which can generate a lot of traffic/pressure on Prometheus.
# One alternative is to query only when a GET arrives on a particular port.
# If this is required, feel free to extend this file.

import http.server
from prometheus_client import start_http_server
from prometheus_client import Summary

import time

LATENCY = Summary('hello_world_latency_seconds', 'Time for a request Hello World.')

class Update_metrics(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        print("handler fired")
        start = time.time()
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"metrics were updated\n")
        LATENCY.observe(time.time() - start)


if __name__ == "__main__":
    print("Starting http server")
    APP_METRICS_PORT = 9000
    update_metrics = 8080

    start_http_server(APP_METRICS_PORT)  # Serve system metrics on this port
    server = http.server.HTTPServer(('localhost', update_metrics), Update_metrics)
    server.serve_forever()
Example #31
from repour.adjust.scala_provider import get_scala_provider
from repour import asutil, clone, exception
from repour.config import config
from repour.lib.logs import log_util
from repour.lib.scm import git, asgit

from repour.adjust import (
    gradle_provider,
    noop_provider,
    pme_provider,
    process_provider,
    project_manipulator_provider,
    util,
)

from prometheus_client import Histogram, Summary

REQ_TIME = Summary("adjust_req_time", "time spent with adjust endpoint")
REQ_HISTOGRAM_TIME = Histogram(
    "adjust_req_histogram",
    "Histogram for adjust endpoint",
    buckets=[
        1,
        10,
        60,
        120,
        300,
        600,
        900,
        1200,
        1500,
        1800,
        2100,
Example #32
    "Total errors encountered by the copy operation",
    ["bucket"],
)
storage_copy_success = Counter(
    "storage_broker_object_copy_success_count",
    "Total successful object moves",
    ["bucket"],
)
invalid_validation_status = Counter(
    "storage_broker_invalid_status_count",
    "Total invalid status messages received",
    ["service"],
)

# Summaries
payload_size = Summary(
    "storage_broker_payload_size_bytes",
    "Total size in bytes of payload to store",
    ["service"],
)

# Histograms
get_key_time = Histogram(
    "storage_broker_get_key_function_time_seconds",
    "Total time to get the key and bucket destination for payload",
)
storage_copy_time = Histogram(
    "storage_broker_object_copy_time_seconds",
    "Total time it takes to copy an object from one bucket to another",
)
Example #33
#!/bin/python
from prometheus_client import start_http_server, Summary
import random
import time

# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds',
                       'Time spent processing request')


# Decorate function with metric.
@REQUEST_TIME.time()
def process_request(t):
    """A dummy function that takes some time."""
    time.sleep(t)


if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(8181)
    # Generate some requests.
    while True:
        process_request(random.random())
Example #34
from uvicorn import run
from fastapi import FastAPI

from prometheus_client import generate_latest, REGISTRY, Counter, Gauge, Histogram, Summary

app = FastAPI()

PROMETHEUS_COUNTER: Counter = Counter('test_counter', 'count')
PROMETHEUS_GAUGE: Gauge = Gauge('test_gauge', 'gauge')
PROMETHEUS_HISTOGRAM: Histogram = Histogram('test_histogram', 'histogram')
PROMETHEUS_SUMMARY: Summary = Summary('test_summary', 'summary')


@app.get('/summary/{num}')
def summary(num: int) -> None:
    PROMETHEUS_SUMMARY.observe(num)


@app.get('/histogram/{num}')
def histogram(num: int) -> None:
    PROMETHEUS_HISTOGRAM.observe(num)


@app.get('/gauge/+/{num}')
def gauge_inc(num: int) -> None:
    PROMETHEUS_GAUGE.inc(num)


@app.get('/gauge/-/{num}')
def gauge_dec(num: int) -> None:
    PROMETHEUS_GAUGE.dec(num)
Example #35
logging.basicConfig(level=os.environ.get('LOG_LEVEL', 'INFO'))
SC_API_KEY = str(os.environ.get('SC_API_KEY'))
SC_USERNAME = str(os.environ.get('SC_USERNAME'))
SC_ENDPOINT = 'https://app.statuscake.com/API/Tests/'
AUTH_HEADERS = {'API': SC_API_KEY, 'Username': SC_USERNAME}
scheduler = BackgroundScheduler()

baselabels = ['website', 'testid']
checklabels = ['status', 'location']
metrics = {
    'up':
    Gauge('statuscake_status', 'check if site is up', baselabels),
    'uptime':
    Gauge('statuscake_uptime', '7 day % uptime for this site', baselabels),
    'performance':
    Summary('statuscake_performance', 'load time in ms',
            baselabels + checklabels)
}


def get_tests():
    logging.info('Retrieving tests list')
    req = requests.get(f'{SC_ENDPOINT}', headers=AUTH_HEADERS)
    logging.info(f'{req.status_code} {req.url}')
    tests = req.json()
    for test in tests:
        if test['Status'] == 'Up':
            metrics['up'].labels(test['WebsiteName'], test['TestID']).set(1)
        else:
            metrics['up'].labels(test['WebsiteName'], test['TestID']).set(0)

        metrics['uptime'].labels(test['WebsiteName'],
Example #36
from prometheus_client import Summary, Counter

GET_FILE = Summary("puptoo_get_file_seconds",
                   "Time spent retrieving file from S3")
EXTRACT = Summary("puptoo_total_extraction_seconds",
                  "Total time spent extracting facts")
SYSTEM_PROFILE = Summary("puptoo_system_profile_seconds",
                         "Total time spent extracting system profile")

msg_count = Counter("puptoo_messages_consumed_total",
                    "Total messages consumed from the kafka topic")
extract_failure = Counter("puptoo_failed_extractions_total",
                          "Total archives that failed to extract")
msg_processed = Counter("puptoo_messages_processed_total",
                        "Total messages successful process")
msg_produced = Counter("puptoo_messages_produced_total",
                       "Total messages produced")