Example #1
def setup_status(app) -> prometheus_client.CollectorRegistry:
    """Add /status to serve Prometheus-driven runtime metrics."""
    registry = prometheus_client.CollectorRegistry(auto_describe=True)
    app["request_count"] = prometheus_client.Counter(
        "requests_total",
        "Total Request Count",
        ["app_name", "method", "endpoint", "http_status"],
        registry=registry,
    )
    app["request_latency"] = prometheus_client.Histogram(
        "request_latency_seconds",
        "Request latency",
        ["app_name", "endpoint"],
        registry=registry,
    )
    app["request_in_progress"] = prometheus_client.Gauge(
        "requests_in_progress_total",
        "Requests in progress",
        ["app_name", "endpoint", "method"],
        registry=registry,
    )
    prometheus_client.Info(
        "server", "API server version", registry=registry,
    ).info({
        "version": metadata.__version__,
        "commit": getattr(metadata, "__commit__", "null"),
        "build_date": getattr(metadata, "__date__", "null"),
    })
    app.middlewares.insert(0, instrument)
    # passing StatusRenderer(registry) without __call__ triggers a spurious DeprecationWarning
    # FIXME(vmarkovtsev): https://github.com/aio-libs/aiohttp/issues/4519
    app.router.add_get("/status", StatusRenderer(registry).__call__)
    return registry
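The `instrument` middleware registered above is not part of the listing; a minimal sketch of what it could look like, wired to the three metrics created in `setup_status()` (the "api" label value and the overall shape are assumptions):

import time

from aiohttp import web


@web.middleware
async def instrument(request, handler):
    # Hypothetical middleware updating the metrics registered in setup_status().
    app = request.app
    app["request_in_progress"].labels("api", request.path, request.method).inc()
    start = time.monotonic()
    status = 500  # assume a server error unless the handler returns normally
    try:
        response = await handler(request)
        status = response.status
        return response
    finally:
        app["request_latency"].labels("api", request.path).observe(
            time.monotonic() - start)
        app["request_count"].labels(
            "api", request.method, request.path, status).inc()
        app["request_in_progress"].labels(
            "api", request.path, request.method).dec()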
Example #2
def run(args):
    s3uri = args.s3uri
    localpath = args.localpath
    excludes = args.exclude
    interval = args.interval

    i = pc.Info('s3insync_version',
                'Version and config information for the client')
    i.info({
        'version': s3insync.__version__,
        'aws_repo': s3uri,
        'localpath': localpath,
    })
    start_time = pc.Gauge('s3insync_start_time',
                          'Time the sync process was started')
    start_time.set_to_current_time()

    last_sync = pc.Gauge('s3insync_last_sync_time',
                         'Time the last sync completed')
    op_count = pc.Counter('s3insync_operations',
                          'Count of operations',
                          labelnames=('type', ))
    failed_op_count = pc.Counter('s3insync_failed_operations',
                                 'Count of failed operations',
                                 labelnames=('type', ))
    files_in_s3 = pc.Gauge(
        's3insync_files_in_s3',
        'Number of files in S3',
    )

    pc.start_http_server(8087)
    src = r.S3Repo('s3', s3uri)
    dest = r.LocalFSRepo('fs', localpath,
                         os.path.join(os.getenv('HOME'), ".s3insync"))
    dest.ensure_directories()

    sync = sd.SyncDecider(excludes)

    set_exit = setup_signals()

    while not set_exit.is_set():
        logger.debug("Starting sync")
        start = time.monotonic()

        try:
            success, failures = sync.execute_sync(src, dest)
            files_in_s3.set(success.pop('total', 0))
            set_op_counts(success, op_count)
            set_op_counts(failures, failed_op_count)
            last_sync.set_to_current_time()
        except Exception:
            logger.exception("Failed to execute sync")

        duration = time.monotonic() - start
        logger.debug("Stopping sync after %g secs", duration)

        set_exit.wait(max(30, interval - duration))
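`set_op_counts` is referenced but not shown; given how the `execute_sync` results are consumed above, a plausible sketch (the dict shape is an assumption):

def set_op_counts(results, counter):
    # Hypothetical helper: 'results' maps an operation type to how often it ran.
    for op_type, count in results.items():
        counter.labels(type=op_type).inc(count)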
Example #3
    def __init__(self, p_logger, p_config):

        self._logger = p_logger
        self._config = p_config
        self._gauge_monitored_users = prometheus_client.Gauge(
            self._config.prefix + "monitored_users",
            "number of monitored users")
        self._gauge_active_users = prometheus_client.Gauge(
            self._config.prefix + "active_users", "number of active users",
            ['username'])
        self._gauge_configured_users = prometheus_client.Gauge(
            self._config.prefix + "configured_users",
            "number of configured users")
        self._gauge_monitored_hosts = prometheus_client.Gauge(
            self._config.prefix + "monitored_hosts",
            "number of monitored hosts", ['hostname'])
        self._gauge_monitored_devices = prometheus_client.Gauge(
            self._config.prefix + "monitored_devices",
            "number of monitored devices")
        self._gauge_active_devices = prometheus_client.Gauge(
            self._config.prefix + "active_devices", "number of active devices",
            ['devicename'])
        self._gauge_device_response_time = prometheus_client.Gauge(
            self._config.prefix + "device_response_time",
            "response time of device [ms]", ['devicename'])
        self._gauge_device_moving_average_response_time = \
            prometheus_client.Gauge(self._config.prefix + "device_moving_average_response_time",
                                    "moving average of response time of device [ms]",
                                    ['devicename'])
        self._counter_forced_logouts = prometheus_client.Counter(
            self._config.prefix + "forced_logouts", "number of forced logouts",
            ['username'])

        self._summary_http_requests = prometheus_client.Summary(
            self._config.prefix + "http_requests",
            "request duration [ms] and count", ['service', 'hostname'])

        self._info_system = prometheus_client.Info(
            self._config.prefix + "system", "system information")
        self._info_system.info({
            "version": settings.settings['version'],
            "revision": settings.extended_settings['debian_package_revision'],
        })

        self._gauge_uptime = prometheus_client.Gauge(
            self._config.prefix + "uptime", "uptime in seconds")
        self._start_time = time.time()
        self._gauge_uptime.set_function(lambda: time.time() - self._start_time)
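`set_function()` registers a callback that is evaluated at scrape time, so the uptime gauge never needs explicit updates. The Summary, by contrast, must be fed by callers; note its help text says [ms] while `Summary.time()` observes seconds, so an explicit `observe()` matches the declared unit. A usage sketch (instance name, label values, and `handle_request` are assumptions):

import time

start = time.time()
handle_request()  # hypothetical unit of work being measured
monitor._summary_http_requests.labels(service="api", hostname="web-1").observe(
    (time.time() - start) * 1000.0)  # observe in ms to match the help text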
Example #4
    def __init__(self, chart, graphmgr_addr):
        super().__init__()

        self.graphCommHandler = AsyncGraphCommHandler(graphmgr_addr.name,
                                                      graphmgr_addr.comm,
                                                      ctx=chart.ctx)
        self.graph_name = graphmgr_addr.name
        self.metadata = None

        self.currentFileName = None
        self.chart = chart
        self.chartWidget = FlowchartWidget(chart, self)

        self.ui = EditorTemplate.Ui_Toolbar()
        self.ui.setupUi(parent=self, chart=self.chartWidget)
        self.ui.create_model(self.ui.node_tree,
                             self.chart.library.getLabelTree())
        self.ui.create_model(self.ui.source_tree,
                             self.chart.source_library.getLabelTree())

        self.chart.sigNodeChanged.connect(self.ui.setPending)

        self.features = Features(self.graphCommHandler)

        self.ui.actionNew.triggered.connect(self.clear)
        self.ui.actionOpen.triggered.connect(self.openClicked)
        self.ui.actionSave.triggered.connect(self.saveClicked)
        self.ui.actionSaveAs.triggered.connect(self.saveAsClicked)

        self.ui.actionConfigure.triggered.connect(self.configureClicked)
        self.ui.actionApply.triggered.connect(self.applyClicked)
        self.ui.actionReset.triggered.connect(self.resetClicked)
        # self.ui.actionProfiler.triggered.connect(self.profilerClicked)

        self.ui.actionHome.triggered.connect(self.homeClicked)
        self.ui.navGroup.triggered.connect(self.navClicked)

        self.chart.sigFileLoaded.connect(self.setCurrentFile)
        self.chart.sigFileSaved.connect(self.setCurrentFile)

        self.sourceConfigure = SourceConfiguration()
        self.sourceConfigure.sigApply.connect(self.configureApply)

        self.libraryEditor = EditorTemplate.LibraryEditor(self, chart.library)
        self.libraryEditor.sigApplyClicked.connect(self.libraryUpdated)
        self.libraryEditor.sigReloadClicked.connect(self.libraryReloaded)
        self.ui.libraryConfigure.clicked.connect(self.libraryEditor.show)

        self.graph_info = pc.Info('ami_graph', 'AMI Client graph',
                                  ['hutch', 'name'])
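Because `graph_info` is declared with the `hutch` and `name` labels, it must be bound with `labels()` before `info()` can be called; a sketch of a plausible update site (the label values and info keys are assumptions):

self.graph_info.labels(hutch='tst', name=self.graph_name).info(
    {'graph': self.graph_name, 'file': self.currentFileName or ''})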
Example #5
    def __init__(self, session: boto3.Session,
                 check_classes: typing.List[QuotaCheck],
                 settings: PrometheusExporterSettings):
        self.session = session
        self.check_classes = check_classes
        self.checks = []
        self.settings = settings

        # unregister default collectors
        for collector in list(prom.REGISTRY._names_to_collectors.values()):
            with contextlib.suppress(KeyError):
                prom.REGISTRY.unregister(collector)

        prom.Info(self.settings.namespace,
                  'AWS quota checker info').info({**self.default_labels})
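Iterating over `REGISTRY._names_to_collectors` reaches into a private attribute; recent prometheus_client releases expose the default collectors directly, so an equivalent cleanup could read:

import contextlib

import prometheus_client as prom

for collector in (prom.GC_COLLECTOR, prom.PLATFORM_COLLECTOR,
                  prom.PROCESS_COLLECTOR):
    with contextlib.suppress(KeyError):
        prom.REGISTRY.unregister(collector)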
Example #6
    def _config(self, fields):
        metrics = {}
        labels = ['serial_number', 'product_id']

        for f in defs.FIELDS:
            label = f.label.replace('#', '')
            name = 'victron_%s' % label.lower()
            kind = f.kind()
            if isinstance(kind, pint.Quantity):
                unit = str(kind.units)
            else:
                unit = _UNITS.get(f.unit, f.unit)

            if unit == 'hour * watt':
                unit = 'wh'

            if kind == str:
                metrics[f.label] = prometheus_client.Info(name,
                                                          f.description,
                                                          labelnames=labels)
            elif _is_enum(kind):
                states = [x.name.lower() for x in kind]
                metrics[f.label] = prometheus_client.Enum(
                    name,
                    f.description,
                    labelnames=labels,
                    states=states)
                metrics[f.label + '_value'] = prometheus_client.Gauge(
                    name + '_value',
                    f.description,
                    labelnames=labels)
            else:
                metrics[f.label] = prometheus_client.Gauge(
                    name,
                    f.description,
                    labelnames=labels,
                    unit=unit)

        updated = prometheus_client.Gauge(
            'victron_updated',
            'Last time a block was received from the device',
            labelnames=labels)
        blocks = prometheus_client.Counter(
            'victron_blocks',
            'Number of blocks received from the device',
            labelnames=labels)

        return metrics, updated, blocks
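A minimal sketch of how these metric kinds get driven once a block arrives from the device (the field labels 'V', 'CS', 'SER#' and the values are assumptions):

labels = dict(serial_number='HQ12345ABCD', product_id='0xA042')
metrics['V'].labels(**labels).set(12.34)                         # Gauge reading
metrics['CS'].labels(**labels).state('bulk')                     # Enum state
metrics['SER#'].labels(**labels).info({'value': 'HQ12345ABCD'})  # Info field
updated.labels(**labels).set_to_current_time()
blocks.labels(**labels).inc()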
Example #7
    def __init__(self) -> None:
        """Find all possible sensor signatures with the current sensor config."""
        logger.debug("Getting sensor signature lookup table...")
        self.signatures = self.get_sensor_signature_lookup_table()
        logger.debug("Initiating Prometheus collector registry...")
        self.registry = prometheus_client.CollectorRegistry()

        # qwiic_build_info
        build_info = prometheus_client.Info(
            "qwiic_build",
            "Information about the qwiic_exporter itself.",
            registry=self.registry,
        )
        build_info.info({
            "version": __version__,
            "pyserial_version": serial.__version__
        })
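Since the exporter keeps its own `CollectorRegistry`, that registry has to be passed explicitly when the metrics are exposed; two common options (the port is an assumption):

prometheus_client.start_http_server(9100, registry=self.registry)
# or render on demand, e.g. in a request handler or a test:
payload = prometheus_client.generate_latest(self.registry)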
Example #8
    def __init__(self, bot):
        self.bot = bot
        self.bot.socket_events = prometheus_client.Counter(
            'socket_events',
            documentation='Socket events',
            namespace='life',
            labelnames=['event'])
        self.bot.counts = prometheus_client.Gauge('counts',
                                                  documentation='Life counts',
                                                  namespace='life',
                                                  labelnames=['count'])
        self.bot.stats = prometheus_client.Counter('stats',
                                                   documentation='Life stats',
                                                   namespace='life',
                                                   labelnames=['stat'])
        self.bot.info = prometheus_client.Info('misc',
                                               documentation='Life info',
                                               namespace='life')

        self.collect_stats.start()
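`collect_stats` is started but not shown; with discord.py it would typically be a `tasks.loop` method on the cog, along these lines (the interval and which gauges get fed are assumptions):

from discord.ext import tasks

@tasks.loop(seconds=30)
async def collect_stats(self):
    # Hypothetical loop body; inside the cog, the gauges declared above get fed here.
    self.bot.counts.labels(count='guilds').set(len(self.bot.guilds))
    self.bot.counts.labels(count='users').set(len(self.bot.users))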
Example #9
                                   ['resource_type', 'validator_name'])
HBBFT_PERF = prometheus_client.Gauge(
    'validator_hbbft_perf',
    'HBBFT performance metrics from perf, only applies when in CG',
    ['resource_type', 'subtype', 'validator_name'])
CONNECTIONS = prometheus_client.Gauge('validator_connections',
                                      'Number of libp2p connections',
                                      ['resource_type', 'validator_name'])
SESSIONS = prometheus_client.Gauge('validator_sessions',
                                   'Number of libp2p sessions',
                                   ['resource_type', 'validator_name'])
LEDGER_PENALTY = prometheus_client.Gauge(
    'validator_ledger', 'Validator performance metrics',
    ['resource_type', 'subtype', 'validator_name'])
VALIDATOR_VERSION = prometheus_client.Info(
    'validator_version', 'Version number of the miner container',
    ['validator_name'])
BALANCE = prometheus_client.Gauge('validator_api_balance',
                                  'Balance of the validator owner account',
                                  ['validator_name'])
UPTIME = prometheus_client.Gauge('validator_container_uptime',
                                 'Time container has been at a given state',
                                 ['state_type', 'validator_name'])
miner_facts = {}


def try_int(v):
    if re.match(r"^\-?\d+$", v):
        return int(v)
    return v
Example #10
import prometheus_client
import fritzconnection
import time
import os
from fritzconnection.lib.fritzstatus import FritzStatus
from prometheus_client.core import REGISTRY
from pprint import pprint
from fritzstats import FritzStats

fc = None
registry = REGISTRY

info_metric = prometheus_client.Info(
    'fritzconnection', 'Fritz!Box information', registry=registry)
uptime_metric = prometheus_client.Gauge(
    'fritzconnection_uptime', "System uptime", registry=registry)
wan_info_metric = prometheus_client.Info(
    'fritzconnection_wan', 'WAN information', registry=registry)
max_bitrate_downstream_metric = prometheus_client.Gauge('fritzconnection_wan_max_bitrate_downstream',
                                                        "WAN max downstream bitrate", registry=registry)
max_bitrate_upstream_metric = prometheus_client.Gauge('fritzconnection_wan_max_bitrate_upstream',
                                                      "WAN max upstream bitrate", registry=registry)
link_metric = prometheus_client.Gauge('fritzconnection_wan_physical_link_up',
                                      "Physical link status", ["link_status"], registry=registry)
wan_bytes_received_metric = prometheus_client.Gauge('fritzconnection_wan_total_bytes_received',
                                                    "WAN total bytes received", registry=registry)
wan_bytes_sent_metric = prometheus_client.Gauge('fritzconnection_wan_total_bytes_sent',
                                                "WAN total bytes sent", registry=registry)
external_ip_info_metric = prometheus_client.Info("fritzconnection_external_ip", "External IP addresses",
                                                 registry=registry)
lan_total_bytes_received_metric = prometheus_client.Gauge('fritzconnection_lan_total_bytes_received',
Example #11
    "grandchallenge_algorithm_jobs_active_total",
    "The number of active algorithm jobs",
)
ALGORITHM_EVALUATION_JOBS_PENDING = prometheus_client.Gauge(
    "grandchallenge_algorithm_evaluation_jobs_pending_total",
    "The number of pending algorithm evaluation jobs",
)
ALGORITHM_EVALUATION_JOBS_ACTIVE = prometheus_client.Gauge(
    "grandchallenge_algorithm_evaluation_jobs_active_total",
    "The number of active algorithm evaluation jobs",
)
EVALUATION_JOBS_PENDING = prometheus_client.Gauge(
    "grandchallenge_evaluation_jobs_pending_total",
    "The number of pending evaluation jobs",
)
EVALUATION_JOBS_ACTIVE = prometheus_client.Gauge(
    "grandchallenge_evaluation_jobs_active_total",
    "The number of active evaluation jobs",
)
UPLOAD_SESSIONS_PENDING = prometheus_client.Gauge(
    "grandchallenge_upload_sessions_pending_total",
    "The number of pending upload sessions",
)
UPLOAD_SESSIONS_ACTIVE = prometheus_client.Gauge(
    "grandchallenge_upload_sessions_active_total",
    "The number of active upload sessions",
)
BUILD_VERSION = prometheus_client.Info("grandchallenge_build_version",
                                       "The build version")
BUILD_VERSION.info({"grandchallenge_commit_id": settings.COMMIT_ID})
Example #12
if args.port is not None:
    localConf['port'] = args.port

if args.interval is not None:
    localConf['interval'] = args.interval

print('[{}] Running with {}'.format(datetime.now().strftime('%FT%H:%M%Z%z'), localConf))

instance_url = localConf['protocol'] + '://' + localConf['instance']

host = localConf['address']
port = int(localConf['port'])

interval = localConf['interval']

last_query = pc.Info('mastodon_last_query', 'Unix timestamp of the last query of the api')
version = pc.Info('mastodon_version', 'Version of the mastodon instance')
title = pc.Info('mastodon_title', 'Title of the mastodon instance')
uri = pc.Info('mastodon_uri', 'Uri of the mastodon instance')
short_description = pc.Info('mastodon_short_description', 'Short description of the mastodon instance')
description = pc.Info('mastodon_description', 'Description of the mastodon instance')
email = pc.Info('mastodon_email', 'Email of the mastodon instance')

user_count = pc.Gauge('mastodon_user_count', 'Count of the users on this instance')
status_count = pc.Gauge('mastodon_status_count', 'Count of the statuses on this instance')
domain_count = pc.Gauge('mastodon_domain_count', 'Count of the connected domains to this instance')
custom_emoji_count = pc.Gauge('mastodon_custom_emoji_count', 'Count of the custom emojis on this instance')

trends = pc.Info('mastodon_trends', 'Trends on this mastodon instance')

def collectMetrics ():
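The listing is cut off at `collectMetrics`; given the metrics declared above, a plausible body would poll the instance API and fan the response out. A sketch under that assumption (the endpoint and fields follow the public Mastodon API, but the whole function is hypothetical):

import time

import requests  # assumed HTTP client

def collect_metrics_sketch():
    # Hypothetical body: poll the public instance endpoint and feed the metrics.
    data = requests.get(instance_url + '/api/v1/instance').json()
    last_query.info({'timestamp': str(int(time.time()))})
    version.info({'version': data.get('version', '')})
    title.info({'title': data.get('title', '')})
    uri.info({'uri': data.get('uri', '')})
    short_description.info({'short_description': data.get('short_description', '')})
    description.info({'description': data.get('description', '')})
    email.info({'email': data.get('email', '')})
    stats = data.get('stats', {})
    user_count.set(stats.get('user_count', 0))
    status_count.set(stats.get('status_count', 0))
    domain_count.set(stats.get('domain_count', 0))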
Example #13

def app_run():
    logger.info("App starting", extra={"http_port": os.environ['HTTP_PORT']})
    app = prometheus_client.make_wsgi_app()
    httpd = make_server('', int(os.environ['HTTP_PORT']), app)
    httpd.serve_forever()


# global constants
PROMETHEUS_CONFIG = json.loads(os.environ['PROMETHEUS_CONFIG'])

# Basic metrics
# @TODO: get VERSION from somewhere :P
i = prometheus_client.Info('system',
                           'Version and environment information',
                           namespace=PROMETHEUS_CONFIG['namespace'])
i.info({'version': "2002.0", 'env': os.environ['APP_ENV']})

# Defines the scrapers to run every 15 seconds
scheduler = BackgroundScheduler(timezone="UTC")
scheduler.start()

inited_metrics = {}

JOBS_CONFIG = json.loads(os.environ['JOBS_CONFIG'])


def add_scrapper(scrapper, instances):
    for instance in instances:
        interval = instance[
Example #14
class BigipAS3RestClient(object):
    _metric_httpstatus = prometheus.metrics.Counter(
        'octavia_as3_httpstatus',
        'Number of HTTP statuses in responses to AS3 requests',
        ['method', 'statuscode'])
    _metric_post_duration = prometheus.metrics.Summary(
        'octavia_as3_post_duration',
        'Time it needs to send a POST request to AS3')
    _metric_post_exceptions = prometheus.metrics.Counter(
        'octavia_as3_post_exceptions',
        'Number of exceptions at POST requests sent to AS3')
    _metric_patch_duration = prometheus.metrics.Summary(
        'octavia_as3_patch_duration',
        'Time it needs to send a PATCH request to AS3')
    _metric_patch_exceptions = prometheus.metrics.Counter(
        'octavia_as3_patch_exceptions',
        'Number of exceptions at PATCH request sent to AS3')
    _metric_delete_duration = prometheus.metrics.Summary(
        'octavia_as3_delete_duration',
        'Time it needs to send a DELETE request to AS3')
    _metric_delete_exceptions = prometheus.metrics.Counter(
        'octavia_as3_delete_exceptions',
        'Number of exceptions at DELETE request sent to AS3')
    _metric_authorization = prometheus.metrics.Counter(
        'octavia_as3_authorization',
        'How often the F5 provider driver had to (re)authorize before performing an AS3 request'
    )
    _metric_authorization_duration = prometheus.metrics.Summary(
        'octavia_as3_authorization_duration', 'Time it needs to (re)authorize')
    _metric_authorization_exceptions = prometheus.metrics.Counter(
        'octavia_as3_authorization_exceptions',
        'Number of exceptions during (re)authorization')
    _metric_failover = prometheus.metrics.Counter(
        'octavia_as3_failover',
        'How often the F5 provider driver switched to another BigIP device')
    _metric_failover_exceptions = prometheus.metrics.Counter(
        'octavia_as3_failover_exceptions',
        'Number of exceptions during failover')
    _metric_version = prometheus.Info('octavia_as3_version', 'AS3 Version')

    def __init__(self,
                 bigip_urls,
                 enable_verify=True,
                 enable_token=True,
                 esd=None):
        self.bigips = [
            parse.urlsplit(url, allow_fragments=False) for url in bigip_urls
        ]
        # Use the first BigIP device by default
        self.active_bigip = self.bigips[0]
        self.enable_verify = enable_verify
        self.enable_token = enable_token
        self.token = None
        self.session = self._create_session()
        self.esd = esd
        try:
            info = self.info()
            info.raise_for_status()
            info_dict = dict(device=self.active_bigip.hostname, **info.json())
            self._metric_version.info(info_dict)
        except requests.exceptions.HTTPError as e:
            # Failed connecting to AS3 endpoint, gracefully terminate
            LOG.error('Could not connect to AS3 endpoint: %s', e)
            os.kill(os.getpid(), signal.SIGTERM)

    def _url(self, path):
        return parse.urlunsplit(
            parse.SplitResult(scheme=self.active_bigip.scheme,
                              netloc=self.active_bigip.hostname,
                              path=path,
                              query='',
                              fragment=''))

    def _create_session(self):
        session = requests.Session()
        retry = Retry(
            total=5,
            backoff_factor=0.3,
            status_forcelist=(500, 502, 503, 504),
        )
        session.mount('https://', HTTPAdapter(max_retries=retry))
        session.verify = self.enable_verify
        return session

    @check_response
    @authorized
    @failover_on_connection_error
    def _call_method(self, method, url, **kwargs):
        meth = getattr(self.session, method)
        response = meth(url, **kwargs)
        self._metric_httpstatus.labels(method=method,
                                       statuscode=response.status_code).inc()
        LOG.debug("%s to %s finished with %d", method,
                  self.active_bigip.hostname, response.status_code)
        return response

    @_metric_failover_exceptions.count_exceptions()
    def _failover(self):
        self._metric_failover.inc()
        for bigip in self.bigips:
            if bigip != self.active_bigip:
                LOG.debug("Failover to {}".format(bigip.hostname))
                self.active_bigip = bigip
                return
        raise exceptions.FailoverException("No BigIP to failover to")

    @_metric_authorization_exceptions.count_exceptions()
    @_metric_authorization_duration.time()
    def reauthorize(self):
        self._metric_authorization.inc()
        # Login
        credentials = {
            "username": self.active_bigip.username,
            "password": self.active_bigip.password,
            "loginProviderName": "tmos"
        }

        self.session.headers.pop('X-F5-Auth-Token', None)
        r = self.session.post(self._url(AS3_LOGIN_PATH), json=credentials)
        self._metric_httpstatus.labels(method='post',
                                       statuscode=r.status_code).inc()
        r.raise_for_status()
        self.token = r.json()['token']['token']

        self.session.headers.update({'X-F5-Auth-Token': self.token})

        patch_timeout = {"timeout": "36000"}
        r = self.session.patch(self._url(AS3_TOKENS_PATH.format(self.token)),
                               json=patch_timeout)
        self._metric_httpstatus.labels(method='patch',
                                       statuscode=r.status_code).inc()
        LOG.debug("Reauthorized!")

    @_metric_post_exceptions.count_exceptions()
    @_metric_post_duration.time()
    def post(self, **kwargs):
        LOG.debug("Calling POST with JSON %s", kwargs.get('json'))
        return self._call_method('post', self._url(AS3_DECLARE_PATH), **kwargs)

    @_metric_patch_exceptions.count_exceptions()
    @_metric_patch_duration.time()
    def patch(self, operation, path, **kwargs):
        LOG.debug("Calling PATCH %s with path %s", operation, path)
        if 'value' in kwargs:
            LOG.debug(json.dumps(kwargs['value'], indent=4, sort_keys=True))
        params = kwargs.copy()
        params.update({'op': operation, 'path': path})
        return self._call_method('patch',
                                 self._url(AS3_DECLARE_PATH),
                                 json=[params])

    @_metric_delete_exceptions.count_exceptions()
    @_metric_delete_duration.time()
    def delete(self, **kwargs):
        tenants = kwargs.get('tenants', None)
        if not tenants:
            LOG.error(
                "Delete called without tenant, would wipe all AS3 Declaration, ignoring!"
            )
            return None

        LOG.debug("Calling DELETE for tenants %s", tenants)
        url = self._url('{}/{}'.format(AS3_DECLARE_PATH, ','.join(tenants)))
        return self._call_method('delete', url)

    def info(self):
        return self._call_method('get', self._url(AS3_INFO_PATH))
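The `authorized`, `check_response`, and `failover_on_connection_error` decorators are used but not shown. A sketch of what `authorized` could look like, retrying a call once after `reauthorize()` on a 401 (the whole helper is an assumption):

import functools

def authorized(func):
    # Hypothetical decorator: reauthorize and retry once when the token is rejected.
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        if response.status_code == 401:  # token expired or missing
            self.reauthorize()
            response = func(self, *args, **kwargs)
        return response
    return wrapper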