Example #1
import cloudpickle
from flask import Flask, request, jsonify, Response
import prometheus_client
from prometheus_client import Counter, Histogram

REQUEST_COUNT = Counter('request_count', 'App Request Count', [])
REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency', [])

app = Flask(__name__)
with open('model.pkl', 'rb') as f:
    model = cloudpickle.load(f)


@app.route('/predict', methods=["POST"])
@REQUEST_LATENCY.time()
def predict():
    REQUEST_COUNT.inc()
    req = request.json
    print(req)
    return jsonify(model.predict(req))


CONTENT_TYPE_LATEST = 'text/plain; version=0.0.4; charset=utf-8'


@app.route('/metrics', methods=["GET"])
def metrics():
    return Response(prometheus_client.generate_latest(),
                    mimetype=CONTENT_TYPE_LATEST)
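
The excerpt never starts the server; a minimal (assumed) entry point would be:

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)

Note that jsonify cannot serialize NumPy arrays, so if model.predict returns an ndarray, converting it with .tolist() before jsonify is the usual fix.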

Example #2
    PersistedEventPosition,
    RoomStreamToken,
    StreamKeyType,
    StreamToken,
    UserID,
)
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
from synapse.util.metrics import Measure
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

notified_events_counter = Counter("synapse_notifier_notified_events", "")

users_woken_by_stream_counter = Counter(
    "synapse_notifier_users_woken_by_stream", "", ["stream"])

T = TypeVar("T")


# TODO(paul): Should be shared somewhere
def count(func: Callable[[T], bool], it: Iterable[T]) -> int:
    """Return the number of items in it for which func returns true."""
    n = 0
    for x in it:
        if func(x):
            n += 1
    return n
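
A quick usage sketch of count:

count(lambda x: x % 2 == 0, range(10))  # -> 5, the number of even values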
Example #3
import requests
import json
import datetime
from flask import Flask, Response, jsonify
from flask_apscheduler import APScheduler
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from prometheus_client import make_wsgi_app, Counter, Gauge

app = Flask(__name__)
app.wsgi_app = DispatcherMiddleware(app.wsgi_app,
                                    {'/metrics': make_wsgi_app()})

count = Counter('counter', 'Count of requests')
block = Gauge('latest_block', 'Bitcoin latest block')
price = Gauge('latest_price', 'Bitcoin latest price')
volume = Gauge('trade_volume', 'Estimated transaction volume (BTC)')


@app.route('/')
def hello_world():
    r = requests.get('https://api.blockchain.info/stats')
    response = r.json()

    latest_block = response['n_blocks_total']
    block.set(latest_block)

    latest_price = response['market_price_usd']
    price.set(latest_price)

    trade_volume = response['trade_volume_btc']
    volume.set(trade_volume)
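
The excerpt is cut off before the view returns, and Flask views must return a response; a plausible (assumed) continuation would be:

    count.inc()              # the 'counter' metric above is otherwise never incremented
    return jsonify(response)

Both lines are assumptions; the original continuation is not shown.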
Example #4
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())


#
# Federation Metrics
#

sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)


# Used to track where various components have processed in the event stream,
Example #5
import time
import prometheus_client
from prometheus_client.core import CollectorRegistry
from prometheus_client import Summary, Counter, Histogram, Gauge

_INF = float("inf")

graphs = {}
graphs['c'] = Counter('python_request_operations_total',
                      'The total number of processed requests')
graphs['h'] = Histogram('python_request_duration_seconds',
                        'Histogram for the duration in seconds.',
                        buckets=(1, 2, 5, 6, 10, _INF))


def count_process():
    start = time.time()
    graphs['c'].inc()

    time.sleep(0.600)
    end = time.time()
    graphs['h'].observe(end - start)
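
A minimal sketch for exposing these metrics from a standalone script (start_http_server and the loop are assumptions, not part of the excerpt):

from prometheus_client import start_http_server

start_http_server(8000)   # serves /metrics on port 8000
while True:
    count_process()       # each call bumps the counter and records one latency sample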
Example #6
    RequestSendFailed,
)
from synapse.events import EventBase
from synapse.federation.units import Edu
from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage import UserPresenceState
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

# This is defined in the Matrix spec and enforced by the receiver.
MAX_EDUS_PER_TRANSACTION = 100

logger = logging.getLogger(__name__)

sent_edus_counter = Counter("synapse_federation_client_sent_edus",
                            "Total number of EDUs successfully sent")

sent_edus_by_type = Counter(
    "synapse_federation_client_sent_edus_by_type",
    "Number of sent EDUs successfully sent, by event type",
    ["type"],
)


class PerDestinationQueue(object):
    """
    Manages the per-destination transmission queues.

    Args:
        hs (synapse.HomeServer):
        transaction_sender (TransactionManager):
Example #7
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.types import get_domain_from_id
from synapse.util import glob_to_regex
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache

# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

received_edus_counter = Counter("synapse_federation_server_received_edus", "")

received_queries_counter = Counter(
    "synapse_federation_server_received_queries", "", ["type"])


class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        self._server_linearizer = Linearizer("fed_server")
Example #8
import grpc
import greeter_pb2_grpc
import greeter_pb2
import prometheus_client
from prometheus_client import Counter

from flask import Response, Flask, request, make_response
from interceptor import header_adder_interceptor
from log import init_log

init_log()
app = Flask(__name__)

requests_total = Counter("request_count", "Total request count of the host")
channel = grpc.insecure_channel('test-greeter:50051')

header_interceptor = header_adder_interceptor(request)

intercept_channel = grpc.intercept_channel(channel, header_interceptor)

greeter_client = greeter_pb2_grpc.GreeterStub(intercept_channel)

retry_count = {}


@app.route("/metrics")
def requests_count():
    return Response(prometheus_client.generate_latest(requests_total),
                    mimetype="text/plain")

Example #9
from rucio.daemons.conveyor.common import submit_transfer, bulk_group_transfer, get_conveyor_rses, USER_ACTIVITY
from rucio.db.sqla.constants import RequestState

try:
    from ConfigParser import NoOptionError  # py2
except ImportError:
    from configparser import NoOptionError  # py3

graceful_stop = threading.Event()

USER_TRANSFERS = config_get('conveyor', 'user_transfers', False, None)
TRANSFER_TOOL = config_get('conveyor', 'transfertool', False, None)
TRANSFER_TYPE = config_get('conveyor', 'transfertype', False, 'single')

GET_TRANSFERS_COUNTER = Counter(
    'rucio_daemons_conveyor_submitter_get_transfers',
    'Number of transfers retrieved')


def submitter(once=False,
              rses=None,
              mock=False,
              bulk=100,
              group_bulk=1,
              group_policy='rule',
              source_strategy=None,
              activities=None,
              sleep_time=600,
              max_sources=4,
              retry_other_fts=False):
    """
Example #10
from flask import request, redirect
from sqlalchemy import or_, func
from sqlalchemy.exc import IntegrityError
from algoliasearch.exceptions import AlgoliaUnreachableHostException, AlgoliaException
from app.api import bp
from app.api.auth import is_user_oc_member, authenticate
from app.api.validations import validate_resource, requires_body
from app.models import Language, Resource, Category, Key
from app import Config, db, index
from dateutil import parser
from datetime import datetime
from prometheus_client import Counter, Summary
import app.utils as utils

# Metrics
failures_counter = Counter('my_failures', 'Number of exceptions raised')
latency_summary = Summary('request_latency_seconds', 'Length of request')

logger = utils.setup_logger('routes_logger')


# Routes
# Note: the route decorator must be outermost so Flask registers the metric-wrapped view.
@bp.route('/resources', methods=['GET'], endpoint='get_resources')
@latency_summary.time()
@failures_counter.count_exceptions()
def resources():
    return get_resources()


@latency_summary.time()
@failures_counter.count_exceptions()
Example #11
    COMMAND_MAP,
    VALID_CLIENT_COMMANDS,
    VALID_SERVER_COMMANDS,
    ErrorCommand,
    NameCommand,
    PingCommand,
    PositionCommand,
    RdataCommand,
    ReplicateCommand,
    ServerCommand,
    SyncCommand,
    UserSyncCommand,
)
from .streams import STREAMS_MAP

connection_close_counter = Counter(
    "synapse_replication_tcp_protocol_close_reason", "", ["reason_type"])

# A list of all connected protocols. This allows us to send metrics about the
# connections.
connected_connections = []


logger = logging.getLogger(__name__)


PING_TIME = 5000
PING_TIMEOUT_MULTIPLIER = 5
PING_TIMEOUT_MS = PING_TIME * PING_TIMEOUT_MULTIPLIER


class ConnectionStates(object):
Example #12
from insert_realtime_data.src.crawl_one_minute_stick import run as stick_run
from insert_realtime_data.src.crawl_five_minutes_exchange import run as exchange_run
from insert_realtime_data.src.crawl_brokers import run as broker_exchange_run
from insert_realtime_data.src.crawl_cmoney import run as cmoney_run


tpe = pytz.timezone('Asia/Taipei')


scheduler = AsyncIOScheduler()

collectors = list(REGISTRY._collector_to_names.keys())
for collector in collectors:
    REGISTRY.unregister(collector)
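
The loop above strips the default collectors through the private _collector_to_names attribute; a sketch of the public alternative is to register everything against a dedicated registry (names here are illustrative):

from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()  # starts empty, no default platform/process collectors
crawl_total = Counter('crawl_total', 'Illustrative counter', registry=registry)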

total_one_minute_stick = Counter('crawl_one_minute_k_stick', 'Total one minute k stick count')
total_five_seconds_exchange = Counter('crawl_five_seconds_exchange', 'Total five seconds exchange')
total_brokers_exchange = Counter('crawl_brokers', 'Total brokers')
total_tradersum = Counter('total_tradersum', 'crawl total_tradersum')
total_stock_info = Counter('total_stock_info', 'crawl total_stock_info')
total_stock_revenue = Counter('total_stock_revenue', 'crawl total_stock_revenue')
total_reinvestment = Counter('total_reinvestment', 'crawl total_reinvestment')
total_per_pbr = Counter('total_per_pbr', 'crawl total_per_pbr')
total_balance_sheet = Counter('total_balance_sheet', 'crawl total_balance_sheet')
total_income_statement = Counter('total_income_statement', 'crawl total_income_statement')
total_cash_flow = Counter('total_cash_flow', 'crawl total_cash_flow')
total_financial_ratios = Counter('total_financial_ratios', 'crawl total_financial_ratios')


def get_application():
    _app = FastAPI(title=settings.PROJECT_NAME, openapi_url=f'{settings.API_V1_STR}/openapi.json')
Example #13
from synapse.metrics.background_process_metrics import (
    run_as_background_process,
    wrap_as_background_process,
)
from synapse.types import JsonDict, ReadReceipt, RoomStreamToken
from synapse.util import Clock
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.events.presence_router import PresenceRouter
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

sent_pdus_destination_dist_count = Counter(
    "synapse_federation_client_sent_pdu_destinations:count",
    "Number of PDUs queued for sending to one or more destinations",
)

sent_pdus_destination_dist_total = Counter(
    "synapse_federation_client_sent_pdu_destinations:total",
    "Total number of PDUs queued for sending across all destinations",
)

# Time (in s) after Synapse's startup that we will begin to wake up destinations
# that have catch-up outstanding.
CATCH_UP_STARTUP_DELAY_SEC = 15

# Time (in s) to wait in between waking up each destination, i.e. one destination
# will be woken up every <x> seconds after Synapse's startup until we have woken
# every destination that has outstanding catch-up.
CATCH_UP_STARTUP_INTERVAL_SEC = 5
Example #14
    '--tz', dest='tz',
    help="Timezone used by the celery app."
)
parser.add_argument(
    '--queue',
    dest='queues',
    action='append',
    help='Celery queues to check length for'
)

# We have these counters because sometimes we might not
# be able to find out the queue time or runtime of a task,
# so we can't mark an observation in the histogram
task_submissions = Counter(
    'celery_task_submissions',
    'number of times a task has been submitted',
    ['task_name', 'exchange'],
)
task_completions = Counter(
    'celery_task_completions',
    'number of times a task has been completed',
    ['task_name', 'state', 'exchange'],
)
task_queuetime_seconds = Histogram(
    'celery_task_queuetime_seconds',
    'number of seconds spent in queue for celery tasks',
    ['task_name', 'exchange'],
    buckets=(
        .005, .01, .025, .05, .075, .1, .25, .5,
        .75, 1.0, 2.5, 5.0, 7.5, 10.0, 100.0, float('inf')
    )
Example #15
from typing import Any, Callable, Optional, Type, TypeVar, cast

from prometheus_client import Counter
from typing_extensions import Protocol

from synapse.logging.context import (
    ContextResourceUsage,
    LoggingContext,
    current_context,
)
from synapse.metrics import InFlightGauge
from synapse.util import Clock

logger = logging.getLogger(__name__)

block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"])

block_timer = Counter("synapse_util_metrics_block_time_seconds", "",
                      ["block_name"])

block_ru_utime = Counter("synapse_util_metrics_block_ru_utime_seconds", "",
                         ["block_name"])

block_ru_stime = Counter("synapse_util_metrics_block_ru_stime_seconds", "",
                         ["block_name"])

block_db_txn_count = Counter("synapse_util_metrics_block_db_txn_count", "",
                             ["block_name"])

# seconds spent waiting for db txns, excluding scheduling time, in this block
block_db_txn_duration = Counter(
Example #16
def test_counter(self):
    c = Counter('cc', 'A counter', registry=self.registry)
    c.inc()
    self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc 1.0\n',
                     generate_latest(self.registry))
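
This test presumably runs against a fresh registry created in setUp; a minimal sketch of the assumed fixture:

import unittest
from prometheus_client import CollectorRegistry

class ExpositionTest(unittest.TestCase):
    def setUp(self):
        self.registry = CollectorRegistry()  # isolated registry per test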
Example #17
class Stats(object):
    metrics_label_names = [
        'tenant', 'namespace', 'function', 'instance_id', 'cluster'
    ]

    TOTAL_PROCESSED = 'pulsar_function_processed_total'
    TOTAL_SUCCESSFULLY_PROCESSED = 'pulsar_function_processed_successfully_total'
    TOTAL_SYSTEM_EXCEPTIONS = 'pulsar_function_system_exceptions_total'
    TOTAL_USER_EXCEPTIONS = 'pulsar_function_user_exceptions_total'
    PROCESS_LATENCY_MS = 'pulsar_function_process_latency_ms'
    LAST_INVOCATION = 'pulsar_function_last_invocation'
    TOTAL_RECEIVED = 'pulsar_function_received_total'

    # Declare Prometheus
    stat_total_processed = Counter(TOTAL_PROCESSED,
                                   'Total number of messages processed.',
                                   metrics_label_names)
    stat_total_processed_successfully = Counter(
        TOTAL_SUCCESSFULLY_PROCESSED,
        'Total number of messages processed successfully.',
        metrics_label_names)
    stat_total_sys_exceptions = Counter(TOTAL_SYSTEM_EXCEPTIONS,
                                        'Total number of system exceptions.',
                                        metrics_label_names)
    stat_total_user_exceptions = Counter(TOTAL_USER_EXCEPTIONS,
                                         'Total number of user exceptions.',
                                         metrics_label_names)

    stat_process_latency_ms = Summary(PROCESS_LATENCY_MS,
                                      'Process latency in milliseconds.',
                                      metrics_label_names)

    stat_last_invocation = Gauge(
        LAST_INVOCATION,
        'The timestamp of the last invocation of the function.',
        metrics_label_names)

    stat_total_received = Counter(
        TOTAL_RECEIVED, 'Total number of messages received from source.',
        metrics_label_names)

    latest_user_exception = []
    latest_sys_exception = []

    def add_user_exception(self):
        self.latest_user_exception.append(
            (traceback.format_exc(), int(time.time() * 1000)))
        if len(self.latest_user_exception) > 10:
            self.latest_user_exception.pop(0)

    def add_sys_exception(self):
        self.latest_sys_exception.append(
            (traceback.format_exc(), int(time.time() * 1000)))
        if len(self.latest_sys_exception) > 10:
            self.latest_sys_exception.pop(0)

    def reset(self, metrics_labels):
        self.latest_user_exception = []
        self.latest_sys_exception = []
        self.stat_total_processed.labels(*metrics_labels)._value.set(0.0)
        self.stat_total_processed_successfully.labels(
            *metrics_labels)._value.set(0.0)
        self.stat_total_user_exceptions.labels(*metrics_labels)._value.set(0.0)
        self.stat_total_sys_exceptions.labels(*metrics_labels)._value.set(0.0)
        self.stat_process_latency_ms.labels(*metrics_labels)._sum.set(0.0)
        self.stat_process_latency_ms.labels(*metrics_labels)._count.set(0.0)
        self.stat_last_invocation.labels(*metrics_labels).set(0.0)
        self.stat_total_received.labels(*metrics_labels)._value.set(0.0)
Example #18
def test_unicode(self):
    c = Counter('cc', '\u4500', ['l'], registry=self.registry)
    c.labels('\u4500').inc()
    self.assertEqual(
        b'# HELP cc \xe4\x94\x80\n# TYPE cc counter\ncc{l="\xe4\x94\x80"} 1.0\n',
        generate_latest(self.registry))
Example #19
MEM_TOTAL = Gauge('mem_total', 'memory total')
MEM_AVAILABLE = Gauge('mem_available', 'memory available')
MEM_USED = Gauge('mem_used', 'memory used')
MEM_FREE = Gauge('mem_free', 'memory free')
DISK_PERCENT = Gauge(
    'disk_percent', 'Percent of disk space used for the '
    'volume mounted at root')
BYTES_SENT = Gauge('bytes_sent', 'System-wide network I/O bytes sent')
BYTES_RECEIVED = Gauge('bytes_received',
                       'System-wide network I/O bytes received')
TEMPERATURE = Gauge('temperature', 'Temperature readings from system sensors',
                    ['sensor'])
CHECKIN_STATUS = Gauge('checkin_status',
                       '1 for checkin success, and 0 for failure')
BOOTSTRAP_EXCEPTION = Counter('bootstrap_exception',
                              'Count for exceptions raised by bootstrapper',
                              ['cause'])
UNEXPECTED_SERVICE_RESTARTS = Counter('unexpected_service_restarts',
                                      'Count of unexpected restarts',
                                      ['service_name'])
UNATTENDED_UPGRADE_STATUS = Gauge(
    'unattended_upgrade_status', 'Unattended Upgrade update status: '
    '1 for active, 0 for inactive')

SERVICE_RESTART_STATUS = Counter('service_restart_status',
                                 'Count of service restarts',
                                 ['service_name', 'status'])


def _get_ping_params(config):
    ping_params = []
Example #20
def test_escaping(self):
    c = Counter('cc', 'A\ncount\\er', ['a'], registry=self.registry)
    c.labels('\\x\n"').inc(1)
    self.assertEqual(
        b'# HELP cc A\\ncount\\\\er\n# TYPE cc counter\ncc{a="\\\\x\\n\\""} 1.0\n',
        generate_latest(self.registry))
Example #21
from prometheus_client import Gauge, Info, Counter

GROW_INFO = Info('growing', 'Grow info')
AIR_TEMPERATURE = Gauge('air_temperature', 'Air temperature')
AIR_HUMIDITY = Gauge('air_humidity', 'Air humidity')
WATER_LEVEL = Gauge('water_level', 'Water level')
PI_TEMPERATURE = Gauge('pi_temperature', 'Raspberry PI CPU temperature')
LIGHT_BRIGHTNESS = Gauge('light_brightness', 'Light brightness')
FAN_SPEED = Gauge('fan_speed', 'Fan speed')
TARGET_TEMPERATURE = Gauge('target_temperature', 'Target temperature')
TARGET_HUMIDITY = Gauge('target_humidity', 'Target humidity')
HUMIDIFIER_USAGE = Counter('humidifier_usage', 'Humidifier usage')
OUTSIDE_AIR_TEMPERATURE = Gauge('outside_air_temperature',
                                'Outside air temperature')
OUTSIDE_AIR_HUMIDITY = Gauge('outside_air_humidity', 'Outside air humidity')
Example #22
from synapse.util.async_helpers import Linearizer
from synapse.util.caches import CacheMetric, register_cache
from synapse.util.caches.descriptors import lru_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import measure_func

from ..storage.state import StateFilter
from .push_rule_evaluator import PushRuleEvaluatorForEvent

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

push_rules_invalidation_counter = Counter(
    "synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter",
    "")
push_rules_state_size_counter = Counter(
    "synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter", "")

# Measures whether we use the fast path of using state deltas, or if we have to
# recalculate from scratch
push_rules_delta_state_cache_metric = register_cache(
    "cache",
    "push_rules_delta_state_cache_metric",
    cache=[],  # Meaningless size, as this isn't a cache that stores values
    resizable=False,
)

STATE_EVENT_TYPES_TO_MARK_UNREAD = {
    EventTypes.Topic,
Example #23
def _counter(self, var, var_help, labels):
    return Counter(var, var_help, labels, registry=self._reg)  # pylint: disable=unexpected-keyword-arg
Example #24
app = Flask('__main__')
app.logger.setLevel(logging.INFO)

CART = os.getenv('CART_HOST', 'cart')
USER = os.getenv('USER_HOST', 'user')
CART_URL = 'http://' + CART + ':8080'
CART_URL = 'https://172.17.0.1/api/v1/web/guest/robotshop/cart'
USER_URL = 'http://' + USER + ':8080'
USER_URL = 'https://172.17.0.1/api/v1/web/guest/robotshop/user'
os.environ['SHOP_PAYMENT_PORT'] = '8082'
PAYMENT_GATEWAY = os.getenv('PAYMENT_GATEWAY', 'https://paypal.com/')

# Prometheus
PromMetrics = {}
PromMetrics['SOLD_COUNTER'] = Counter('sold_count',
                                      'Running count of items sold')
PromMetrics['AUS'] = Histogram('units_sold',
                               'Average Unit Sale',
                               buckets=(1, 2, 5, 10, 100))
PromMetrics['AVS'] = Histogram('cart_value',
                               'Average Value Sale',
                               buckets=(100, 200, 500, 1000, 2000, 5000,
                                        10000))


@app.errorhandler(Exception)
def exception_handler(err):
    app.logger.error(str(err))
    return str(err), 500

Example #25
import cgi
import simplejson as json
import logging
import random
import sys
import urllib
from six.moves.urllib import parse as urlparse
from six import string_types

from prometheus_client import Counter

logger = logging.getLogger(__name__)
outbound_logger = logging.getLogger("synapse.http.outbound")

outgoing_requests_counter = Counter(
    "synapse_http_matrixfederationclient_requests", "", ["method"])
incoming_responses_counter = Counter(
    "synapse_http_matrixfederationclient_responses", "", ["method", "code"])

MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3


class MatrixFederationEndpointFactory(object):
    def __init__(self, hs):
        self.tls_server_context_factory = hs.tls_server_context_factory

    def endpointForURI(self, uri):
        destination = uri.netloc

        return matrix_federation_endpoint(
Example #26
    def export_defaults(self,
                        buckets=None,
                        group_by='path',
                        prefix='flask',
                        app=None,
                        **kwargs):
        """
        Export the default metrics:
            - HTTP request latencies
            - Number of HTTP requests

        :param buckets: the time buckets for request latencies
            (will use the default when `None`)
        :param group_by: group default HTTP metrics by
            this request property, like `path`, `endpoint`, `rule`, etc.
            (defaults to `path`)
        :param prefix: prefix to start the default metrics names with
            or `NO_PREFIX` (to skip prefix)
        :param app: the Flask application
        """

        if app is None:
            app = self.app or current_app

        if not prefix:
            prefix = self._defaults_prefix or 'flask'

        # use the default buckets from prometheus_client if not given here
        buckets_as_kwargs = {}
        if buckets is not None:
            buckets_as_kwargs['buckets'] = buckets

        if kwargs.get('group_by_endpoint') is True:
            warnings.warn(
                'The `group_by_endpoint` argument of '
                '`PrometheusMetrics.export_defaults` is deprecated since 0.4.0, '
                'please use the new `group_by` argument.', DeprecationWarning)

            duration_group = 'endpoint'

        elif group_by:
            duration_group = group_by

        else:
            duration_group = 'path'

        if callable(duration_group):
            duration_group_name = duration_group.__name__

        else:
            duration_group_name = duration_group

        if prefix == NO_PREFIX:
            prefix = ""
        else:
            prefix = prefix + "_"

        additional_labels = self._static_labels.items()

        histogram = Histogram('%shttp_request_duration_seconds' % prefix,
                              'Flask HTTP request duration in seconds',
                              ('method', duration_group_name, 'status') +
                              tuple(map(lambda kv: kv[0], additional_labels)),
                              registry=self.registry,
                              **buckets_as_kwargs)

        counter = Counter('%shttp_request_total' % prefix,
                          'Total number of HTTP requests',
                          ('method', 'status') +
                          tuple(map(lambda kv: kv[0], additional_labels)),
                          registry=self.registry)

        self.info('%sexporter_info' % prefix,
                  'Information about the Prometheus Flask exporter',
                  version=self.version,
                  **self._static_labels)

        def before_request():
            request.prom_start_time = default_timer()

        def after_request(response):
            if hasattr(request, 'prom_do_not_track'):
                return response

            if hasattr(request, 'prom_start_time'):
                total_time = max(default_timer() - request.prom_start_time, 0)

                if callable(duration_group):
                    group = duration_group(request)
                else:
                    group = getattr(request, duration_group)

                histogram.labels(request.method, group, response.status_code,
                                 *map(lambda kv: kv[1],
                                      additional_labels)).observe(total_time)

            counter.labels(request.method, response.status_code,
                           *map(lambda kv: kv[1], additional_labels)).inc()

            return response

        app.before_request(before_request)
        app.after_request(after_request)
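
This method matches prometheus_flask_exporter's PrometheusMetrics class; typical usage, as a sketch, is:

from flask import Flask
from prometheus_flask_exporter import PrometheusMetrics

app = Flask(__name__)
metrics = PrometheusMetrics(app)  # registers the default metrics and serves /metrics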
Example #27
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from prometheus_client import Counter, Histogram

# Counters for Diameter/S6a application
S6A_AUTH_SUCCESS_TOTAL = Counter(
    's6a_auth_success',
    'Total successful S6a auth requests',
)
S6A_AUTH_FAILURE_TOTAL = Counter(
    's6a_auth_failure',
    'Total failed S6a auth requests with reason',
    ['code'],
)
S6A_LUR_TOTAL = Counter(
    's6a_location_update',
    'Total S6a location update requests',
)

DIAMETER_AUTHENTICATION_REJECTED = 4001
DIAMETER_ERROR_USER_UNKNOWN = 5001
DIAMETER_AUTHORIZATION_REJECTED = 5003
# Counters for Diameter base application
Example #28
# Set environment variables
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PASSWORD = os.environ['REDIS_PASSWORD']

app = Flask(__name__)

# initialize logger
json_logging.init_flask(enable_json=True)
json_logging.init_request_instrument(app)
logger = logging.getLogger("flask-counter")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))

cache = redis.Redis(host=REDIS_HOST, port=6379, password=REDIS_PASSWORD)
c = Counter("redis_calls", "Number of calls to redis", ["status"])


def get_hit_count():
    retries = 5
    while True:
        try:
            hits = cache.incr('hits')
            c.labels(status="success").inc()
            return hits
        except redis.exceptions.ConnectionError as exc:
            if retries == 0:
                c.labels(status="failure").inc()
                raise exc
            retries -= 1
            time.sleep(0.5)
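
A hedged sketch of how a route might use the helper above (the route itself is not part of the excerpt):

@app.route('/')
def index():
    return 'Hits: {}\n'.format(get_hit_count())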
"""Som Model Adapter - Working with custom implementation of SOM."""
import logging
import uuid
import numpy as np
from anomaly_detector.adapters import BaseModelAdapter
from anomaly_detector.decorator.utils import latency_logger
from anomaly_detector.exception import ModelLoadException, ModelSaveException
from anomaly_detector.model import SOMPYModel, W2VModel
import os
from prometheus_client import Gauge, Counter, Histogram
from urllib.parse import quote

ANOMALY_COUNT = Gauge("aiops_lad_anomaly_count", "count of anomalies runs", ['anomaly_status'])
ANOMALY_SCORE = Gauge("aiops_lad_anomaly_avg_score", "avg anomaly score")
LOG_LINES_COUNT = Gauge("aiops_lad_loglines_count", "count of log lines processed runs")
FALSE_POSITIVE_COUNT = Counter("aiops_lad_false_positive_count", "count of false positives processed runs", ['id'])
ANOMALY_HIST = Histogram("aiops_hist", "histogram of anomalies runs")
THRESHOLD = Gauge("aiops_lad_threshold", "Threshold of marker for anomaly")


class SomModelAdapter(BaseModelAdapter):
    """Self organizing map custom logic to train model. Includes logic to train and predict anomalies in logs."""

    def __init__(self, storage_adapter):
        """Init storage provider which provides config and storage interface with storage systems."""
        self.storage_adapter = storage_adapter
        update_model = self.storage_adapter.TRAIN_UPDATE_MODEL
        self.update_model = os.path.isfile(self.storage_adapter.MODEL_PATH) and update_model
        self.update_w2v_model = os.path.isfile(self.storage_adapter.W2V_MODEL_PATH) and update_model
        self.recreate_models = False
        self.model = SOMPYModel(config=storage_adapter.config)
Example #30
from prometheus_client import Counter

counter_one = Counter('arbitrary_counter_one',
                      'number of times button clicked')
counter_two = Counter('arbitrary_counter_two',
                      'number of times button clicked time two')


def increment_counter_one():
    counter_one.inc()


def get_current_counter_one_value():
    return counter_one._value.get()


def increment_counter_two():
    counter_two.inc(2)


def get_current_counter_two_value():
    return counter_two._value.get()
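
Reading _value is a private-API shortcut; the public way to read a counter goes through the registry (recent prometheus_client versions expose the sample with a _total suffix):

from prometheus_client import REGISTRY

REGISTRY.get_sample_value('arbitrary_counter_one_total')  # e.g. 1.0 after one inc()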