Example #1
from flask import Flask
from prometheus_client import Gauge, CollectorRegistry, generate_latest
import helper
import logging
import app_config
import json
import os

LOGGER = logging.getLogger(__name__)

# the flask app
app = Flask(__name__)

# create the Gauge object to track the offset change
registry = CollectorRegistry()
producerOffsetTracker = Gauge('kafka_offset',
                              'the latest available offsets',
                              ['service', 'topic', 'partition'],
                              registry=registry)
consumerGroupOffsetTracker = Gauge(
    'cg_kafka_offset',
    'the consumer group offsets',
    ['service', 'topic', 'partition', 'consumergroup'],
    registry=registry)


def readEnvConfig(env_var_name, default=""):
    env_var_val = os.environ.get(env_var_name, default)
    return env_var_val.strip()


@app.route('/ping')
def ping():
    # health-check endpoint; this minimal body is an assumption, not from the original source
    return 'pong'
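
A minimal sketch of serving the registry built above from the same Flask app. generate_latest and CONTENT_TYPE_LATEST are standard prometheus_client exports, but this /metrics handler is an assumption, not part of the original example:

from prometheus_client import CONTENT_TYPE_LATEST

@app.route('/metrics')
def metrics():
    # serialize every collector registered on this CollectorRegistry
    return generate_latest(registry), 200, {'Content-Type': CONTENT_TYPE_LATEST}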
Example #2
class SessionMetrics(object):

    lastusers = set()
    g_sessions = Gauge('omero_sessions_active', 'Active OMERO sessions',
                       ['username'])
    g_users_total = Gauge('omero_users_total', 'Number of OMERO users',
                          ['active'])
    g_groups_total = Gauge('omero_groups_total', 'Number of OMERO groups')

    def __init__(self, client, verbose=False):
        self.client = client
        self.verbose = verbose

    @SESSION_REQUEST_TIME.time()
    def update(self):
        # https://github.com/openmicroscopy/openmicroscopy/blob/v5.4.0-m1/components/tools/OmeroPy/src/omero/plugins/sessions.py#L714

        cb = None
        try:
            cb = self.client.submit(omero.cmd.CurrentSessionsRequest(), 60,
                                    500)
            rsp = cb.loop(60, 500)
            counts = collections.Counter(
                c.userName.decode('utf-8', 'replace') for c in rsp.contexts)
            missing = self.lastusers.difference(counts.keys())
            for m in missing:
                if self.verbose:
                    print('%s: %d' % (m, 0))
                self.g_sessions.labels(m).set(0)
            for username, n in counts.items():
                if self.verbose:
                    print('%s: %d' % (username, n))
                self.g_sessions.labels(username).set(n)
                self.lastusers.add(username)
        except omero.CmdError as ce:
            # Unwrap the CmdError due to failonerror=True
            raise Exception(ce.err)
        finally:
            if cb:
                cb.close(True)

        adminservice = self.client.getSession().getAdminService()

        user_group_id = adminservice.getSecurityRoles().userGroupId
        users_active = 0
        users_inactive = 0
        for user in adminservice.lookupExperimenters():
            if user_group_id in (unwrap(g.getId())
                                 for g in user.linkedExperimenterGroupList()):
                users_active += 1
            else:
                users_inactive += 1
        self.g_users_total.labels(1).set(users_active)
        self.g_users_total.labels(0).set(users_inactive)

        group_count = len(adminservice.lookupGroups())
        self.g_groups_total.set(group_count)

        if self.verbose:
            print('Users (active/inactive): %d/%d' %
                  (users_active, users_inactive))
            print('Groups: %d' % group_count)
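
A note on the lastusers bookkeeping above: setting a departed user's gauge to 0 keeps that labelled series in the exposition. If dropping the series entirely is preferred, prometheus_client can remove a child by its label values; a hedged alternative sketch:

for m in missing:
    # delete the per-username child series instead of zeroing it
    self.g_sessions.remove(m)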
Example #3
from prometheus_client import Counter
from prometheus_client import Gauge

c = Counter('my_failures', 'Description of counter')
print(c)
c.inc()  # Increment by 1
print(c)
c.inc(1.6)  # Increment by given value
print(c)
print(c.collect())

g = Gauge("my_gauge", "my description of gauge")
g.set(1)
g.inc(1)
g.dec(2)
# track_inprogress() increments the gauge on entry and decrements it on exit;
# it only has an effect when used as a decorator (below) or a context manager.


@g.track_inprogress()
def f():
    pass
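
The tracker also works as a context manager, and none of these metrics are visible until something serves the default registry; a short usage sketch (the port is an arbitrary choice):

from prometheus_client import start_http_server

with g.track_inprogress():
    pass  # gauge reads one higher inside this block

start_http_server(8000)  # exposes the default registry at http://localhost:8000/metrics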
Example #4
)

LOG = logging.getLogger(__name__)

OUTPUT_BUFFER_SIZE = 8_388_608

RPKI_CLIENT_DURATION = Histogram(
    "rpkiclient_duration_seconds",
    "Time spent calling rpki-client",
    buckets=[
        1, 3, 6, 12, 18, 24, 30, 44, 60, 72, 84, 96, 108, 120, 150, 180, 240,
        300
    ],
)
RPKI_CLIENT_LAST_DURATION = Gauge(
    "rpkiclient_last_duration_seconds",
    "Duration of the last call to rpki-client",
)
RPKI_CLIENT_LAST_UPDATE = Gauge(
    "rpkiclient_last_update",
    "Timestamp of the last successful call to rpki-client",
)
RPKI_CLIENT_UPDATE_COUNT = Counter("rpkiclient_update",
                                   "Number of rpki-client updates",
                                   ["returncode"])
RPKI_CLIENT_RUNNING = Gauge("rpkiclient_running",
                            "Number of running rpki-client instances")
RPKI_OBJECTS_COUNT = Gauge("rpki_objects", "Number of objects by type",
                           ["type"])
RPKI_CLIENT_WARNINGS = Gauge("rpkiclient_warnings",
                             "Warnings from rpki-client", ["hostname", "type"])
RPKI_CLIENT_PULLING = Gauge(
Example #5
 def setMetricInfo(self, metricname, labels):
     newMetricName = metric_prefix + metricname
     if newMetricName not in self.instance.metrics:
         self.instance.metrics[newMetricName] = Gauge(
             newMetricName, newMetricName + " measurement metric",
             labelnames=labels.keys())
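
The create-if-absent check matters because registering the same metric name twice on one registry raises ValueError; a hedged illustration:

g1 = Gauge('my_metric', 'doc')
g2 = Gauge('my_metric', 'doc')  # raises ValueError: Duplicated timeseries in CollectorRegistry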
Example #6
File: metrics.py Project: go-magma/magma
"""
Copyright 2020 The Magma Authors.

This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from prometheus_client import Gauge, Counter

# Gauges for current eNodeB status
STAT_ENODEB_CONNECTED = Gauge('enodeb_mgmt_connected',
                              'ENodeB management plane connected')
STAT_ENODEB_CONFIGURED = Gauge('enodeb_mgmt_configured',
                               'ENodeB is in configured state')
STAT_OPSTATE_ENABLED = Gauge('enodeb_opstate_enabled',
                             'ENodeB operationally enabled')
STAT_RF_TX_ENABLED = Gauge('enodeb_rf_tx_enabled',
                           'ENodeB RF transmitter enabled')
STAT_RF_TX_DESIRED = Gauge('enodeb_rf_tx_desired',
                           'ENodeB RF transmitter desired state')
STAT_GPS_CONNECTED = Gauge('enodeb_gps_connected', 'ENodeB GPS synchronized')
STAT_PTP_CONNECTED = Gauge('enodeb_ptp_connected',
                           'ENodeB PTP/1588 synchronized')
STAT_MME_CONNECTED = Gauge('enodeb_mme_connected', 'ENodeB connected to MME')
STAT_ENODEB_REBOOT_TIMER_ACTIVE = Gauge('enodeb_reboot_timer_active',
                                        'Timer for ENodeB reboot active')
STAT_ENODEB_REBOOTS = Counter('enodeb_reboots', 'ENodeB reboots by enodebd',
Example #7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import random
import time
# import board
import adafruit_dht
from prometheus_client import start_http_server, Gauge

# Create a metric to track time spent and requests made.
g_temperature = Gauge('dht_temperature', 'Temperature in degrees celsius',
                      ['room'])
g_humidity = Gauge('dht_humidity', 'Relative Humidity in percent', ['room'])


def update_sensor_data(dht_device, room):
    """Get sensor data and set on prometheus client."""

    try:
        temperature = dht_device.temperature
        humidity = dht_device.humidity
    except RuntimeError as error:
        # Errors happen fairly often; DHTs are hard to read. Give up until the next refresh.
        # print(error.args[0])
        return

    # Valid values for AM2303 are -40 to +80C +/- 0.5
    if abs(temperature) < 100:
        g_temperature.labels(room).set('{0:0.1f}'.format(temperature))

    # Valid humidity values for AM2303 are 0 to 99.9% +/- 2%RH
Example #8
#!/usr/bin/python2.7
# for UDP or TCP
import sys
import time
import pcapy
import pandas as pd
from pcapfile import savefile
from collections import OrderedDict
import table
from prometheus_client import start_http_server, Gauge

g = Gauge('Test_date1', 'A test date from virtual', ['s', 'p'])
start_http_server(8003)
#g2=Gauge('Test_date2','A test date from virtual')
#g3=Gauge('Test_date3','A test date from virtual')
#g4=Gauge('Test_date4','A test date from virtual')
#g5=Gauge('Test_date5','A test date from virtual')
#g6=Gauge('Test_date6','A test date from virtual')
#g7=Gauge('Test_date7','A test date from virtual')

#
hdr_eth = OrderedDict()
hdr_ipv4 = OrderedDict()
hdr_tcp = OrderedDict()
hdr_udp = OrderedDict()
#hdr_vxlan_gpe=OrderedDict()
#hdr_vxlan_gpe_int=OrderedDict()
hdr_int_header = OrderedDict()
hdr_int_switch_id = OrderedDict()
hdr_int_ingress_port = OrderedDict()
hdr_int_ingress_ts = OrderedDict()
Example #9
        edis = Edistribucion(USER, PASSWORD)
        edis.login()
        meter = edis.get_meter(cups)
        print('Meter: ', meter)
        potenciaActual.labels('edistribucion').set(
            meter['data']['potenciaActual'])
        totalizador.labels('edistribucion').set(meter['data']['totalizador'])
        estadoICP.labels('edistribucion').set(
            meter['data']['estadoICP'] == "Abierto")
        potenciaContratada.labels('edistribucion').set(
            meter['data']['potenciaContratada'])
    except Exception as e:
        print(e)


kw = Gauge('preuKW_normal', 'preu €/KW tarifa ', ['tarifa'])
potenciaActual = Gauge('potenciaActual', 'potencia Actual', ['job'])
totalizador = Gauge('totalizador',
                    'total enegira consumida per perioda de facturacio',
                    ['job'])
estadoICP = Gauge('estadoICP', 'estat del ICP del contador ', ['job'])
potenciaContratada = Gauge('potenciaContratada', 'KW de potencia contractada',
                           ['job'])

start_http_server(9092)
while True:
    get_endesa_price("https://tarifaluzhora.es/")
    get_endesa_price("https://tarifaluzhora.es/?tarifa=discriminacion")
    get_endesa_price("https://tarifaluzhora.es/?tarifa=coche_electrico")
    get_edistribucion()
    time.sleep(60)
Example #10
from data.encryption import DecryptionFailureException
from data.model.repo_mirror import claim_mirror, release_mirror
from data.model.user import retrieve_robot_token
from data.logs_model import logs_model
from data.registry_model import registry_model
from data.database import RepoMirrorStatus
from data.model.oci.tag import delete_tag, retarget_tag, lookup_alive_tags_shallow
from notifications import spawn_notification
from util.audit import wrap_repository

from workers.repomirrorworker.repo_mirror_model import repo_mirror_model as model

logger = logging.getLogger(__name__)

unmirrored_repositories = Gauge(
    "quay_repository_rows_unmirrored",
    "number of repositories in the database that have not yet been mirrored",
)


class PreemptedException(Exception):
    """
    Exception raised if another worker analyzed the image before this worker was able to do so.
    """


class RepoMirrorSkopeoException(Exception):
    """
    Exception from skopeo.
    """
    def __init__(self, message, stdout, stderr):
        self.message = message
Example #11
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is Thoth investigator consumer metrics."""

from . import __service_version__

from prometheus_client import Gauge, Counter, CollectorRegistry

registry = CollectorRegistry()

# add the application version info metric
investigator_info = Gauge(
    "investigator_consumer_info",
    "Investigator Version Info",
    labelnames=["version"],
    registry=registry,
)
investigator_info.labels(version=__service_version__).inc()

# Metrics for Kafka
in_progress = Gauge(
    "investigators_in_progress",
    "Total number of investigation messages currently being processed.",
    labelnames=["message_type"],
    registry=registry,
)
exceptions = Counter(
    "investigator_exceptions",
    "Number of investigation messages which failed to be processed.",
    labelnames=["message_type"],
#!/usr/bin/env python

import logging
import os
import time

from prometheus_client import start_http_server, Gauge
from kubernetes import client, config
from kubernetes.client import ApiClient, Configuration
from openshift.dynamic import DynamicClient

MACHINE_STATUS = Gauge('machine_api_status',
                       "1 if machine has an associated node",
                       labelnames=['machine_name', 'namespace'])

# A set of all active Machines
ACTIVE_MACHINES = set()

def get_machines(dynamic_client, namespace):
    """Gets all of the Machine objects from the cluster from the specified namespace.
    """
    machines = dynamic_client.resources.get(kind='Machine')
    return machines.get(namespace=namespace).items
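
For context, a hedged sketch of how a dynamic_client for these helpers is typically built from the imports above (in-cluster configuration assumed):

config.load_incluster_config()   # or config.load_kube_config() when running outside the cluster
k8s_client = client.ApiClient()
dynamic_client = DynamicClient(k8s_client)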

def collect(dynamic_client, namespace):
    """
    Collect the current data from the AWS API.
    """

    # List of volumes that we've actually had data back for the API
    seen_machines = set()
Example #13
config = default_config
if fileconf:
    config.update(fileconf)
logger.debug("Running with config %s" % config)

# initialise serial
meter = kamstrup.kamstrup(config['serialport'])

# get register var
register_var = getattr(kamstrup, config['registervar'])

# define metrics
metrics = {}
for register, name in register_var.items():
    try:
        value, unit = meter.readvar(register)
    except IndexError:
        logger.error("Register %s does not exist on the meter" % register)
        continue
    if unit == "ASCII":
        metrics[register] = Gauge('kamstrup_register_%s_%s' % (register, name),
                                  str(register), ['asciistring'])
    else:
        metrics[register] = Gauge('kamstrup_register_%s_%s' % (register, name),
                                  str(register))

start_http_server(config['webport'])

while True:
    process_request()
Example #14
File: _base.py Project: yvwvnacb/synapse
import abc
import logging

from inspect import signature
from typing import Dict, List, Tuple

from prometheus_client import Counter, Gauge

from synapse.api.errors import HttpResponseException, SynapseError
from synapse.http import RequestTimedOutError
from synapse.logging.opentracing import inject_active_span_byte_dict, trace
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string

logger = logging.getLogger(__name__)

_pending_outgoing_requests = Gauge(
    "synapse_pending_outgoing_replication_requests",
    "Number of active outgoing replication requests, by replication method name",
    ["name"],
)
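
A gauge like this is typically driven around each call site, for instance with track_inprogress() on a labelled child; a hedged sketch (the method name is hypothetical):

with _pending_outgoing_requests.labels("send_event").track_inprogress():
    pass  # issue the outgoing replication request here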

_outgoing_request_counter = Counter(
    "synapse_outgoing_replication_requests",
    "Number of outgoing replication requests, by replication method name and result",
    ["name", "code"],
)


class ReplicationEndpoint(metaclass=abc.ABCMeta):
    """Helper base class for defining new replication HTTP endpoints.

    This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..`
    (with a `/:txn_id` suffix for cached requests), where NAME is a name,
Example #15
import Adafruit_DHT
import datetime
import Adafruit_BMP.BMP085 as BMP085
from prometheus_client import start_http_server, Summary, Gauge

DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 4
baro_sensor = BMP085.BMP085(busnum=1)
hum_metric = Gauge('humidity', 'Current humidity')
pressure_metric = Gauge('pressure', 'Current pressure')
temp_metric = Gauge('temperature', 'Current temperature')

def do_work():
    humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
    if humidity is None:
        print("cannot read humidity")
        return
    if humidity > 100:
        print("Abnormal value for humidity")
        return
    hum_metric.set(humidity)
    pressure = baro_sensor.read_pressure()
    if pressure is None:
        return
    pressure_metric.set(pressure)
    alt = baro_sensor.read_altitude()
    baro_temp = baro_sensor.read_temperature()
    temp_metric.set(baro_temp)
    print("Date = {0}, Temp = {1:0.1f}*C, Humidity = {2:0.1f}%, Pressure = {3:0.2f} Pa, Altitide = {4:0.2f} m, Barotemp = {5:0.1f}*C".format(datetime.datetime.now(), temperature, humidity, pressure, alt, baro_temp))
    #print("Temp = {0:0.2f} *C".format(baro_sensor.read_temperature()))
    #print("Pressure = {0:0.2f} Pa".format(baro_sensor.read_pressure()))
Example #16
 def initialize_metrics(self):
     self.prom_metrics['info'] = Info('poseidon_version',
                                      'Info about Poseidon')
     self.prom_metrics['ipv4_table'] = Gauge(
         'poseidon_endpoint_ip_table', 'IP Table',
         ['mac', 'tenant', 'segment', 'port', 'role', 'ipv4_os', 'hash_id'])
     self.prom_metrics['roles'] = Gauge('poseidon_endpoint_roles',
                                        'Number of endpoints by role',
                                        ['role'])
     self.prom_metrics['oses'] = Gauge('poseidon_endpoint_oses',
                                       'Number of endpoints by OS',
                                       ['ipv4_os'])
     self.prom_metrics['current_states'] = Gauge(
         'poseidon_endpoint_current_states',
         'Number of endpoints by current state', ['current_state'])
     self.prom_metrics['vlans'] = Gauge('poseidon_endpoint_vlans',
                                        'Number of endpoints by VLAN',
                                        ['tenant'])
     self.prom_metrics['port_tenants'] = Gauge(
         'poseidon_endpoint_port_tenants', 'Number of tenants by port',
         ['port', 'tenant'])
     self.prom_metrics['port_hosts'] = Gauge('poseidon_endpoint_port_hosts',
                                             'Number of hosts by port',
                                             ['port'])
     self.prom_metrics['last_rabbitmq_routing_key_time'] = Gauge(
         'poseidon_last_rabbitmq_routing_key_time',
         'Epoch time when last received a RabbitMQ message',
         ['routing_key'])
     self.prom_metrics['ncapture_count'] = Counter(
         'poseidon_ncapture_count', 'Number of times ncapture ran')
     self.prom_metrics['method_runtime_secs'] = Summary(
         'poseidon_method_runtime_secs', 'Time spent in Monitor methods',
         ['method'])
     self.prom_metrics['endpoint_role_confidence_top'] = Gauge(
         'poseidon_role_confidence_top',
         'Confidence of top role prediction', [
             'mac', 'name', 'role', 'pcap_labels', 'ipv4_os',
             'ipv4_address', 'ipv6_address', 'hash_id'
         ])
     self.prom_metrics['endpoint_role_confidence_second'] = Gauge(
         'poseidon_role_confidence_second',
         'Confidence of second role prediction', [
             'mac', 'name', 'role', 'pcap_labels', 'ipv4_os',
             'ipv4_address', 'ipv6_address', 'hash_id'
         ])
     self.prom_metrics['endpoint_role_confidence_third'] = Gauge(
         'poseidon_role_confidence_third',
         'Confidence of third role prediction', [
             'mac', 'name', 'role', 'pcap_labels', 'ipv4_os',
             'ipv4_address', 'ipv6_address', 'hash_id'
         ])
     self.prom_metrics['endpoints'] = Gauge(
         'poseidon_endpoints', 'All endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'controller_type',
             'controller', 'name', 'port', 'hash_id'
         ])
     self.prom_metrics['endpoint_state'] = Gauge(
         'poseidon_endpoint_state', 'State for all endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'name', 'port',
             'state', 'hash_id'
         ])
     self.prom_metrics['endpoint_os'] = Gauge(
         'poseidon_endpoint_os', 'Operating System for all endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'name', 'port',
             'ipv4_os', 'hash_id'
         ])
     self.prom_metrics['endpoint_role'] = Gauge(
         'poseidon_endpoint_role', 'Top role for all endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'name', 'port',
             'top_role', 'hash_id'
         ])
     self.prom_metrics['endpoint_ip'] = Gauge(
         'poseidon_endpoint_ip', 'IP Address for all endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'name', 'port',
             'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet',
             'ipv4_rdns', 'ipv6_rdns', 'hash_id'
         ])
     self.prom_metrics['endpoint_metadata'] = Gauge(
         'poseidon_endpoint_metadata', 'Metadata for all endpoints', [
             'mac', 'tenant', 'segment', 'ether_vendor', 'prev_state',
             'next_state', 'acls', 'ignore', 'ipv4_subnet', 'ipv6_subnet',
             'ipv4_rdns', 'ipv6_rdns', 'controller_type', 'controller',
             'name', 'state', 'port', 'top_role', 'ipv4_os', 'ipv4_address',
             'ipv6_address', 'hash_id'
         ])
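
Each prom_metrics entry is later updated by key, supplying one value per declared label; a hedged sketch with hypothetical values:

self.prom_metrics['roles'].labels(role='workstation').set(12)
self.prom_metrics['ncapture_count'].inc()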
Example #17
                                   'process_count').set(process_count)
            g_container_pid.labels(container_name,
                                   'container_pid').set(container_pid)
            g_container_status.labels(container_name,
                                      'container_status').set(container_status)
        except Exception:
            pass


if __name__ == '__main__':
    # Start up the server to expose the metrics.
    registry = CollectorRegistry()
    start_http_server(8000, registry=registry)

    g_cpu = Gauge('lxc_container_cpu_usage',
                  'Usage of lxc cpu',
                  labelnames=['container_name', 'metrictype'],
                  registry=registry)
    g_mem_usage = Gauge('lxc_container_mem_usage',
                        'Usage of lxc Memory',
                        labelnames=['container_name', 'metrictype'],
                        registry=registry)
    g_mem_usage_peak = Gauge('lxc_container_mem_usage_peak',
                             'Usage of lxc Memory Peak',
                             labelnames=['container_name', 'metrictype'],
                             registry=registry)
    g_swap_usage = Gauge('lxc_container_swap_usage',
                         'Usage of SWAP',
                         labelnames=['container_name', 'metrictype'],
                         registry=registry)
    g_swap_usage_peak = Gauge('lxc_container_swap_usage_peak',
                              'Usage of SWAP Peak',
Example #18
File: reaper2.py Project: rizart/rucio
GRACEFUL_STOP = threading.Event()

REGION = make_region().configure('dogpile.cache.memcached',
                                 expiration_time=600,
                                 arguments={
                                     'url':
                                     config_get('cache', 'url', False,
                                                '127.0.0.1:11211'),
                                     'distributed_lock':
                                     True
                                 })

DELETION_COUNTER = Counter('rucio_daemons_reaper_deletion_done',
                           'Number of deleted replicas')
EXCLUDED_RSE_GAUGE = Gauge('rucio_daemons_reaper_excluded_rses',
                           'Temporarily excluded RSEs',
                           labelnames=('rse', ))


def get_rses_to_process(rses, include_rses, exclude_rses, vos):
    """
    Return the list of RSEs to process based on rses, include_rses and exclude_rses

    :param rses:               List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param exclude_rses:       RSE expression to exclude RSEs from the Reaper.
    :param include_rses:       RSE expression to include RSEs.
    :param vos:                VOs on which to look for RSEs. Only used in multi-VO mode.
                               If None, we either use all VOs if run from "def"

    :returns: A list of RSEs to process
    """
Example #19
    'binderhub_launch_time_seconds',
    'Histogram of launch times',
    ['status', 'retries'],
    buckets=LAUNCH_BUCKETS,
)
BUILD_COUNT = Counter(
    'binderhub_build_count',
    'Counter of builds by repo',
    ['status', 'provider', 'repo'],
)
LAUNCH_COUNT = Counter(
    'binderhub_launch_count',
    'Counter of launches by repo',
    ['status', 'provider', 'repo'],
)
BUILDS_INPROGRESS = Gauge('binderhub_inprogress_builds',
                          'Builds currently in progress')
LAUNCHES_INPROGRESS = Gauge('binderhub_inprogress_launches',
                            'Launches currently in progress')


def _generate_build_name(build_slug, ref, prefix='', limit=63, ref_length=6):
    """Generate a unique build name with a limited character length.

    Guaranteed (to acceptable level) to be unique for a given user, repo,
    and ref.

    We really, *really* care that we always end up with the same
    'build_name' for a particular repo + ref, but the default max
    character limit for build names is 63. To meet this constraint, we
    include a prefixed hash of the user / repo in all build names and do
    some length limiting :)
Example #20
File: push.py Project: chon2/romaine
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

registry = CollectorRegistry()
gauge_temperature = Gauge('temperature', 'Current temperature in Celsius',
                          ['location'], registry=registry)
gauge_humidity = Gauge('humidity', 'Current humidity level in %',
                       ['location'], registry=registry)

def to_gateway(location, temperature, humidity, job='romaine'):
    push_gw_svr_path = "pushgateway.romaine.ly.lv"

    gauge_temperature.labels(location).set(temperature)
    gauge_humidity.labels(location).set(humidity)

    push_to_gateway(push_gw_svr_path, job=job, registry=registry)
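
A hedged usage example; the location and readings are hypothetical, and the Pushgateway host must be reachable:

to_gateway('greenhouse', 21.5, 48.0)   # pushes both gauges under job 'romaine'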
Example #21
    def write_metrics(self):
        """Write metrics in textfile."""
        registry = CollectorRegistry()

        metric_scanned_range = Gauge(
            'scanned_range',
            'Total of offset ranges scanned',
            registry=registry)
        metric_scanned_range.set(self._n_range_size)

        metric_total = Gauge(
            'metric_total',
            'Total of metric found in offset ranges scanned',
            registry=registry)
        metric_total.set(self._n_count)

        metric_total_expired = Gauge(
            'metric_expired',
            'Total of expired metric found in offset ranges scanned',
            registry=registry)
        metric_total_expired.set(self._n_count_expired)

        multiplier = 2**64 / self._n_range_size

        metric_estimated_total = Gauge(
            'metric_estimated_total',
            'Estimated total of metric in database',
            registry=registry)
        metric_estimated_total.set(int(self._n_count * multiplier))

        metric_estimated_total_expired = Gauge(
            'metric_estimated_expired',
            'Estimated total of expired metric in database',
            registry=registry)
        metric_estimated_total_expired.set(int(self._n_count_expired * multiplier))

        directories_total = Gauge(
            'directories_total',
            'Total of directories found in offset ranges scanned',
            registry=registry)
        directories_total.set(self._n_dir_count)

        directories_total_empty = Gauge(
            'directories_empty',
            'Total of empty directories found in offset ranges scanned',
            registry=registry)
        directories_total_empty.set(self._n_dir_empty)

        directories_estimated_total = Gauge(
            'directories_estimated_total',
            'Estimated total of directories in database',
            registry=registry)
        directories_estimated_total.set(int(self._n_dir_count * multiplier))

        directories_estimated_total_empty = Gauge(
            'directories_estimated_empty',
            'Estimated total of empty directories in database',
            registry=registry)
        directories_estimated_total_empty.set(int(self._n_dir_empty * multiplier))

        # Final metric dump
        write_to_textfile(self.metrics_file_path, registry)
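
The file written here is intended for node_exporter's textfile collector; a hedged note on the wiring (paths are hypothetical):

# run node_exporter with:
#   --collector.textfile.directory=/var/lib/node_exporter
# and point metrics_file_path at a .prom file in that directory, e.g.
#   /var/lib/node_exporter/scan_metrics.prom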
Example #22
import logging
import time

from prometheus_client import Gauge

from app import app
from data import model
from data.database import UseThenDisconnect
from util.locking import GlobalLock, LockNotAcquiredException
from util.log import logfile_path
from workers.worker import Worker

logger = logging.getLogger(__name__)

repository_rows = Gauge("quay_repository_rows",
                        "number of repositories in the database")
user_rows = Gauge("quay_user_rows", "number of users in the database")
org_rows = Gauge("quay_org_rows", "number of organizations in the database")
robot_rows = Gauge("quay_robot_rows",
                   "number of robot accounts in the database")

WORKER_FREQUENCY = app.config.get("GLOBAL_PROMETHEUS_STATS_FREQUENCY", 60 * 60)


def get_repository_count():
    return model.repository.get_estimated_repository_count()


def get_active_user_count():
    return model.user.get_active_user_count()
Example #23
# -*- coding: utf-8 -*-


from prometheus_client import Gauge


DATA_UNBLOCK_OK = 1
DATA_UNBLOCK_ERROR = 0
DATA_UNBLOCK_EXCEPTION = -1

UnblockStatus = Gauge("suc_unblock_status",
                      "Unblock status",
                      labelnames=("task", "media"))
Example #24
def add_metrics(dom, header_mn, g_dict):
    try:
        labels = {'domain': dom.name()}
        if header_mn == "libvirt_cpu_stats_":
            stats = dom.getCPUStats(True)
            metric_names = stats[0].keys()
            metrics_collection = get_metrics_collections(
                metric_names, labels, stats)
            unit = "_nanosecs"

        elif header_mn == "libvirt_mem_stats_":
            stats = dom.memoryStats()
            metric_names = stats.keys()
            metrics_collection = get_metrics_collections(
                metric_names, labels, stats)
            unit = ""

        elif header_mn == "libvirt_block_stats_":

            metric_names = \
                ['read_requests_issued',
                 'read_bytes',
                 'write_requests_issued',
                 'write_bytes',
                 'errors_number']

            metrics_collection = get_metrics_multidim_collections(
                dom, metric_names, device="disk")
            unit = ""

        elif header_mn == "libvirt_interface_":

            metric_names = \
                ['read_bytes',
                 'read_packets',
                 'read_errors',
                 'read_drops',
                 'write_bytes',
                 'write_packets',
                 'write_errors',
                 'write_drops']

            metrics_collection = get_metrics_multidim_collections(
                dom, metric_names, device="interface")
            unit = ""

        for mn in metrics_collection:
            metric_name = header_mn + mn + unit
            dimensions = metrics_collection[mn]

            if metric_name not in g_dict:

                metric_help = 'help'
                labels_names = metrics_collection[mn][0][1].keys()

                g_dict[metric_name] = Gauge(metric_name, metric_help,
                                            labels_names)

            # Set one sample per label combination, whether the gauge
            # was just created or already cached.
            for dimension in dimensions:
                dimension_metric_value = dimension[0]
                dimension_label_values = dimension[1].values()
                g_dict[metric_name].labels(
                    *dimension_label_values).set(dimension_metric_value)
    except Exception as e:
        print(e)
    return g_dict
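
Passing label values positionally relies on the dict preserving the key order used when the Gauge was created; the keyword form is order-independent, a hedged alternative:

g_dict[metric_name].labels(**dimension[1]).set(dimension[0])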
Example #25
 def setMetricInfo(self, metricname, labels):
     metricNameAnomaly = metric_prefix + metricname + "_anomaly"
     if metricNameAnomaly not in self.instance.metrics:
         self.instance.metrics[metricNameAnomaly] = Gauge(
             metricNameAnomaly, metricNameAnomaly + " anomaly timestamp",
             labelnames=labels.keys())
Example #26
MAX_CONCURRENT_REQS = 100

MINION_MANAGER_HOSTNAME = "http://minion-manager.kube-system"
MINION_MANAGER_PORT = "6000"

kubectl = KubernetesApiClient(use_proxy=True)
cluster_name_id = os.getenv("AX_CLUSTER_NAME_ID", None)
asg_manager = AXUserASGManager(os.getenv("AX_CLUSTER_NAME_ID"),
                               AXClusterConfig().get_region())

# Need a lock to serialize cluster config operation
cfg_lock = RLock()

axmon_api_latency_stats = Summary("axmon_api_latency",
                                  "Latency for axmon REST APIs",
                                  ["method", "endpoint", "status"])
axmon_api_concurrent_reqs = Gauge("axmon_api_concurrent_reqs", "Concurrent requests in axmon")


def before_request():
    request.start_time = time.time()
    global concurrent_reqs, MAX_CONCURRENT_REQS, concurrent_reqs_lock
    with concurrent_reqs_lock:
        axmon_api_concurrent_reqs.set(concurrent_reqs)
        # Disabling concurrent request logic for now due to findings in AA-3167
        #if concurrent_reqs >= MAX_CONCURRENT_REQS:
        #    return ax_make_response(
        #        original_jsonify(result="too many concurrent requests (max {})".format(MAX_CONCURRENT_REQS)), 429
        #    )
        concurrent_reqs += 1

Example #27
"""Som Model Adapter - Working with custom implementation of SOM."""
import logging
import uuid
import numpy as np
from anomaly_detector.adapters import BaseModelAdapter
from anomaly_detector.decorator.utils import latency_logger
from anomaly_detector.events import AnomalyEvent
from anomaly_detector.exception import FactStoreEnvVarNotSetException, \
    ModelLoadException, ModelSaveException
from anomaly_detector.model import SOMPYModel, W2VModel
import os
from prometheus_client import Gauge, Counter, Histogram
from urllib.parse import quote

ANOMALY_COUNT = Gauge("aiops_lad_anomaly_count", "count of anomalies runs",
                      ['anomaly_status'])
ANOMALY_SCORE = Gauge("aiops_lad_anomaly_avg_score", "avg anomaly score")
LOG_LINES_COUNT = Gauge("aiops_lad_loglines_count",
                        "count of log lines processed runs")
FALSE_POSITIVE_COUNT = Counter("aiops_lad_false_positive_count",
                               "count of false positives processed runs",
                               ['id'])
ANOMALY_HIST = Histogram("aiops_hist", "histogram of anomalies runs")
THRESHOLD = Gauge("aiops_lad_threshold", "Threshold of marker for anomaly")


class SomModelAdapter(BaseModelAdapter):
    """Self organizing map custom logic to train model. Includes logic to train and predict anomalies in logs."""
    def __init__(self, storage_adapter):
        """Init storage provider which provides config and storage interface with storage systems."""
        self.storage_adapter = storage_adapter
Example #28
# (c) Oleg Plaxin 2018
# [email protected]

from requests_html import HTMLSession
from prometheus_client import start_http_server, Gauge
import sys
import time
import os
import configparser

config_name = "Settings"
vkapiuri_tag = "VKAPIURI"
accesstoken_tag = "ACCESSTOKEN"
version_tag = "VKAPIVERSION"

user_state = Gauge('vk_user_state', 'Description of summary', ['id'])


def check_user(ids):
    TAG = "user checker"

    user_ids = ",".join(str(e) for e in ids)
    get_link = vk_api_link + "users.get?user_ids=" + str(user_ids) + "&fields=sex,online,last_seen&access_token=" + \
               access_token + "&v=" + v + "&lang=ru"

    ms = int(round(time.time() * 1000))
    content = HTMLSession().get(get_link)
    json = content.json()
    countMillis = int(round(time.time() * 1000)) - ms

    sex = [["была в сети", "был в сети"], ["в сети", "в сети"]]
Example #29
 def __init__(self, registry: CollectorRegistry = None):
     self.registry: CollectorRegistry = registry or \
                                        CollectorRegistry(
                                            auto_describe=True)
     # Buckets: 5 seconds, 10 seconds, 20 seconds, 30 seconds,
     #          45 seconds, 1 minute, 1.5 minutes, 2 minutes,
     #          3 minutes, 4 minutes, 5 minutes, 6 minutes,
     #          8 minutes, 10 minutes, 12 minutes, 15 minutes
     #          20 minutes, 25 minutes, 30 minutes
     # used for both worker launch time and worker update time
     histogram_buckets = [
         5, 10, 20, 30, 45, 60, 90, 120, 180, 240, 300, 360, 480, 600,
         720, 900, 1200, 1500, 1800
     ]
     # Buckets: .01 seconds to 1000 seconds.
     # Used for autoscaler update time.
     update_time_buckets = [.01, .1, 1, 10, 100, 1000]
     self.worker_create_node_time: Histogram = Histogram(
         "worker_create_node_time_seconds",
         "Worker launch time. This is the time it takes for a call to "
         "a node provider's create_node method to return. Note that "
         "when nodes are launched in batches, the launch time for that "
         "batch will be observed once for *each* node in that batch. "
         "For example, if 8 nodes are launched in 3 minutes, a launch "
         "time of 3 minutes will be observed 8 times.",
         unit="seconds",
         namespace="autoscaler",
         registry=self.registry,
         buckets=histogram_buckets)
     self.worker_update_time: Histogram = Histogram(
         "worker_update_time_seconds",
         "Worker update time. This is the time between when an updater "
         "thread begins executing and when it exits successfully. This "
         "metric only observes times for successful updates.",
         unit="seconds",
         namespace="autoscaler",
         registry=self.registry,
         buckets=histogram_buckets)
     self.update_time: Histogram = Histogram(
         "update_time",
         "Autoscaler update time. This is the time for an autoscaler "
         "update iteration to complete.",
         unit="seconds",
         namespace="autoscaler",
         registry=self.registry,
         buckets=update_time_buckets)
     self.pending_nodes: Gauge = Gauge(
         "pending_nodes",
         "Number of nodes pending to be started.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.started_nodes: Counter = Counter(
         "started_nodes",
         "Number of nodes started.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.stopped_nodes: Counter = Counter(
         "stopped_nodes",
         "Number of nodes stopped.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.updating_nodes: Gauge = Gauge(
         "updating_nodes",
         "Number of nodes in the process of updating.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.recovering_nodes: Gauge = Gauge(
         "recovering_nodes",
         "Number of nodes in the process of recovering.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.running_workers: Gauge = Gauge(
         "running_workers",
         "Number of worker nodes running.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.failed_create_nodes: Counter = Counter(
         "failed_create_nodes",
         "Number of nodes that failed to be created due to an "
         "exception in the node provider's create_node method.",
         unit="nodes",
         namespace="autoscaler",
         registry=self.registry)
     self.failed_updates: Counter = Counter(
         "failed_updates",
         "Number of failed worker node updates.",
         unit="updates",
         namespace="autoscaler",
         registry=self.registry)
     self.successful_updates: Counter = Counter(
         "successful_updates",
         "Number of succesfful worker node updates.",
         unit="updates",
         namespace="autoscaler",
         registry=self.registry)
     self.failed_recoveries: Counter = Counter(
         "failed_recoveries",
         "Number of failed node recoveries.",
         unit="recoveries",
         namespace="autoscaler",
         registry=self.registry)
     self.successful_recoveries: Counter = Counter(
         "successful_recoveries",
         "Number of successful node recoveries.",
         unit="recoveries",
         namespace="autoscaler",
         registry=self.registry)
     self.update_loop_exceptions: Counter = Counter(
         "update_loop_exceptions",
         "Number of exceptions raised in the update loop of the "
         "autoscaler.",
         unit="exceptions",
         namespace="autoscaler",
         registry=self.registry)
     self.node_launch_exceptions: Counter = Counter(
         "node_launch_exceptions",
         "Number of exceptions raised while launching nodes.",
         unit="exceptions",
         namespace="autoscaler",
         registry=self.registry)
     self.reset_exceptions: Counter = Counter(
         "reset_exceptions",
         "Number of exceptions raised while resetting the autoscaler.",
         unit="exceptions",
         namespace="autoscaler",
         registry=self.registry)
     self.config_validation_exceptions: Counter = Counter(
         "config_validation_exceptions",
         "Number of exceptions raised while validating the config "
         "during a reset.",
         unit="exceptions",
         namespace="autoscaler",
         registry=self.registry)
     self.drain_node_exceptions: Counter = Counter(
         "drain_node_exceptions",
         "Number of exceptions raised when making a DrainNode rpc"
         "prior to node termination.",
         unit="exceptions",
         namespace="autoscaler",
         registry=self.registry)
Example #30
 def test_gauge(self):
     g = Gauge('gg', 'A gauge', registry=self.registry)
     g.set(17)
     self.assertEqual(
         b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n# EOF\n',
         generate_latest(self.registry))
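
The expected output's "# EOF" trailer shows this generate_latest is the OpenMetrics exposition variant, not the classic text format; a hedged note on the corresponding import:

# the OpenMetrics serializer terminates its output with '# EOF'
from prometheus_client.openmetrics.exposition import generate_latest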