Example #1
class PPPoESessionCollector(BaseCollector):
    METRICS_SUBSYSTEM = 'pppoe'
    ENDPOINT = 'session'

    def __init__(self, client: Client):
        super().__init__(client)

        self._session = Info(namespace=self.METRICS_NAMESPACE,
                             subsystem=self.METRICS_SUBSYSTEM,
                             name='session',
                             documentation='Information about PPPoE Session')

        self._mtu = Gauge(namespace=self.METRICS_NAMESPACE,
                          subsystem=self.METRICS_SUBSYSTEM,
                          name='mtu',
                          documentation='The MTU of the PPPoE')

    def _process_data(self, data):
        mtu = data['MTU']
        del data['MTU']
        if mtu:
            self._mtu.set(mtu)
        else:
            self._mtu.set(-1)

        self._session.info(data)
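For reference, prometheus_client exposes an Info metric as a single `<name>_info` sample whose dict entries become labels. A minimal standalone sketch (illustrative names, independent of the collector above):

from prometheus_client import CollectorRegistry, Info, generate_latest

registry = CollectorRegistry()
session = Info('demo_session', 'Session information', registry=registry)
session.info({'state': 'up'})
print(generate_latest(registry).decode())
# exposition includes: demo_session_info{state="up"} 1.0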
Example #2
def main():
    """Start exporter web server and refresh bios information"""

    # Parse Arguments
    parser = argparse.ArgumentParser(description='BIOS Prometheus Exporter')
    parser.add_argument('--port',
                        dest='port',
                        default=8000,
                        type=int,
                        action='store',
                        help='Set listening port')
    args = parser.parse_args()

    # Catch signals
    signal.signal(signal.SIGINT, exit_gracefully)
    signal.signal(signal.SIGTERM, exit_gracefully)

    # Create metric
    bios = Info('bios', 'Node BIOS Information')

    # Start Listener
    start_http_server(args.port)
    print("Listening on port", args.port, end='', flush=True)

    # Update BIOS Information
    while True:
        bios.info(get_bios_info())
        print('.', end='', flush=True)
        sleep(30)
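The `exit_gracefully` handler is referenced above but not included in the excerpt; a minimal hypothetical version (the name matches the call, the body is an assumption):

import sys

def exit_gracefully(signum, frame):
    # signum is the received signal number; frame is the interrupted stack frame
    print('Caught signal', signum, '- exiting')
    sys.exit(0)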
Example #3
class SystemInfoUpdater(ExporterPeriodicTask):
    def __init__(self, rpc):
        super(SystemInfoUpdater, self).__init__(rpc, 5 * 60)
        self._info = Info('polkadot_system', 'Polkadot system information')
        self._runtime_info = Info('polkadot_runtime',
                                  'Polkadot runtime information')

    def _perform_internal(self):
        self._info.info({
            'name': self._rpc.request('system_name')['result'],
            'version': self._rpc.request('system_version')['result'],
            'chain': self._rpc.request('system_chain')['result'],
        })

        runtime = self._rpc.request('state_getRuntimeVersion')['result']
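        # Keep only a fixed subset of fields and coerce the values to str,
        # since Info label values must be strings.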
        for key in list(runtime):
            if key not in ("authoringVersion", "implName", "implVersion",
                           "specName", "specVersion"):
                runtime.pop(key)
            else:
                runtime[key] = str(runtime[key])

        self._runtime_info.info(runtime)
Example #4
def start_prometheus_server(port):
    hostname = socket.gethostname()
    log.debug(
        f"[prometheus] Start web server: 0.0.0.0:{port} (Host:{hostname})")
    start_http_server(port)
    prometheus_info = Info("synology_exporter", "Synology Prometheus exporter")
    prometheus_info.info({"version": exporter_version, "running_on": hostname})
    log.info(f"[prometheus] Web server started: {hostname}:{port}")
Example #5
def init_http_server(port):
    hostname = socket.gethostname()
    log.debug(
        f"Start Prometheus web server: 0.0.0.0:{port} (Host:{hostname}) ..")
    start_http_server(port)
    prometheus_info = Info("mi_flower_exporter",
                           "Mi-Flower Prometheus exporter")
    prometheus_info.info({"version": VERSION, "running_on": hostname})
    log.info(f"Prometheus web server started: {hostname}:{port}")
Example #6
class ModuleCollector(BaseCollector):
    METRICS_SUBSYSTEM = 'module'

    def __init__(self, client: Client):
        super().__init__(client)

        self._info = Info(namespace=self.METRICS_NAMESPACE,
                          name=self.METRICS_SUBSYSTEM,
                          documentation='Firmware version information')

    def _process_data(self, data):
        self._info.info(data)
Example #7
def metrics():
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)

    ProcessCollector(registry=registry)
    PlatformCollector(registry=registry)

    i = Info('build_info', "The build information", registry=registry)
    i.info({"version": "1,2,3"})

    data = generate_latest(registry)
    return Response(data, mimetype=CONTENT_TYPE_LATEST)
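This excerpt omits its imports: CollectorRegistry, multiprocess, ProcessCollector, PlatformCollector, Info, generate_latest and CONTENT_TYPE_LATEST all come from prometheus_client, and Response from Flask. MultiProcessCollector additionally expects the prometheus_multiproc_dir environment variable (PROMETHEUS_MULTIPROC_DIR in newer releases) to point at a writable directory. A sketch of exposing the view, assuming a Flask app:

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/metrics', view_func=metrics)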
Example #8
def prometheus_solid(context):
    i = Info(
        "my_build_version",
        "Description of info",
        registry=context.resources.prometheus.registry,
    )
    info_labels = {"version": "1.2.3", "buildhost": "foo@bar"}
    i.info(info_labels)
    metric = None
    for metric in context.resources.prometheus.registry.collect():
        if metric.name == "my_build_version":
            break
    assert metric and metric.samples[0].labels == info_labels
Example #10
def init_http_server(port):
    hostname = socket.gethostname()
    log.debug(f"Start Prometheus web server: {hostname}:{port} ..")

    start_http_server(port)

    prometheus_info = Info('cinder_exporter',
                           'Openstack Cinder Prometheus exporter')
    prometheus_info.info({
        'version': cinder_exporter.__version__,
        'running_on': hostname
    })

    log.info(f"Prometheus web server started: {hostname}:{port}")
Example #11
class LteCollector(BaseCollector):
    METRICS_SUBSYSTEM = 'lte'
    ENDPOINT = 'lteinfo'

    def __init__(self, client: Client):
        super().__init__(client)

        self._device_info = Info(namespace=self.METRICS_NAMESPACE,
                                 subsystem=self.METRICS_SUBSYSTEM,
                                 name='device',
                                 documentation='LTE Device Information')
        self._connection_info = Info(namespace=self.METRICS_NAMESPACE,
                                     subsystem=self.METRICS_SUBSYSTEM,
                                     name='connection',
                                     documentation='LTE Cell Information')

        self._rsrp = Gauge(namespace=self.METRICS_NAMESPACE,
                           subsystem=self.METRICS_SUBSYSTEM,
                           name='rsrp',
                           documentation='LTE RSRP')
        self._rsrq = Gauge(namespace=self.METRICS_NAMESPACE,
                           subsystem=self.METRICS_SUBSYSTEM,
                           name='rsrq',
                           documentation='LTE RSRQ')

    # noinspection SpellCheckingInspection
    def _process_data(self, data):
        self._device_info.info({
            'imei': data['imei'],
            'imsi': data['imsi'],
            'device_status': data['device_status'],
            'card_status': data['card_status'],
            'antenna_mode': data['antenna_mode'],
        })

        self._connection_info.info({
            'phycellid': data['phycellid'],
            'cellid': data['cellid'],
            'tac': data['tac'],
            'service_status': data['service_status'],
            'eps': data['eps']
        })

        self._rsrp.set(data['rsrp'])
        self._rsrq.set(data['rsrq'])
Example #12
def init_prom(port, http_server):
    """ (Str) -> None
    Initialize Prometheus exporter and start the http server exposing
    the metrics.
    Parameters:
      port: The http port for the client
      http_sever: for the moment only twisted
    """
    if http_server == 'twisted':
        # start the exporter http server with twisted
        from prometheus_client import start_http_server
        start_http_server(int(port))
    else:
        print(f'{http_server} is not supported. Supported: twisted')
        sys.exit(1)
    i = Info('purestorage_exporter',
             'Purestorage Flasharray Prometheus exporter')
    i.info({'version': purestoragefa_exporter.__version__,
            'running_on': socket.gethostname()})
Example #13
    def __init__(self,
                 name: str,
                 config_dict,
                 loglvl=logging.INFO,
                 setup_logs=True,
                 monitoring=True):
        if setup_logs:
            # Setup Logging
            logs_dir = config_dict["GENERAL"].get("LOGS_DIR", fallback="")

            logging.basicConfig(format=LOGGING_FORMAT,
                                level=loglvl,
                                filename=os.path.join(logs_dir,
                                                      f"{name}-bot.log"))

            # Log also to stdout
            stream_log_handler = logging.StreamHandler()
            stream_log_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
            logging.getLogger().addHandler(stream_log_handler)

        if name not in [
                "signal", "threema", "telegram", "interactive", "twitter",
                "mastodon", "instagram", "messenger", "facebook", "feedback",
                "matrix"
        ]:
            raise ValueError(
                f"Invalid messenger interface was requested: {name}")

        self.name = name
        self.config = config_dict

        monitor_port = self.config.getint(name.upper(),
                                          "PROMETHEUS_PORT",
                                          fallback=0)
        if monitoring and monitor_port > 0:
            try:
                prometheus_client.start_http_server(monitor_port, '0.0.0.0')
            except OSError as e:
                logging.error("Error while starting Prometheus Endpoint",
                              exc_info=e)
            i = Info('platform', 'Bot Platform')
            i.info({'platform': self.name})
Example #14
import time
from typing import Tuple
import datetime
import uuid
from prometheus_client import Counter, Gauge, Histogram, Info, Summary
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Match
from starlette.types import ASGIApp

from .logging import logger

RELEASE_DATE = Info("release_date", "Date of last server update")
RELEASE_DATE.info({"last_release": datetime.datetime.now().strftime("%m/%d/%Y")})

REQUESTS = Counter(
    "server_requests_total",
    "Total count of requests by path.",
    ["path"]
)
RESPONSES = Counter(
    "server_responses_total",
    "Total count of responses by path and status codes.",
    ["path", "status_code"],
)

REQUESTS_PROCESSING_TIME = Histogram(
    "server_requests_processing_time_seconds",
    "Histogram of requests processing time by path",
    ["path"],  # the excerpt was cut mid-call; a "path" label is assumed here
)
Example #15
def metrics():
    REGISTRY = CollectorRegistry()

    SYSTEM_INFO = Info('awx_system', 'AWX System Information', registry=REGISTRY)
    ORG_COUNT = Gauge('awx_organizations_total', 'Number of organizations', registry=REGISTRY)
    USER_COUNT = Gauge('awx_users_total', 'Number of users', registry=REGISTRY)
    TEAM_COUNT = Gauge('awx_teams_total', 'Number of teams', registry=REGISTRY)
    INV_COUNT = Gauge('awx_inventories_total', 'Number of inventories', registry=REGISTRY)
    PROJ_COUNT = Gauge('awx_projects_total', 'Number of projects', registry=REGISTRY)
    JT_COUNT = Gauge('awx_job_templates_total', 'Number of job templates', registry=REGISTRY)
    WFJT_COUNT = Gauge('awx_workflow_job_templates_total', 'Number of workflow job templates', registry=REGISTRY)
    HOST_COUNT = Gauge(
        'awx_hosts_total',
        'Number of hosts',
        [
            'type',
        ],
        registry=REGISTRY,
    )
    SCHEDULE_COUNT = Gauge('awx_schedules_total', 'Number of schedules', registry=REGISTRY)
    INV_SCRIPT_COUNT = Gauge('awx_inventory_scripts_total', 'Number of inventory scripts', registry=REGISTRY)
    USER_SESSIONS = Gauge(
        'awx_sessions_total',
        'Number of sessions',
        [
            'type',
        ],
        registry=REGISTRY,
    )
    CUSTOM_VENVS = Gauge('awx_custom_virtualenvs_total', 'Number of virtualenvs', registry=REGISTRY)
    RUNNING_JOBS = Gauge('awx_running_jobs_total', 'Number of running jobs on the Tower system', registry=REGISTRY)
    PENDING_JOBS = Gauge('awx_pending_jobs_total', 'Number of pending jobs on the Tower system', registry=REGISTRY)
    STATUS = Gauge(
        'awx_status_total',
        'Status of Job launched',
        [
            'status',
        ],
        registry=REGISTRY,
    )

    INSTANCE_CAPACITY = Gauge(
        'awx_instance_capacity',
        'Capacity of each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )
    INSTANCE_CPU = Gauge(
        'awx_instance_cpu',
        'CPU cores on each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )
    INSTANCE_MEMORY = Gauge(
        'awx_instance_memory',
        'RAM (Kb) on each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )
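    # Info also accepts labelnames; each (hostname, instance_uuid) child below
    # gets its own set of info labels via .labels(...).info(...).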
    INSTANCE_INFO = Info(
        'awx_instance',
        'Info about each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )
    INSTANCE_LAUNCH_TYPE = Gauge(
        'awx_instance_launch_type_total',
        'Type of Job launched',
        [
            'node',
            'launch_type',
        ],
        registry=REGISTRY,
    )
    INSTANCE_STATUS = Gauge(
        'awx_instance_status_total',
        'Status of Job launched',
        [
            'node',
            'status',
        ],
        registry=REGISTRY,
    )
    INSTANCE_CONSUMED_CAPACITY = Gauge(
        'awx_instance_consumed_capacity',
        'Consumed capacity of each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )
    INSTANCE_REMAINING_CAPACITY = Gauge(
        'awx_instance_remaining_capacity',
        'Remaining capacity of each node in a Tower system',
        [
            'hostname',
            'instance_uuid',
        ],
        registry=REGISTRY,
    )

    LICENSE_INSTANCE_TOTAL = Gauge('awx_license_instance_total', 'Total number of managed hosts provided by your license', registry=REGISTRY)
    LICENSE_INSTANCE_FREE = Gauge('awx_license_instance_free', 'Number of remaining managed hosts provided by your license', registry=REGISTRY)

    license_info = get_license()
    SYSTEM_INFO.info(
        {
            'install_uuid': settings.INSTALL_UUID,
            'insights_analytics': str(settings.INSIGHTS_TRACKING_STATE),
            'tower_url_base': settings.TOWER_URL_BASE,
            'tower_version': get_awx_version(),
            'license_type': license_info.get('license_type', 'UNLICENSED'),
            'license_expiry': str(license_info.get('time_remaining', 0)),
            'pendo_tracking': settings.PENDO_TRACKING_STATE,
            'external_logger_enabled': str(settings.LOG_AGGREGATOR_ENABLED),
            'external_logger_type': getattr(settings, 'LOG_AGGREGATOR_TYPE', 'None'),
        }
    )

    LICENSE_INSTANCE_TOTAL.set(str(license_info.get('instance_count', 0)))
    LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))

    current_counts = counts(None)

    ORG_COUNT.set(current_counts['organization'])
    USER_COUNT.set(current_counts['user'])
    TEAM_COUNT.set(current_counts['team'])
    INV_COUNT.set(current_counts['inventory'])
    PROJ_COUNT.set(current_counts['project'])
    JT_COUNT.set(current_counts['job_template'])
    WFJT_COUNT.set(current_counts['workflow_job_template'])

    HOST_COUNT.labels(type='all').set(current_counts['host'])
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])

    SCHEDULE_COUNT.set(current_counts['schedule'])
    INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])

    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
    USER_SESSIONS.labels(type='user').set(current_counts['active_user_sessions'])
    USER_SESSIONS.labels(type='anonymous').set(current_counts['active_anonymous_sessions'])

    all_job_data = job_counts(None)
    statuses = all_job_data.get('status', {})
    for status, value in statuses.items():
        STATUS.labels(status=status).set(value)

    RUNNING_JOBS.set(current_counts['running_jobs'])
    PENDING_JOBS.set(current_counts['pending_jobs'])

    instance_data = instance_info(None, include_hostnames=True)
    for uuid, info in instance_data.items():
        hostname = info['hostname']
        INSTANCE_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['capacity'])
        INSTANCE_CPU.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['cpu'])
        INSTANCE_MEMORY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['memory'])
        INSTANCE_CONSUMED_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['consumed_capacity'])
        INSTANCE_REMAINING_CAPACITY.labels(hostname=hostname, instance_uuid=uuid).set(instance_data[uuid]['remaining_capacity'])
        INSTANCE_INFO.labels(hostname=hostname, instance_uuid=uuid).info(
            {
                'enabled': str(instance_data[uuid]['enabled']),
                # .get(): instance_data[uuid] is a dict, so getattr() always
                # returned the fallback here.
                'last_isolated_check': instance_data[uuid].get('last_isolated_check', 'None'),
                'managed_by_policy': str(instance_data[uuid]['managed_by_policy']),
                'version': instance_data[uuid]['version'],
            }
        )

    instance_data = job_instance_counts(None)
    for node in instance_data:
        # skipping internal execution node (for system jobs)
        if node == '':
            continue
        types = instance_data[node].get('launch_type', {})
        for launch_type, value in types.items():
            INSTANCE_LAUNCH_TYPE.labels(node=node, launch_type=launch_type).set(value)
        statuses = instance_data[node].get('status', {})
        for status, value in statuses.items():
            INSTANCE_STATUS.labels(node=node, status=status).set(value)

    return generate_latest(registry=REGISTRY)
Example #16
def metrics():
    lst = []
    #print (globals().keys())
    registry = CollectorRegistry()
    url = BASE_URL + "/nifi-api/access/token"
    token = None
    if USERNAME is not None:
        token = getToken(url, USERNAME, PASSWORD)

    #### ====  cluster nodes status ==== ####
    url = BASE_URL + "/nifi-api/controller/cluster"
    cluster = getCluster(url, token)
    for item in cluster:
        NodeName = {"instance": item['address']}
        nodeStatus = Gauge('nifi_nodes_status',
                           'Nifi node status',
                           NodeName.keys(),
                           registry=registry)
        activeThreadCount = Gauge('nifi_node_activeThreadCount',
                                  'Total number of activeThreadCount per node',
                                  NodeName.keys(),
                                  registry=registry)
        queuedItems = Gauge('nifi_node_queuedItems',
                            'Number of queud items per node',
                            NodeName.keys(),
                            registry=registry)

        nodeStatus.labels(**NodeName).set(convertStatus(item['status']))
        lst.append(prometheus_client.generate_latest(nodeStatus))

        activeThreadCount.labels(**NodeName).set(item['activeThreadCount'])
        lst.append(prometheus_client.generate_latest(activeThreadCount))
        flow = float(item['queued'].rsplit(' ')[0].replace(',', ''))
        queuedItems.labels(**NodeName).set(flow)
        lst.append(prometheus_client.generate_latest(queuedItems))

        registry = CollectorRegistry()

    #### ====  general cluster info ==== ####
    url = BASE_URL + "/nifi-api/flow/about"
    flowAbout = about(url, token)
    nifiVersion = Info('nifi_cluster_version',
                       'This is the version',
                       registry=registry)
    nifiVersion.info({
        "title": flowAbout['title'],
        "version": flowAbout['version'],
        "timezone": flowAbout['timezone']
    })
    lst.append(prometheus_client.generate_latest(nifiVersion))

    #### ====  general cluster status ==== ####
    url = BASE_URL + "/nifi-api/flow/status"
    generalFlow = getFlow(url, token)
    with open('cluster_status.csv', mode="r") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_num = 0
        for line in csv_reader:
            if line_num != 0:
                if line[0] in globals().keys():
                    del globals()[line[0]]
                globals()[line[0]] = Gauge(line[1], line[2], registry=registry)
                globals()[line[0]].set(generalFlow[line[0]])
                lst.append(
                    prometheus_client.generate_latest(globals()[line[0]]))
            line_num += 1
    # csv_file is closed automatically by the "with" block

    url = BASE_URL + "/nifi-api/process-groups/root/process-groups"
    nifi_group = getProcessorFlow(url, token)
    for PG in nifi_group:
        processorName = {"processorName": PG['component']['name']}
        processorQueue = Gauge('nifi_amount_items_queued',
                               'Total number of items queued by the processor',
                               processorName.keys(),
                               registry=registry)
        aggregateSnapshot_queued = float(
            PG['status']['aggregateSnapshot']['queued'].rsplit(' ')[0].replace(
                ',', ''))
        processorQueue.labels(**processorName).set(aggregateSnapshot_queued)
        lst.append(prometheus_client.generate_latest(processorQueue))

        registry = CollectorRegistry()

    return Response(lst, mimetype=CONTENT_TYPE_LATEST)
Example #17
from AmqpConnection import AmqpConnection
from RuntimeConfig import getRuntimeConfig
from prometheus import startProm
from prometheus_client import Info

import argparse
import sys
import time
from time import sleep

METRIC_PROM = Info('custodian_prom', 'Prometheus info')

argParser = argparse.ArgumentParser()
args = argParser.parse_args()

config = getRuntimeConfig()
METRIC_PROM.info({'port': str(config.promPort), 'only': str(config.promOnly)})


def on_timeout():
    global amqpConnection
    amqpConnection.close()


def newAmqpConnection(config):
    logger.info("Connecting to:", config.amqpHost, config.amqpQueue)

    amqpConnection = AmqpConnection(host=config.amqpHost, connect=False)
    amqpConnection.queue = config.amqpQueue
    amqpConnection.connect()
    amqpConnection.setPingReply("upsilon-custodian", "development",
                                "db, amqp, custodian")
Example #18
    def GET(self, date):
        validateAccessToken()
        web_logger.mes()
        file_path = ""

        # Allow origin
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Access-Control-Allow-Credentials', 'true')

        # checks if any date has been specified, otherwise looks for the most recent statistics
        if (date != "last-month"):
            if self.__dates_regex.match(date):
                search = self.__dates_regex.search(date)

                month_from = search.group(2)
                year_from = search.group(1)
                month_to = search.group(4)
                year_to = search.group(3)

                if year_from > year_to or (year_from == year_to
                                           and month_from > month_to):
                    raise web.HTTPError(
                        "400", {"Content-Type": "text/plain"},
                        "Bad date provided, the ending date is lower than the beginning date."
                    )

                registry = CollectorRegistry()

                # Counter of accesses to different endpoints oc
                http_requests = Counter(
                    'opencitations_http_requests',
                    'Counter for HTTP requests to opencitations endpoints',
                    ['endpoint'],
                    registry=registry)

                # Aggregate counter of accesses to the different categories of endpoints oc
                agg_counter = Counter(
                    'opencitations_agg_counter',
                    'Aggregate HTTP requests counter to opencitations endpoints',
                    ['category'],
                    registry=registry)
                i = Info('opencitations_date',
                         'Date to which the statistics refers to',
                         registry=registry)
                i.info({
                    'month_from': str(month_from),
                    'year_from': str(year_from),
                    "month_to": str(month_to),
                    'year_to': str(year_to)
                })

                indexed_records = Gauge('opencitations_indexed_records',
                                        'Indexed records',
                                        registry=registry)
                harvested_data_sources = Gauge(
                    'opencitations_harvested_data_sources',
                    'Harvested data sources',
                    registry=registry)

                current_month = int(month_from)
                current_year = int(year_from)
                target_month = int(month_to)
                target_year = int(year_to)

                while (True):
                    # For each month collects the statistics and adds
                    # them to the ones to be returned.
                    while (True):
                        current_month_str = str(current_month)
                        if len(current_month_str) == 1:
                            current_month_str = '0' + current_month_str
                        file_path = path.join(
                            c["stats_dir"], "oc-" + str(current_year) + "-" +
                            current_month_str + ".prom")
                        if path.isfile(file_path):
                            with open(file_path, 'r') as f:
                                for family in text_fd_to_metric_families(f):
                                    for sample in family.samples:
                                        name = sample[0]
                                        if name == "opencitations_agg_counter_total":
                                            agg_counter.labels(**sample[1]).inc(sample[2])
                                        if name == "opencitations_http_requests_total":
                                            http_requests.labels(**sample[1]).inc(sample[2])
                                        if name == "opencitations_indexed_records":
                                            indexed_records.set(sample[2])
                                        if name == "opencitations_harvested_data_sources":
                                            harvested_data_sources.set(sample[2])

                        # If we have reached the target year and the current
                        # month is at/past the target, or we've hit December,
                        # exit the month loop.
                        if (current_year == target_year and current_month >=
                                target_month) or current_month == 12:
                            break
                        current_month += 1

                    # Once all the years have been visited, exit the year loop
                    if (current_year == target_year):
                        break
                    current_year += 1
                    current_month = 1

                return generate_latest(registry)
            else:
                file_name = "oc-" + date + ".prom"
                if self.__file_regex.match(file_name):
                    file_path = path.join(c["stats_dir"], file_name)
                    if not path.isfile(file_path):
                        file_path = ''
                else:
                    raise web.HTTPError(
                        "400", {"Content-Type": "text/plain"},
                        "Bad date format the required one is: year-month or year-month_year-month."
                    )
        else:
            max_year = 0
            max_month = 0
            for file in listdir(c["stats_dir"]):
                if self.__file_regex.match(file):
                    groups = self.__file_regex.search(file).groups()
                    # checks that the file respects the format in the name
                    year = int(groups[0])
                    month = int(groups[1])
                    if year > max_year or (year == max_year
                                           and month > max_month):
                        max_year = year
                        max_month = month
                        file_path = path.join(c["stats_dir"], file)

        # if the statistics file was found then it returns the content
        if file_path != "":
            web.header('Content-Type', "document")
            f = open(file_path, 'r')
            content = f.read()
            f.close()
            web.ctx.status = '200 OK'
            return content
        else:
            raise web.HTTPError("404", {"Content-Type": "text/plain"},
                                "No statistics found.")
Example #19
from time import time
import pkg_resources

from pyramid.tweens import EXCVIEW
from prometheus_client import start_http_server, Histogram, Gauge, Summary, Info

_VERSION = pkg_resources.get_distribution("scielo-documentstore").version

APP_INFO = Info(
    "kernel",
    "Info about this instance of the application",
)
APP_INFO.info({"version": _VERSION})

REQUEST_DURATION_SECONDS = Histogram(
    "kernel_restfulapi_request_duration_seconds",
    "Time spent processing HTTP requests",
    ["handler", "method"],
)
REQUESTS_INPROGRESS = Gauge(
    "kernel_restfulapi_requests_inprogress",
    "Current number of HTTP requests being processed",
)
RESPONSE_SIZE_BYTES = Summary(
    "kernel_restfulapi_response_size_bytes",
    "Summary of response size for HTTP requests",
    ["handler"],
)


def tween_factory(handler, registry):
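    # NOTE: the tween body is truncated in the source; what follows is a
    # plausible sketch using the metrics above, not the project's actual code.
    def tween(request):
        REQUESTS_INPROGRESS.inc()
        begin = time()
        try:
            response = handler(request)
        finally:
            REQUESTS_INPROGRESS.dec()
        REQUEST_DURATION_SECONDS.labels(
            handler=request.path, method=request.method).observe(time() - begin)
        RESPONSE_SIZE_BYTES.labels(handler=request.path).observe(
            len(response.body))
        return response

    return tween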
Example #20
from prometheus_client import Counter, Info, generate_latest as prom_generate_latest
from lbry import __version__ as version
from lbry.build_info import BUILD, COMMIT_HASH
from lbry.wallet.server import util
import lbry.wallet.server.version as wallet_server_version

NAMESPACE = "wallet_server"

VERSION_INFO = Info('build',
                    'Wallet server build info (e.g. version, commit hash)',
                    namespace=NAMESPACE)
VERSION_INFO.info({
    'build': BUILD,
    'commit': COMMIT_HASH,
    'version': version,
    'min_version': util.version_string(wallet_server_version.PROTOCOL_MIN),
})
REQUESTS_COUNT = Counter("requests_count",
                         "Number of requests received",
                         namespace=NAMESPACE,
                         labelnames=("method", ))


class PrometheusServer:
    def __init__(self):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.runner = None
Example #21
import socket

from prometheus_client import Counter, Gauge, Histogram, Info

# METRICS
NODE_HOST_NAME = socket.gethostname()

SS_NODE_INFO = Info("ss_node", "ss node info")
SS_NODE_INFO.info({"ss_node_name": NODE_HOST_NAME})

CONNECTION_MADE_COUNT = Counter(
    "connection_made_count",
    "shadowsocks connection made number",
    labelnames=[
        "ss_node",
    ],
)
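# Pre-binding the node label once lets call sites use .inc() directly,
# without repeating .labels(ss_node=...) everywhere.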
CONNECTION_MADE_COUNT = CONNECTION_MADE_COUNT.labels(ss_node=NODE_HOST_NAME)


ACTIVE_CONNECTION_COUNT = Gauge(
    "active_connection_count",
    "shadowsocks active connection count",
    labelnames=[
        "ss_node",
    ],
)
ACTIVE_CONNECTION_COUNT = ACTIVE_CONNECTION_COUNT.labels(ss_node=NODE_HOST_NAME)


NETWORK_TRANSMIT_BYTES = Counter(
Example #22
def main():
    parser = configargparse.ArgParser(default_config_files=[
        "/etc/openvpn-auth-azure-ad/config.conf",
        "~/.openvpn-auth-azure-ad",
    ])

    parser.add_argument(
        "-c",
        "--config",
        is_config_file=True,
        help="path of config file",
        env_var="AAD_CONFIG_PATH",
    )
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version="%(prog)s {version}".format(version=__version__),
    )
    parser.add_argument(
        "-t",
        "--threads",
        default=10,
        env_var="AAD_THREAD_COUNT",
        help="Amount of threads to handle authentication",
        type=int,
    )

    parser_authentication = parser.add_argument_group(
        "OpenVPN User Authentication")
    parser_authentication.add_argument(
        "-a",
        "--authenticators",
        default=AADAuthenticatorFlows.DEVICE_TOKEN,
        help=
        "Enable authenticators. Multiple authenticators can be separated with comma",
        env_var="AAD_AUTHENTICATORS",
    )
    parser_authentication.add_argument(
        "--auth-token",
        action="store_true",
        help="Use auth token to re-authenticate clients",
        env_var="AAD_AUTH_TOKEN",
    )
    parser_authentication.add_argument(
        "--auth-token-livetime",
        type=int,
        default=86400,
        help="Livetime of auth tokens in seconds",
        env_var="AAD_AUTH_TOKEN_LIFETIME",
    )
    parser_authentication.add_argument(
        "--remember-user",
        action="store_true",
        help=
        "If user authenticated once, the users refresh token is used to reauthenticate silently if possible.",
        env_var="AAD_REMEMBER_USER",
    )
    parser_authentication.add_argument(
        "--verify-common-name",
        action="store_true",
        help="Check if common_name matches Azure AD UPN",
        env_var="AAD_VERIFY_COMMON_NAME",
    )

    parser_openvpn = parser.add_argument_group(
        "OpenVPN Management Interface settings")
    parser_openvpn.add_argument(
        "-H",
        "--ovpn-host",
        help="Host of OpenVPN management interface.",
        env_var="AAD_OVPN_HOST",
    )
    parser_openvpn.add_argument(
        "-P",
        "--ovpn-port",
        help="Port of OpenVPN management interface.",
        env_var="AAD_OVPN_PORT",
        type=int,
    )
    parser_openvpn.add_argument(
        "-s",
        "--ovpn-socket",
        help="Path of socket or OpenVPN management interface.",
        env_var="AAD_OVPN_SOCKET_PATH",
    )
    parser_openvpn.add_argument(
        "-p",
        "--ovpn-password",
        help="Passwort for OpenVPN management interface.",
        env_var="AAD_OVPN_PASSWORD",
    )

    parser_aad = parser.add_argument_group("Azure AD settings")
    parser_aad.add_argument(
        "--client-id",
        required=True,
        help="Client ID of application.",
        env_var="AAD_CLIENT_ID",
    )
    parser_aad.add_argument(
        "--token-authority",
        default=os.environ.get(
            "authority",
            default="https://login.microsoftonline.com/organizations"),
        env_var="AAD_TOKEN_AUTHORITY",
        help=
        "A URL that identifies a token authority. It should be of the format "
        "https://login.microsoftonline.com/your_tenant. By default, we will use "
        "https://login.microsoftonline.com/organizations",
    )
    parser_aad.add_argument(
        "--graph-endpoint",
        default="https://graph.microsoft.com/v1.0/",
        env_var="AAD_GRAPH_ENDPOINT",
        help="Endpoint of the graph API. See: "
        "https://developer.microsoft.com/en-us/graph/graph-explorer",
    )

    parser_prometheus = parser.add_argument_group("Prometheus settings")
    parser_prometheus.add_argument(
        "--prometheus",
        action="store_true",
        env_var="AAD_PROMETHEUS_ENABLED",
        help="Enable prometheus statistics",
    )
    parser_prometheus.add_argument(
        "--prometheus-listen-addr",
        env_var="AAD_PROMETHEUS_LISTEN_HOST",
        default="",
        help="prometheus listen addr",
    )
    parser_prometheus.add_argument(
        "--prometheus-listen-port",
        type=int,
        env_var="AAD_PROMETHEUS_PORT",
        help=" prometheus statistics",
        default=9723,
    )
    parser_prometheus.add_argument(
        "--log-level",
        default=logging.INFO,
        type=lambda x: getattr(logging, x),
        env_var="AAD_LOG_LEVEL",
        help="Configure the logging level.",
    )

    options = parser.parse_args()

    # convert all configured loggers to use a background thread
    setup_logging_queues()

    logging.basicConfig(level=options.log_level,
                        format="%(asctime)s %(levelname)s %(message)s")

    if options.prometheus:
        start_http_server(options.prometheus_listen_port,
                          options.prometheus_listen_addr)
        i = Info("openvpn_auth_azure_ad_version",
                 "info of openvpn-auth-azure-ad")
        i.info({"version": __version__})

    app = msal.PublicClientApplication(options.client_id,
                                       authority=options.token_authority)

    authenticator = AADAuthenticator(
        app,
        options.graph_endpoint,
        options.authenticators,
        options.verify_common_name,
        options.auth_token,
        options.auth_token_livetime,
        options.remember_user,
        options.threads,
        options.ovpn_host,
        options.ovpn_port,
        options.ovpn_socket,
        options.ovpn_password,
    )

    authenticator.run()
    "inventory_ingress_message_parsing_failures",
    "Total amount of failures parsing ingress messages", ["cause"])
add_host_success = Counter("inventory_ingress_add_host_successes",
                           "Total amount of successfully added hosts",
                           ["result", "reporter"])
add_host_failure = Counter("inventory_ingress_add_host_failures",
                           "Total amount of failures adding hosts",
                           ["cause", "reporter"])
ingress_message_handler_success = Counter(
    "inventory_ingress_message_handler_successes",
    "Total amount of successfully handled messages from the ingress queue",
)
ingress_message_handler_failure = Counter(
    "inventory_ingress_message_handler_failures",
    "Total amount of failures handling messages from the ingress queue")
ingress_message_handler_time = Summary(
    "inventory_ingress_message_handler_seconds",
    "Total time spent handling messages from the ingress queue")
version = Info("inventory_mq_service_version",
               "Build version for the inventory message queue service")
version.info({"version": get_build_version()})
egress_message_handler_success = Counter(
    "inventory_egress_message_handler_successes",
    "Total amount of messages successfully written to the egress queue")
egress_message_handler_failure = Counter(
    "inventory_egress_message_handler_failures",
    "Total amount of failures while writing messages to the egress queue")
event_serialization_time = Summary(
    "inventory_event_serialization_seconds",
    "Time spent parsing a message from the egress queue", ["event_type"])
Example #24
class BondingTR181Collector(BaseCollector):
    METRICS_SUBSYSTEM = 'bonding'
    ENDPOINT = 'bonding_tr181'

    def __init__(self, client: Client):
        super().__init__(client)

        self._info = Info(
            namespace=self.METRICS_NAMESPACE,
            name=self.METRICS_SUBSYSTEM,
            documentation='General information about the bonding system')
        self._error_info = Info(namespace=self.METRICS_NAMESPACE,
                                subsystem=self.METRICS_SUBSYSTEM,
                                name='error',
                                documentation='Last Error Info')
        self._hello_info = Info(namespace=self.METRICS_NAMESPACE,
                                subsystem=self.METRICS_SUBSYSTEM,
                                name='hello',
                                documentation='Hello Info')

        self._enabled = Gauge(namespace=self.METRICS_NAMESPACE,
                              subsystem=self.METRICS_SUBSYSTEM,
                              name='enabled',
                              documentation='Bonding enabled')
        self._rttswitch = Gauge(namespace=self.METRICS_NAMESPACE,
                                subsystem=self.METRICS_SUBSYSTEM,
                                name='rttswitch',
                                documentation='RTTSwitch')
        self._rtt = Gauge(namespace=self.METRICS_NAMESPACE,
                          subsystem=self.METRICS_SUBSYSTEM,
                          name='rtt',
                          documentation='current rtt')
        self._rttthre = Gauge(namespace=self.METRICS_NAMESPACE,
                              subsystem=self.METRICS_SUBSYSTEM,
                              name='rtt_threshold',
                              documentation='RTT Threshold')
        self._bwcalcula = Gauge(namespace=self.METRICS_NAMESPACE,
                                subsystem=self.METRICS_SUBSYSTEM,
                                name='bandwidth_calculation_interval',
                                documentation='Bandwidth Calculation Interval')
        self._bandwidth = Gauge(namespace=self.METRICS_NAMESPACE,
                                subsystem=self.METRICS_SUBSYSTEM,
                                name='bandwidth_available',
                                documentation='AvailableBW')
        self._hello_interval = Gauge(namespace=self.METRICS_NAMESPACE,
                                     subsystem=self.METRICS_SUBSYSTEM,
                                     name='hello_interval',
                                     documentation='Hello interval')
        self._idle_hello_interval = Gauge(namespace=self.METRICS_NAMESPACE,
                                          subsystem=self.METRICS_SUBSYSTEM,
                                          name='idle_hello_interval',
                                          documentation='IdleHelloInterval')
        self._hello_retry_times = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='hello_retry_times',
            documentation='HelloRetryTimes',
        )
        self._idle_hello_traffic_interval = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='idle_hello_traffic_interval',
            documentation='IdleHelloTrafficInterval')

        self._interface_number_of_entries = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='interface_entries',
            documentation='InterfaceNumberOfEntries')

        self._queue_skb_timeout = Gauge(namespace=self.METRICS_NAMESPACE,
                                        subsystem=self.METRICS_SUBSYSTEM,
                                        name='skb_queue_timeout',
                                        documentation='QueueSkbTimeOut')

    def _process_data(self, data):
        self._enabled.set(data['enable1'])
        del data['enable1']
        self._rttswitch.set(data['rttswitch'])
        del data['rttswitch']
        self._rtt.set(data['rtt'])
        del data['rtt']
        self._rttthre.set(data['rttthre'])
        del data['rttthre']
        self._bwcalcula.set(data['bwcalcula'])
        del data['bwcalcula']
        self._bandwidth.set(data['bw'])
        del data['bw']
        self._hello_interval.set(data['hellointerval'])
        del data['hellointerval']
        self._idle_hello_interval.set(data['idlehellointerval'])
        del data['idlehellointerval']
        self._hello_retry_times.set(data['helloretrytimes'])
        del data['helloretrytimes']
        self._idle_hello_traffic_interval.set(data['idlehellotrafficinterval'])
        del data['idlehellotrafficinterval']
        self._interface_number_of_entries.set(data['num_entry'])
        del data['num_entry']
        self._queue_skb_timeout.set(data['QueueSkbTimeOut'])
        del data['QueueSkbTimeOut']

        del data['status1']  # collected within BondingTunnelCollector

        self._error_info.info({'last_error_info': data['errorinfo']})
        del data['errorinfo']

        self._hello_info.info({'status': data['hellostatus']})
        del data['hellostatus']

        self._info.info(data)
Example #25
    # NOTE: truncated in the source; this is the tail of a Bottle logging
    # plugin whose wrapper function is returned here.
    return _log_to_logger


app = Bottle()
app.install(log_to_logger)

# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('gitlabservice_request_processing_seconds',
                       'Time spent processing request')
REQUEST_TIME.observe(4.7)
#IN_PROGRESS = Gauge("gitlabservice_inprogress_requests", "help")
REQUESTS = Counter('gitlabservice_http_requests_total',
                   'Description of counter', ['method', 'endpoint'])
INFO = Info('gitlabservice_version', 'Description of info')
INFO.info({'version': '1.1', 'port': '3001'})


def enable_cors(fn):
    def _enable_cors(*args, **kwargs):
        # set CORS headers
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers[
            'Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
        response.headers[
            'Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'

        if bottle.request.method != 'OPTIONS':
            # actual request; reply with the actual response
            return fn(*args, **kwargs)
Example #26
class BaseMetrics:
    """
    Base collection of extractor metrics. The class also spawns a collector thread on init that regularly fetches
    process information and updates the ``process_*`` gauges.

    To create a set of metrics for an extractor, create a subclass of this class.

    **Note that only one instance of this class (or any subclass) can exist simultaneously**

    The collection includes the following metrics:
     * startup:                     Startup time (unix epoch)
     * finish:                      Finish time (unix epoch)
     * process_num_threads          Number of active threads. Set automatically.
     * process_memory_bytes         Memory usage of extractor. Set automatically.
     * process_cpu_percent          CPU usage of extractor. Set automatically.

    Args:
        extractor_name: Name of extractor, used to prefix metric names
        process_scrape_interval: Interval (in seconds) between each fetch of data for the ``process_*`` gauges
    """
    def __init__(self,
                 extractor_name: str,
                 extractor_version: str,
                 process_scrape_interval: float = 15):
        extractor_name = extractor_name.strip().replace(" ", "_")

        self.startup = Gauge(
            f"{extractor_name}_start_time",
            "Timestamp (seconds) of when the extractor last started")
        self.finish = Gauge(
            f"{extractor_name}_finish_time",
            "Timestamp (seconds) of then the extractor last finished cleanly")

        self._process = psutil.Process(os.getpid())

        self.process_num_threads = Gauge(f"{extractor_name}_num_threads",
                                         "Number of threads")
        self.process_memory_bytes = Gauge(f"{extractor_name}_memory_bytes",
                                          "Memory usage in bytes")
        self.process_cpu_percent = Gauge(f"{extractor_name}_cpu_percent",
                                         "CPU usage percent")

        self.info = Info(f"{extractor_name}_info",
                         "Information about running extractor")
        self.info.info({
            "extractor_version": extractor_version,
            "extractor_type": extractor_name
        })

        self.process_scrape_interval = process_scrape_interval
        self._start_proc_collector()

        self.startup.set_to_current_time()

    def _proc_collect(self) -> None:
        """
        Collect values for process metrics
        """
        while True:
            self.process_num_threads.set(self._process.num_threads())
            self.process_memory_bytes.set(self._process.memory_info().rss)
            self.process_cpu_percent.set(self._process.cpu_percent())

            sleep(self.process_scrape_interval)

    def _start_proc_collector(self) -> None:
        """
        Start a thread that collects process metrics at a regular interval
        """
        thread = threading.Thread(target=self._proc_collect,
                                  name="ProcessMetricsCollector",
                                  daemon=True)
        thread.start()
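A minimal usage sketch of this class (illustrative names and version; finish would typically be set on clean shutdown):

metrics = BaseMetrics('my_extractor', '1.2.3')
# ... run the extractor ...
metrics.finish.set_to_current_time()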
Example #27
    # NOTE: the opening of this snippet is truncated; it builds an argparse
    # parser with at least --ip, --port and --prettyName arguments.
        '--port', help='Port where the Prometheus page will be exposed')
    parser.add_argument('--prettyName',
                        help='Name of the device in Prometheus')
    args = parser.parse_args()

    interval = 2  # loop delay in seconds

    # Start up the server to expose the metrics.
    print("Starting Server...")
    start_http_server(int(args.port))

    print("Init. Gauges...")
    i = Info('target_info', 'Information about the Embrionix device')
    i.info({
        'ip': args.ip,
        'fw_desc': 'desc...',
        'fw_tag': 'tag...',
        'fw_crc': 'crc...'
    })

    ping_latency_gauge = Gauge('ping_latency', 'Ping Latency')
    api_read_time = Gauge('api_read_time', 'REST api total time for all calls')

    ptp_state = Gauge('ptp_status', 'Current PTP status, 3 being locked')
    ptp_offset_from_master = Gauge('ptp_offset_from_master',
                                   'ptp offset from master')
    ptp_mean_delay = Gauge('ptp_mean_delay', 'ptp mean delay')

    sfp_p3_temperature = Gauge('sfp_temperature_p3',
                               'Temperature of SFP in port 3')
    sfp_p3_vcc = Gauge('sfp_vcc_p3', 'VCC voltage')
    sfp_p3_txpwr = Gauge('sfp_txpwr_p3', 'SFP Tx Power')
Example #28
    network = get_Dict_Value(conStats['networks'], 0)
    networkReceive = bytesToMB(network['rx_bytes'])
    networkSend = bytesToMB(network['tx_bytes'])

    containerList.append(
        Container(con.id, con.name, con.status, cpu, memory, ioWrite, ioRead,
                  networkReceive, networkSend))

# for con in containerList:
#     c.labels(con.id,con.name,con.status,con.cpu,con.memory,con.ioWrite, con.ioRead,con.networkSend,con.networkReceive).inc()

for con in containerList:
    # jsonData = json.dumps(con, default=jsonDefault)
    # jsonToPython = json.loads(jsonData)
    i = Info('docker_container_' + con.name, 'Docker container')
    i.info(toJSON(con))

    #jsonString = containerList.toJSON()

    # Create a metric to track time spent and requests made.
    #REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')

    # c = Counter('my_failures', 'Description of counter')
    # c.inc()     # Increment by 1
    # c.inc(1.6)  # Increment by given value

    # g = Gauge('my_inprogress_requests', 'Description of gauge')
    # g.inc()      # Increment by 1
    # g.dec(10)    # Decrement by given value
    # g.set(4.2)   # Set to a given value
Example #29
class DslCollector(BaseCollector):
    METRICS_SUBSYSTEM = 'dsl'

    def __init__(self, client: Client):
        super().__init__(client)

        self._connection_info = Info(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='connection',
            documentation='Connection information',
        )

        self._actual_data_rate = Gauge(namespace=self.METRICS_NAMESPACE,
                                       subsystem=self.METRICS_SUBSYSTEM,
                                       name='actual_data_rate',
                                       unit='kbps',
                                       documentation='Actual data rate',
                                       labelnames=['direction'])
        self._attainable_data_rate = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='attainable_data_rate',
            unit='kbps',
            documentation='Attainable data rate',
            labelnames=['direction'])
        self._snr = Gauge(namespace=self.METRICS_NAMESPACE,
                          subsystem=self.METRICS_SUBSYSTEM,
                          name='snr',
                          documentation='SNR Margin',
                          labelnames=['direction'])
        self._signal = Gauge(namespace=self.METRICS_NAMESPACE,
                             subsystem=self.METRICS_SUBSYSTEM,
                             name='signal',
                             documentation='Signal level',
                             labelnames=['direction'])
        self._line = Gauge(namespace=self.METRICS_NAMESPACE,
                           subsystem=self.METRICS_SUBSYSTEM,
                           name='line',
                           documentation='Line Attenuation',
                           labelnames=['direction'])
        self._fec_size = Gauge(namespace=self.METRICS_NAMESPACE,
                               subsystem=self.METRICS_SUBSYSTEM,
                               name='fec_size',
                               documentation='FEC Size',
                               labelnames=['direction'])
        self._codeword = Gauge(namespace=self.METRICS_NAMESPACE,
                               subsystem=self.METRICS_SUBSYSTEM,
                               name='codeword_size',
                               documentation='Codeword size',
                               labelnames=['direction'])
        self._interleave = Gauge(namespace=self.METRICS_NAMESPACE,
                                 subsystem=self.METRICS_SUBSYSTEM,
                                 name='interleave',
                                 documentation='Interleave delay',
                                 labelnames=['direction'])
        self._crc_error_count = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='crc_error_count',
            documentation='CRC (Cyclic Redundancy Check) error count',
            labelnames=['direction'])
        self._hec_error_count = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='hec_error_count',
            documentation='HEC (Header Error Correction) error count',
            labelnames=['direction'])
        self._fec_error_count = Gauge(
            namespace=self.METRICS_NAMESPACE,
            subsystem=self.METRICS_SUBSYSTEM,
            name='fec_error_count',
            documentation='FEC (Forward Error Correction) error count',
            labelnames=['direction'])

    def _process_data(self, data):
        connection = data['Connection']
        line = data['Line']

        self._connection_info.info(connection)

        self._actual_data_rate.labels('upload').set(line['uactual'])
        self._actual_data_rate.labels('download').set(line['dactual'])

        self._attainable_data_rate.labels('upload').set(line['uattainable'])
        self._attainable_data_rate.labels('download').set(line['dattainable'])

        self._snr.labels('upload').set(line['uSNR'])
        self._snr.labels('download').set(line['dSNR'])

        self._signal.labels('upload').set(line['uSignal'])
        self._signal.labels('download').set(line['dSignal'])

        self._line.labels('upload').set(line['uLine'])
        self._line.labels('download').set(line['dLine'])

        self._fec_size.labels('upload').set(line['uFEC_size'])
        self._fec_size.labels('download').set(line['dFEC_size'])

        self._codeword.labels('upload').set(line['uCodeword'])
        self._codeword.labels('download').set(line['dCodeword'])

        self._interleave.labels('upload').set(line['uInterleave'])
        self._interleave.labels('download').set(line['dInterleave'])

        self._crc_error_count.labels('upload').set(line['uCRC'])
        self._crc_error_count.labels('download').set(line['dCRC'])

        self._hec_error_count.labels('upload').set(line['uHEC'])
        self._hec_error_count.labels('download').set(line['dHEC'])

        self._fec_error_count.labels('upload').set(line['uFEC'])
        self._fec_error_count.labels('download').set(line['dFEC'])
Example #30
    # NOTE: the opening of this snippet is truncated in the source.
    snode = Gauge('simplivity_node', 'SimpliVity Node Data',
                  ['nodename', 'nodemetric'])
    svm = Gauge('simplivity_vm', 'SimpliVity VM Data', ['vmname', 'vmmetric'])
    sdatastore = Gauge('simplivity_datastore',
                       'SimpliVity Datastore Data - Sizes in GB',
                       ['dsname', 'dsmetric'])
    delta = Gauge('ConnectorRuntime',
                  'Time required for last data collection in seconds')
    icon = Info('Connector', 'Connector Parameter Info')
    ivm = Info('vm', 'Additional VM Info', ['vm'])
    inode = Info('node', 'Additional Node Info', ['node'])

    icon.info({
        'Monitor': monitor,
        'key': keyfile,
        'xml': xmlfile,
        'limit': limit,
        'oxl': xoffset
    })
    id_node = {}
    replica = {}
    """
    Start an endless loop to capture the current status every TIME_RANGE
    Errors will be catched with an error routine
    Please note that the connection must be refreshed after 24h or afte 10 minutes inactivity.
    """
    while True:
        try:
            t0 = time.time()
            c.inc()