Example #1
def main():
    """Main program.

    Parse arguments, start webserver to serve /metrics.
    """
    args = ARGS.parse_args()

    if args.level >= 2:
        logging.basicConfig(level=logging.DEBUG)
    elif args.level == 1:
        logging.basicConfig(level=logging.INFO)
    elif args.level == 0:
        logging.basicConfig(level=logging.WARNING)

    LOG.debug('Starting HTTP server')
    httpd_thread = start_http_server(args.port, addr=args.addr)

    LOG.debug('Registering StatsPluginCollector')
    REGISTRY.register(StatsPluginCollector(args.endpoint))

    if not args.no_procstats:
        LOG.debug('Registering ProcessCollector')
        REGISTRY.register(ProcessCollector(
            pid=lambda: get_ts_pid(args.pidfile),
            namespace='trafficserver'))

    LOG.info('Listening on :{port}'.format(port=args.port))

    # Wait for the webserver
    httpd_thread.join()
Example #2
def main():
    """
    The symbols list contains stock symbols as used by the Google API.
    Each element should be an 'EXCHANGE:SYMBOL' string, for example:

        ['NASDAQ:GOOG', 'NASDAQ:CSCO', 'NYSE:IBM', 'BCBA:YPFD']
    """
    start_http_server(int(sys.argv[1]))
    REGISTRY.register(QuoteCollector())
    while True:
        time.sleep(1)
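
Note: QuoteCollector above (like most collectors in these examples) is a custom collector, i.e. any object with a collect() method that yields metric families can be passed to REGISTRY.register(). Its definition is not shown here; a minimal hypothetical sketch of what such a class might look like:

from prometheus_client.core import GaugeMetricFamily

class QuoteCollector:
    """Hypothetical sketch of a custom collector."""

    def collect(self):
        # Called by the registry on every scrape of /metrics.
        quote = GaugeMetricFamily('stock_last_price',
                                  'Last traded price per symbol',
                                  labels=['symbol'])
        quote.add_metric(['NASDAQ:GOOG'], 0.0)  # placeholder value
        yield quote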
Example #3
def _app():

    class Collector:

        def __init__(self):
            self.metrics = []

        def set_metrics(self, metrics):
            self.metrics = metrics

        def collect(self):
            return self.metrics

    app = vadvisor.app.rest.app
    app.collector = Collector()
    app.metricStore = MetricStore()
    REGISTRY.register(LibvirtCollector(collector=app.collector))
    return app
Example #4
File: rest.py Project: kubevirt/vAdvisor
def make_rest_app(libvirtConnection):
    # start libvirt event broker
    broker = LibvirtEventBroker()
    Greenlet(broker.run).start()
    app.eventBroker = broker

    # Attach event store to broker
    app.eventStore = EventStore()

    def store_events():
        q = queue.Queue()
        broker.subscribe(q)
        while True:
            app.eventStore.put(q.get())

    Greenlet(store_events).start()

    # Create metric collector
    app.conn = libvirtConnection
    app.collector = Collector(app.conn)

    # Register prometheus metrics
    REGISTRY.register(LibvirtCollector(app.collector))

    # For statsd debugging
    app.statsd = StatsdCollector(app.collector)

    # Collect metrics every second and store them in the metrics store
    app.metricStore = MetricStore()

    def store_metrics():
        while True:
            try:
                app.metricStore.put(app.collector.collect())
            except Exception as e:
                logging.error(e)
                sleep(5)
            sleep(1)

    Greenlet(store_metrics).start()

    # Add gzip support
    mime_types = ['application/json', 'text/plain']
    return gzip(mime_types=mime_types, compress_level=9)(app)
Example #5
 def __init__(self, port=8000, counters=None):
     """Start the http server for scraping.

     The port opened here should be scraped by Prometheus,
     e.g. port 8000 could have:

     scrape_configs:
       - job_name: 'local'
         scrape_interval: 5s
         static_configs:
         - targets: ['localhost:8000']

     The counters dict passed in will be used to keep track of counters
     and will be exposed to Prometheus when scraped.
     """
     start_http_server(port)
     # keep data for scrape_interval * scrape_amount
     # (scrape_interval is found in /etc/prometheus/prometheus.yml,
     # when writing it is 5)
     self.scrape_amount = 60
     self.scrape_count = self.scrape_amount // 2
     self.data = [[] for _ in range(self.scrape_amount)]
     self.counters = counters if counters is not None else {}
     REGISTRY.register(self)
Example #6
 def __init__(self,
              listen_address: str = "0.0.0.0",
              http_port: int = 8080,
              **kwargs) -> None:
     """
     Initialize Metrics JustInTime Collector
     :param listen_address: address that the server is reachable on
     :param http_port: port that the http server runs on
     """
     self.readers: tp.List[BaseReader] = []
     if kwargs:
         logger.warning(
             f'Unknown Metrics configuration parameter{"s" if len(kwargs) > 1 else ""}: {", ".join(kwargs.keys())}'
         )
     # Initialize Registry
     super().__init__(auto_describe=True, target_info=None)
     # Register as registry
     REGISTRY.register(self)
     logger.info(
         f"Starting Metrics HTTP Server on {listen_address}:{http_port}")
     # Start http server in separate thread
     start_wsgi_server(addr=listen_address, port=http_port)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l',
                        '--listen',
                        metavar='ADDRESS',
                        help='Listen on this address',
                        default=':8000')
    parser.add_argument('-u',
                        '--uri',
                        default='/',
                        help='The URI to check for POSTs coming from Druid')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug logging')
    parser.add_argument('-e',
                        '--encoding',
                        default='utf-8',
                        help='Encoding of the Druid POST JSON data.')
    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    collect_metrics_from = []

    address, port = args.listen.split(':', 1)
    log.info('Starting druid_exporter on %s:%s', address, port)

    druid_collector = collector.DruidCollector()
    REGISTRY.register(druid_collector)
    prometheus_app = make_wsgi_app()
    druid_wsgi_app = DruidWSGIApp(args.uri, druid_collector, prometheus_app,
                                  args.encoding)

    httpd = make_server(address, int(port), druid_wsgi_app)
    httpd.serve_forever()
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Borg exporter for Prometheus')
    parser.add_argument('-p',
                        '--port',
                        help=f'exporter exposed port (default {PORT})',
                        type=int,
                        default=PORT)
    parser.add_argument('-c',
                        '--conf',
                        help=f'configuration file (default {CONF})',
                        type=argparse.FileType('r'),
                        default=CONF)
    args = parser.parse_args()

    data = safe_load(args.conf)

    REGISTRY.register(BorgCollector(data))

    app = make_wsgi_app()
    httpd = make_server('', args.port, app)
    httpd.serve_forever()
Example #9
def main():
    parser = argparse.ArgumentParser(
        description=
        "Run condor exporter to expose metrics for prometheus consumption")
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=9118,
                        required=False,
                        help='Specify a port to be used. Defaults to 9118')
    parser.add_argument(
        '-a',
        '--host',
        type=str,
        default='localhost',
        required=False,
        help='Host address to listen on. Defaults to localhost')
    parser.add_argument('-c',
                        '--collector',
                        type=str,
                        default='',
                        required=False,
                        help='Condor collector address. Defaults to localhost')
    args = parser.parse_args()
    port = args.port
    address = args.host
    collector_address = args.collector

    try:
        REGISTRY.register(CondorCollector(collector_address))
        app = make_wsgi_app()
        httpd = make_server('', port, app)
        print("Exporter listening on %s:%d" % (address, port))
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("Interrupted, Shutting down")
        exit(0)
Example #10
def main():
    log.info('Prometheus File Content Exporter http server started',
             extra=dict(port=EXPORTER_PORT))

    if not EXPORTER_CONFIG:
        log.fatal(
            'This exporter needs config file provided. Please read README.md for details.'
        )
        sys.exit(1)

    config = Config(EXPORTER_CONFIG)

    start_http_server(EXPORTER_PORT)
    file_content_collector = FileContentMetricsCollector(config.files)
    REGISTRY.register(file_content_collector)

    # Register signal handler
    signal_handler = SignalHandler()

    while not signal_handler.is_shutting_down():
        time.sleep(1)

    log.info('Prometheus File Content Exporter shutdown')
Example #11
def main():
    opts = parse_opts(sys.argv[1:])
    init_logger(opts.log_level)

    scheduler = Scheduler()

    def sigterm_handler(signum, frame):
        if scheduler and signal.SIGTERM == signum:
            scheduler.shutdown()

    signal.signal(signal.SIGTERM, sigterm_handler)

    jae_collector = JaeCollector()
    register(jae_collector, opts)

    # register jae to Prometheus registry
    REGISTRY.register(jae_collector)

    scheduler.schedule(jae_collector, int(opts.interval))
    scheduler.start()

    start_http_server(int(opts.port))
    sys.exit(scheduler.wait())
Example #12
def create_app():
    app = Flask(__name__)
    app.app_context().push()

    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers = gunicorn_logger.handlers
    app.logger.setLevel(gunicorn_logger.level)

    config_path = os.environ.get(
        'PROMETHEUS_VIEWPOWER_CONFIGFILE') or 'config.json'
    try:
        with open(config_path, 'r') as config_file:
            config = json.load(config_file)
    except IOError:
        print('Failed opening {}'.format(config_path))
        sys.exit(4)

    REGISTRY.register(collector.ViewPowerCollector(config))

    app.wsgi_app = DispatcherMiddleware(app.wsgi_app,
                                        {'/metrics': make_wsgi_app()})

    return app
Example #13
def cli(**settings):
    """Kayako metrics exporter for Prometheus"""
    if settings['version']:
        click.echo('Version %s' % kayako_exporter.__version__)
        return

    if not settings['url']:
        click.echo('Please provide Kayako API URL')
        return
    if not settings['login']:
        click.echo('Please provide Kayako username')
        return
    if not settings['password']:
        click.echo('Please provide Kayako account password')
        return

    if settings['verbose']:
        handler = logging.StreamHandler()
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s', "%Y-%m-%d %H:%M:%S"))

    click.echo('Exporter for {base_url}, user: {login}, password: ***'.format(
        base_url=settings['url'].rstrip('/'),
        login=settings['login'],
        password=settings['password']
    ))

    REGISTRY.register(KayakoCollector(
        base_url=settings['url'].rstrip('/'),
        login=settings['login'],
        password=settings['password'],
        department_ids=settings['department_id'],
    ))
    httpd = HTTPServer(('', int(settings['port'])), MetricsHandler)
    click.echo('Exporting Kayako metrics on http://0.0.0.0:{}'.format(settings['port']))
    httpd.serve_forever()
Example #14
async def start_metrics_server(server, host, port):
    collector = CustomCollector(server)
    REGISTRY.register(collector)

    app = web.Application()
    app.broker = server

    app.router.add_get('/metrics', metrics)
    app.router.add_get('/healthz', healthz)

    runner = web.AppRunner(app, access_log=None)
    await runner.setup()

    site = web.TCPSite(runner, host, port)

    await site.start()

    async def close():
        try:
            await runner.cleanup()
        finally:
            REGISTRY.unregister(collector)

    return close
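
Note: the metrics and healthz handlers registered above are not shown. A minimal sketch of what they might look like, assuming plain prometheus_client rendering (the handler names match the example; everything else is illustrative):

from aiohttp import web
from prometheus_client import REGISTRY, generate_latest, CONTENT_TYPE_LATEST

async def metrics(request):
    # Render the default registry in the Prometheus text exposition format.
    body = generate_latest(REGISTRY)
    # aiohttp rejects a content_type containing parameters, so set the
    # full header (including charset) explicitly.
    return web.Response(body=body,
                        headers={'Content-Type': CONTENT_TYPE_LATEST})

async def healthz(request):
    return web.Response(text='ok')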
Example #15
def serve(
    application: "WSGIApplication",
    port: int = 8000,
    metrics_port: int = 9000,
    access_log_formatter: LogFormatter = proxiedLogFormatter,
    health_check_path: str = "/healthz",
):
    # Quiet the Twisted factory logging.
    Factory.noisy = False

    # Start logging.
    logging.basicConfig(level=logging.INFO)
    observers = [STDLibLogObserver()]
    logger.globalLogBeginner.beginLoggingTo(observers)

    # Create the server.
    pool = threadpool.ThreadPool()
    reactor.callWhenRunning(pool.start)
    _listen_wsgi(
        reactor,
        pool,
        application,
        port,
        access_log_formatter,
        health_check_path,
    )
    _listen_metrics(reactor, metrics_port)

    # Register the metrics collector.
    REGISTRY.register(TwistedThreadPoolCollector(pool))

    # Start the main loop.
    reactor.run()

    # Clean up when exiting.
    pool.stop()
Example #16
                    mc_custom.add_sample('mc_custom',
                                         value=value,
                                         labels={'stat': stat})
        return [
            blocks_mined, blocks_picked_up, player_deaths, player_jumps,
            cm_traveled, player_xp_total, player_current_level,
            player_food_level, player_health, player_score, entities_killed,
            damage_taken, damage_dealt, blocks_crafted, player_playtime,
            player_advancements, player_slept, player_used_crafting_table,
            player_quests_finished, mc_custom
        ]

    def collect(self):
        for player in self.get_players():
            for metric in self.update_metrics_for_player(player):
                yield metric
        for metric in self.get_server_stats():
            yield metric


if __name__ == '__main__':
    if all(x in os.environ for x in ['RCON_HOST', 'RCON_PASSWORD']):
        print("RCON is enabled for " + os.environ['RCON_HOST'])

    start_http_server(8000)
    REGISTRY.register(MinecraftCollector())
    print("Exporter started on Port 8000")
    while True:
        time.sleep(1)
        schedule.run_pending()
Example #17
            mtype = 'gauge'
            if mtr in ['num_requests', 'num_failures']:
                mtype = 'counter'
            metric = Metric('locust_requests_' + mtr, 'Locust requests ' + mtr,
                            mtype)
            for stat in response['stats']:
                if 'Total' not in stat['name']:
                    metric.add_sample('locust_requests_' + mtr,
                                      value=stat[mtr],
                                      labels={
                                          'path': stat['name'],
                                          'method': stat['method']
                                      })
            yield metric


if __name__ == '__main__':
    # Usage: locust_exporter.py <port> <locust_host:port>
    if len(sys.argv) != 3:
        print('Usage: locust_exporter.py <port> <locust_host:port>')
        exit(1)
    else:
        try:
            start_http_server(int(sys.argv[1]))
            REGISTRY.register(LocustCollector(str(sys.argv[2])))
            print("Connecting to locust on: " + sys.argv[2])
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            exit(0)
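
Note: many of these collectors build samples with the low-level Metric/add_sample API seen above. The *MetricFamily helpers from prometheus_client.core express the same thing with less boilerplate; a small equivalent sketch (metric name and label values are illustrative):

from prometheus_client.core import CounterMetricFamily

# Roughly equivalent to Metric('locust_requests_num_requests', ..., 'counter')
# followed by add_sample() calls carrying 'path' and 'method' labels.
fam = CounterMetricFamily('locust_requests_num_requests',
                          'Locust requests num_requests',
                          labels=['path', 'method'])
fam.add_metric(['/api', 'GET'], 42.0)  # label values, then sample value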
Example #18
                    container_name, pod_name)
                metric = Metric(metric_name, metric_description, 'summary')
                metric.add_sample(metric_name, value=current_val, labels={})
                yield metric

        # Shipshift up/down status
        if num_container_down == 0:
            shipshift_status = 1
        elif num_container_down == num_container:
            shipshift_status = 0
        else:
            shipshift_status = 0.5

        metric = Metric('Shipshift_status', 'Current status of Shipshift',
                        'summary')
        metric.add_sample('Shipshift_status',
                          value=shipshift_status,
                          labels={})
        yield metric


if __name__ == '__main__':
    # Usage: json_exporter.py port endpoint
    start_http_server(8080)
    # direct endpoint to Shipshift on Upshift
    shipshift_endpoint = 'https://upshift.engineering.redhat.com/api/v1/namespaces/dh-stage-shipshift/pods'
    REGISTRY.register(JsonCollector(shipshift_endpoint))

    while True:
        time.sleep(1)
Example #19
def _setup_jemalloc_stats() -> None:
    """Checks to see if jemalloc is loaded, and hooks up a collector to record
    statistics exposed by jemalloc.
    """

    global _JEMALLOC_STATS

    # Try to find the loaded jemalloc shared library, if any. We need to
    # introspect into what is loaded, rather than loading whatever is on the
    # path, as if we load a *different* jemalloc version things will seg fault.

    # We look in `/proc/self/maps`, which only exists on linux.
    if not os.path.exists("/proc/self/maps"):
        logger.debug("Not looking for jemalloc as no /proc/self/maps exist")
        return

    # We're looking for a path at the end of the line that includes
    # "libjemalloc".
    regex = re.compile(r"/\S+/libjemalloc.*$")

    jemalloc_path = None
    with open("/proc/self/maps") as f:
        for line in f:
            match = regex.search(line.strip())
            if match:
                jemalloc_path = match.group()

    if not jemalloc_path:
        # No loaded jemalloc was found.
        logger.debug("jemalloc not found")
        return

    logger.debug("Found jemalloc at %s", jemalloc_path)

    jemalloc_dll = ctypes.CDLL(jemalloc_path)

    stats = JemallocStats(jemalloc_dll)
    _JEMALLOC_STATS = stats

    class JemallocCollector(Collector):
        """Metrics for internal jemalloc stats."""

        def collect(self) -> Iterable[Metric]:
            stats.refresh_stats()

            g = GaugeMetricFamily(
                "jemalloc_stats_app_memory_bytes",
                "The stats reported by jemalloc",
                labels=["type"],
            )

            # Read the relevant global stats from jemalloc. Note that these may
            # not be accurate if python is configured to use its internal small
            # object allocator (which is on by default, disable by setting the
            # env `PYTHONMALLOC=malloc`).
            #
            # See the jemalloc manpage for details about what each value means,
            # roughly:
            #   - allocated ─ Total number of bytes allocated by the app
            #   - active ─ Total number of bytes in active pages allocated by
            #     the application, this is bigger than `allocated`.
            #   - resident ─ Maximum number of bytes in physically resident data
            #     pages mapped by the allocator, comprising all pages dedicated
            #     to allocator metadata, pages backing active allocations, and
            #     unused dirty pages. This is bigger than `active`.
            #   - mapped ─ Total number of bytes in active extents mapped by the
            #     allocator.
            #   - metadata ─ Total number of bytes dedicated to jemalloc
            #     metadata.
            for t in (
                "allocated",
                "active",
                "resident",
                "mapped",
                "metadata",
            ):
                try:
                    value = stats.get_stat(t)
                except Exception as e:
                    # There was an error fetching the value, skip.
                    logger.warning("Failed to read jemalloc stats.%s: %s", t, e)
                    continue

                g.add_metric([t], value=value)

            yield g

    REGISTRY.register(JemallocCollector())

    logger.debug("Added jemalloc stats")
Example #20
                                     task.queued_dttm).total_seconds()
            task_scheduler_delay.add_metric([task.queue],
                                            task_scheduling_delay)
        yield task_scheduler_delay

        num_queued_tasks_metric = GaugeMetricFamily(
            'airflow_num_queued_tasks',
            'Airflow Number of Queued Tasks',
        )

        num_queued_tasks = get_num_queued_tasks()
        num_queued_tasks_metric.add_metric([], num_queued_tasks)
        yield num_queued_tasks_metric


REGISTRY.register(MetricsCollector())


class Metrics(BaseView):
    @expose('/')
    def index(self):
        return Response(generate_latest(), mimetype='text/plain')


ADMIN_VIEW = Metrics(category='Prometheus exporter', name='metrics')


class AirflowPrometheusPlugin(AirflowPlugin):
    """Airflow Pluging for collecting metrics."""

    name = 'airflow_prometheus_plugin'
Example #21
def main_loop():

    LARGE_PAYMENT_MIN_AMOUNT = 10000

    global previous_data
    previous_data = defaultdict(lambda: 0)

    global previous_payment_detail
    previous_payment_detail = defaultdict(lambda: defaultdict(lambda: 0))

    global previous_large_native_payment_detail
    previous_large_native_payment_detail = defaultdict(
        lambda: defaultdict(lambda: 0))

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--port',
                        nargs='?',
                        const=9101,
                        help='The TCP port to listen on.  Defaults to 9101.',
                        default=9101)
    args = parser.parse_args()
    log.info(args.port)

    current_data = defaultdict(lambda: 0)
    current_payment_detail = defaultdict(lambda: defaultdict(lambda: 0))
    current_large_native_payment_detail = defaultdict(
        lambda: defaultdict(lambda: 0))

    REGISTRY.register(StatsCollector())
    start_http_server(int(args.port))

    h = horizon_livenet()
    p = {
        'limit': 200,
        'cursor': 'now',
    }

    current_minute = None

    r = h.operations(sse=True, params=p)
    try:
        for resp in r:
            try:
                m = json.loads(str(resp))
            except json.decoder.JSONDecodeError:
                continue

            if m == 'hello':
                continue

            cm = m['created_at'][:-4]
            if cm != current_minute:

                log.info('minute change %s => %s' % (current_minute, cm))
                current_minute = cm

                previous_data = copy.deepcopy(current_data)
                previous_payment_detail = copy.deepcopy(current_payment_detail)
                previous_large_native_payment_detail = copy.deepcopy(
                    current_large_native_payment_detail)

                current_data = defaultdict(lambda: 0)
                current_payment_detail = defaultdict(
                    lambda: defaultdict(lambda: 0))
                current_large_native_payment_detail = defaultdict(
                    lambda: defaultdict(lambda: 0))

            op_type = m['type']

            current_data['nb_operation'] += 1
            current_data['nb_operation_%s' % op_type] += 1

            if op_type == 'payment':
                current_data['total_amount_payment'] += float(m['amount'])

                if m['asset_type'] == 'native':
                    asset = 'native'

                    v = float(m['amount'])
                    if v >= LARGE_PAYMENT_MIN_AMOUNT:

                        from_addr = m['from']
                        to_addr = m['to']
                        current_large_native_payment_detail[from_addr][
                            to_addr] += v

                else:
                    asset = m['asset_code']

                current_payment_detail[asset]['nb'] += 1
                current_payment_detail[asset]['sum'] += float(m['amount'])

    except requests.exceptions.HTTPError as e:
        log.info(str(e))
        log.info('http exception, restarting')
        return
Example #22
            yield metric


if __name__ == '__main__':
    try:
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument('--port',
                            nargs='?',
                            const=9101,
                            help='The TCP port to listen on',
                            default=9101)
        parser.add_argument('--addr',
                            nargs='?',
                            const='0.0.0.0',
                            help='The interface to bind to',
                            default='0.0.0.0')
        args = parser.parse_args()
        log.info('listening on http://%s:%s/metrics' % (args.addr, args.port))

        REGISTRY.register(CoinCollector())
        start_http_server(int(args.port), addr=args.addr)

        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #23
            yield gitlab_tag_metric

            # Get the .gitlab-ci.yml from each project in order to validate the target from the releases
            file_content = project.files.raw('.gitlab-ci.yml', 'master')
            ci_config = yaml.safe_load(file_content)
            for library_name, version_value in ci_config['variables'].items():
                if 'VERSION' in library_name:
                    library_version = version_value
                    project_version_req = Metric(
                        'project_version_req',
                        'Project versions requirements in Master branch',
                        'summary')
                    project_version_req.add_sample('project_version_req',
                                                   value=0,
                                                   labels={
                                                       'project':
                                                       str(project.name),
                                                       'library_name':
                                                       str(library_name),
                                                       'target_version':
                                                       str(library_version)
                                                   })
                    yield project_version_req


if __name__ == '__main__':
    start_http_server(6666)
    REGISTRY.register(GitlabCollector())
    while True:
        time.sleep(30)
Example #24
    def collect(self):
        c = Metric('darp_owl', 'Help text', 'summary')
        response = json.loads(requests.get('http://'+genesis_host+'/config').content.decode('UTF-8'))
        for k in response['gSRlist']:
            darp_host_id = response['gSRlist'][k]
            darp_host_list[darp_host_id] = {}
            darp_host_list[darp_host_id]['hostname'] = k
            owl = response['pulses'][k]['owls'].split(',')
            owl_values = {}
            for a in owl:
                if '=' not in a:
                    owl_values[a] = '3000'
                    continue
                owl_values[a.split('=')[0]] = a.split('=')[1]
            darp_host_list[darp_host_id]['latency'] = owl_values
        print(darp_host_list)
        for host in darp_host_list:
            for k in darp_host_list[host]['latency']:
                ll = darp_host_list[host]['latency'][k]
                c.add_sample('darp_owl', value=ll, labels={'darp_host': darp_host_list[host]['hostname'], 'dest_host': darp_host_list[k]['hostname']})
        yield c

REGISTRY.register(DarpCollector())


def main():
    start_http_server(18000)
    print('Started DARP Prometheus exporter')
    while True:
        time.sleep(10)

main()
Example #25
    # command line arguments
    parser = cli()
    args = parser.parse_args()

    # display version and exit
    if args.version is True:
        print("Version is {0}".format(__version__))
        sys.exit()

    # check if password has been set
    if args.password is None and args.password_file is None:
        parser.error('Option --password or --password-file must be set.')

    # logger configuration
    logging.basicConfig(
        format='%(asctime)s %(name)s %(levelname)s: %(message)s',
        datefmt="%Y-%m-%d %H:%M:%S")
    logger.setLevel(args.log_level)

    # get password
    status_password = get_password(args.password, args.password_file)

    start_http_server(args.port)
    REGISTRY.register(
        KannelCollector(args.target, status_password, args.filter_smsc,
                        args.box_connections, args.collect_wdp,
                        args.collect_box_uptime))

    while True:
        time.sleep(1)
Example #26
def main():
    REGISTRY.register(CsvCollector(const.csv_directory))
    app.run("0.0.0.0", port=4050)
Example #27
        # DagRun metrics
        dag_duration = GaugeMetricFamily(
            'airflow_dag_run_duration',
            'Duration of currently running dag_runs in seconds',
            labels=['dag_id', 'run_id']
        )
        driver = Session.bind.driver
        for dag in get_dag_duration_info():
            if driver in ('mysqldb', 'pysqlite'):
                dag_duration.add_metric([dag.dag_id, dag.run_id], dag.duration)
            else:
                dag_duration.add_metric([dag.dag_id, dag.run_id], dag.duration.seconds)
        yield dag_duration


REGISTRY.register(MetricsCollector())


class Metrics(BaseView):
    @expose('/')
    def index(self):
        return Response(generate_latest(), mimetype='text/plain')


ADMIN_VIEW = Metrics(category="Prometheus exporter", name="metrics")


class AirflowPrometheusPlugins(AirflowPlugin):
    '''Plugin for exposing metrics.'''
    name = "airflow_prometheus_plugin"
    operators = []
Example #28

def connect():
    logging.info('-- Mongo URI: {0}'.format(mongo_config['uri']))
    logging.info('-' * 50)
    client = MongoClient(mongo_config['uri'])

    try:
        client.server_info()
    except pymongo.errors.ServerSelectionTimeoutError as e:
        logging.error('Unable to connect to MongoDB: %s', e)
        client = None

    return client


if __name__ == '__main__':
    logging.info('-- Starting exporter')
    logging.info('-- Exporter port: {0}'.format(exporter_port))
    logging.info('-' * 50)

    start_http_server(exporter_port)
    connection = connect()
    if connection:
        REGISTRY.register(MongoDbCollector(connection))
        logging.info('-- Listening...')
        while True:
            time.sleep(1)
    else:
        logging.error('No Mongo Connection')
Example #29
 def register_slaves_metrics_collector(cls, get_slaves: Callable[[], List['app.master.slave.Slave']]):
     if not cls._slaves_collector_is_registered:
         REGISTRY.register(SlavesCollector(get_slaves))
         cls._slaves_collector_is_registered = True
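
Note: the boolean guard above exists because registering a second collector that exposes the same metric names raises ValueError. A hedged alternative sketch of the same idempotent-registration idea (helper name is illustrative):

from prometheus_client import REGISTRY

def register_once(collector):
    # A repeated registration of an identically-named collector raises
    # ValueError, which can simply be swallowed here.
    try:
        REGISTRY.register(collector)
    except ValueError:
        pass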
Example #30
        yield release_validation_metric

        push_speed_metric = CounterMetricFamily('push_speed',
                                                'To get the push speed',
                                                labels=['push_speed'])
        if 'image_push_Speed' in quayPushSpeed_cache:
            push_speed_metric.add_metric(
                ['image_push_Speed'], quayPushSpeed_cache['image_push_Speed'])
        yield push_speed_metric

        pull_speed_metric = CounterMetricFamily('pull_speed',
                                                'To get the pull speed',
                                                labels=['pull_speed'])
        if 'image_pull_Speed' in quayPullSpeed_cache:
            pull_speed_metric.add_metric(
                ['image_pull_Speed'], quayPullSpeed_cache['image_pull_Speed'])
        yield pull_speed_metric


if __name__ == '__main__':
    # Usage: json_exporter.py port endpoint
    start_http_server(9192)
    REGISTRY.register(
        JsonCollector(
            'https://git-dhc-int.app.corpintra.net/rest/api/1.0/admin/groups'))

    while True:
        quayValidation()
        time.sleep(1)
Example #31
def cli(**settings):
    """Zabbix metrics exporter for Prometheus

       Use config file to map zabbix metrics names/labels into prometheus.
       Config below transforms this:

           local.metric[uwsgi,workers,myapp,busy] = 8
           local.metric[uwsgi,workers,myapp,idle] = 6

       into familiar Prometheus gauges:

           uwsgi_workers{instance="host1",app="myapp",status="busy"} 8
           uwsgi_workers{instance="host1",app="myapp",status="idle"} 6

       YAML:

       \b
           metrics:
             - key: 'local.metric[uwsgi,workers,*,*]'
               name: 'uwsgi_workers'
               labels:
                 app: $1
                 status: $2
               reject:
                 - 'total'
    """
    if settings['version']:
        click.echo('Version %s' % zabbix_exporter.__version__)
        return

    if not validate_settings(settings):
        return

    if settings['config']:
        with open(settings['config']) as f:
            exporter_config = yaml.safe_load(f)
    else:
        exporter_config = {}

    base_logger = logging.getLogger('zabbix_exporter')
    handler = logging.StreamHandler()
    base_logger.addHandler(handler)
    base_logger.setLevel(logging.ERROR)
    handler.setFormatter(logging.Formatter('[%(asctime)s] %(message)s', "%Y-%m-%d %H:%M:%S"))
    if settings['verbose']:
        base_logger.setLevel(logging.DEBUG)

    collector = ZabbixCollector(
        base_url=settings['url'].rstrip('/'),
        login=settings['login'],
        password=settings['password'],
        verify_tls=settings['verify_tls'],
        timeout=settings['timeout'],
        **exporter_config
    )

    if settings['dump_metrics']:
        return dump_metrics(collector)

    REGISTRY.register(collector)
    httpd = HTTPServer(('', int(settings['port'])), MetricsHandler)
    click.echo('Exporter for {base_url}, user: {login}, password: ***'.format(
        base_url=settings['url'].rstrip('/'),
        login=settings['login'],
        password=settings['password']
    ))
    if settings['return_server']:
        return httpd
    click.echo('Exporting Zabbix metrics on http://0.0.0.0:{}'.format(settings['port']))
    httpd.serve_forever()
Example #32
import logging
from time import sleep

from prometheus_client import start_http_server, REGISTRY

import settings
from collector import BigBlueButtonCollector

logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s [%(levelname)s]: %(message)s")

if __name__ == '__main__':
    if settings.DEBUG:
        logging.getLogger().setLevel(logging.DEBUG)

    start_http_server(settings.PORT)
    logging.info("HTTP server started on port: {}".format(settings.PORT))

    REGISTRY.register(BigBlueButtonCollector())
    while True:
        sleep(1)
Example #33
def main():
    error_msg = "Collector could not run"
    collector_pid = os.getpid()
    pid_file = "/var/run/synology-exporter.pid"
    # Get args
    args = arg_parser()
    # Init logger
    format_logger(log_path=args.log_path, debug=args.debug)
    # Check pid
    if os.path.isfile(pid_file):
        log.error(f"[main] {error_msg}: Existing pid file is present")
        exit(1)
    # Update data collectors dict
    for metric_type, data in DATA_COLLECTORS.items():
        enabled = not getattr(args, f"disable_{metric_type}_metric")
        data.update({
            "interval": getattr(args, f"{metric_type}_refresh_interval"),
            "enabled": enabled,
        })
    # Tool starting message
    msg = [
        f"{k.capitalize()}:{v['interval']}s"
        for k, v in DATA_COLLECTORS.items() if v["enabled"]
    ]
    log.info(f"[main] Exporter starting [Refresh -> {'|'.join(msg)}]")
    # Get synology credentials from config file
    auths = get_config(args.config)
    # Get nas to collect
    synos = auths.keys()
    # Generate dump file template path
    create_dump_path(args.dump_path)
    # Init prometheus http server
    start_prometheus_server(args.port)
    # Start data collect with interval
    dump_files = {}
    for syno in synos:
        client = get_synology_client(name=syno, auth=auths[syno])
        dump_files[syno] = {}
        for metric_type, data in DATA_COLLECTORS.items():
            if not data["enabled"]:
                continue
            dump_file = os.path.join(args.dump_path,
                                     f"{syno}.{metric_type}.dump")
            data_collect = data["collector"](name=syno,
                                             client=client,
                                             dump_file=dump_file,
                                             refresh_interval=data["interval"])
            dump_files[syno][metric_type] = dump_file
            data_collect.start()
    # Wait dump files creation
    log.debug("[main] Wait for first dump file creation ..")
    sleep(2 * len(synos))
    # Get metric status
    metric_status = {k: v["enabled"] for k, v in DATA_COLLECTORS.items()}
    # Start prometheus collector
    builders = []
    for syno in synos:
        builder = SynoMetricBuilder(name=syno,
                                    metric_status=metric_status,
                                    dump_files=dump_files[syno])
        builders.append(builder)
    REGISTRY.register(CollectMany(builders))

    while True:
        sleep(30)
Example #34
def main():
    import argparse

    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--device-db",
        type=argparse.FileType("r"),
        default=None,
        help="Device database in JSON format (default: {})".format(
            DEFAULT_DEVICE_DB
        )
    )

    parser.add_argument(
        "--attr-mapping",
        type=argparse.FileType("r"),
        default=None,
        help="Attribute mapping in JSON format (default: {})".format(
            DEFAULT_ATTR_MAPPING
        )
    )

    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
        help="Increase verbosity (up to -vvv)",
    )

    parser.add_argument(
        "--journal",
        action="store_true",
        default=False,
        help="Log to systemd journal",
    )

    parser.add_argument(
        "-p", "--listen-port",
        default=9257,
        metavar="PORT",
        type=int,
        help="Port number to bind to (default: 9257)",
    )

    parser.add_argument(
        "-a", "--listen-address",
        metavar="ADDR",
        help="Address to bind to (default: 127.0.0.1)",
        default="127.0.0.1",
    )

    parser.add_argument(
        "socket",
        type=socket_path,
        help="Path to UNIX socket where the helper listens",
    )

    args = parser.parse_args()

    logging_kwargs = {}
    if args.journal:
        import systemd.journal
        logging_kwargs["handlers"] = [systemd.journal.JournalHandler()]

    logging.basicConfig(
        level={
            0: logging.ERROR,
            1: logging.WARNING,
            2: logging.INFO,
        }.get(args.verbosity, logging.DEBUG),
        **logging_kwargs
    )

    if args.device_db is None:
        logger.debug("no --device-db specified, using default %r",
                     DEFAULT_DEVICE_DB)
        try:
            args.device_db = DEFAULT_DEVICE_DB.open("r")
        except OSError as exc:
            logger.error("failed to open device database: %s", exc)
            logger.info(
                "check that it exists and is accessible, or use --device-db to"
                " specify a different database.",
            )
            sys.exit(2)

    with args.device_db as f:
        device_db = devicedb.DeviceDB(logging.getLogger("devdb"))
        try:
            device_db.load(f)
        except SyntaxError as exc:
            logger.error(
                "failed to load device database: %s", exc,
                exc_info=True,
            )
            sys.exit(2)
        else:
            logger.info(
                "device db loaded with %s",
                device_db.stats()
            )

    if args.attr_mapping is None:
        logger.debug("no --attr-mapping specified, using default %r",
                     DEFAULT_ATTR_MAPPING)
        try:
            args.attr_mapping = DEFAULT_ATTR_MAPPING.open("r")
        except OSError as exc:
            logger.error("failed to open attribute mapping: %s", exc)
            logger.info(
                "check that it exists and is accessible, or use --attr-mapping"
                " to specify a different file.",
            )
            sys.exit(2)

    with args.attr_mapping as f:
        attr_mapping = attrmeta.AttributeMapping(logging.getLogger("attrmap"))
        try:
            attr_mapping.load(f)
        except (SyntaxError, ValueError) as exc:
            logger.error(
                "failed to load attribute mapping: %s", exc,
                exc_info=True,
            )
            sys.exit(2)
        else:
            logger.info(
                "attribute_mapping loaded with %s",
                attr_mapping.stats()
            )

    REGISTRY.register(
        SMARTCollector(
            args.socket,
            device_db,
            attr_mapping,
            logger.getChild("collector")
        ),
    )

    if ":" in args.listen_address:
        class_ = HTTP6Server
    else:
        class_ = http.server.HTTPServer

    httpd = class_(
        (args.listen_address, args.listen_port),
        MetricsHandler
    )
    httpd.serve_forever()
Example #35
@app.route('/test/')
def test():
    return 'rest'


@app.route('/')
def test1():
    return 'landing page'


@app.errorhandler(500)
def handle_500(error):
    return str(error), 500


@app.route('/metrics')
def metrics():
    return Response(prometheus_client.generate_latest(),
                    mimetype=CONTENT_TYPE_LATEST)


if __name__ == '__main__':
    REGISTRY.register(CustomCollector())
    REGISTRY.unregister(PROCESS_COLLECTOR)
    REGISTRY.unregister(PLATFORM_COLLECTOR)
    REGISTRY.unregister(
        REGISTRY._names_to_collectors['python_gc_objects_collected_total'])

    app.run(host='0.0.0.0')
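
Note: unregistering PROCESS_COLLECTOR, PLATFORM_COLLECTOR and the GC collector, as above, is one way to keep /metrics limited to custom metrics. An alternative sketch is to serve a dedicated registry that never contained the defaults; this reuses CustomCollector, app, Response and CONTENT_TYPE_LATEST from the example (the route path is hypothetical, and the wiring would go before app.run):

from prometheus_client import CollectorRegistry, generate_latest

# A fresh registry starts empty, so nothing has to be unregistered.
custom_registry = CollectorRegistry()
custom_registry.register(CustomCollector())

@app.route('/metrics-custom')
def metrics_custom_only():
    return Response(generate_latest(custom_registry),
                    mimetype=CONTENT_TYPE_LATEST)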
Example #36
                          labels={})
        metric.add_sample('svc_requests_duration_seconds_sum',
                          value=response['requests_duration_milliseconds'] /
                          1000.0,
                          labels={})
        yield metric

        # Counter for the failures
        metric = Metric('svc_requests_failed_total', 'Requests failed',
                        'summary')
        metric.add_sample('svc_requests_failed_total',
                          value=response['request_failures'],
                          labels={})
        yield metric

        # Metrics with labels for the documents loaded
        metric = Metric('svc_documents_loaded', 'Documents loaded', 'gauge')
        for k, v in response['documents_loaded'].items():
            metric.add_sample('svc_documents_loaded',
                              value=v,
                              labels={'repository': k})
        yield metric


if __name__ == '__main__':
    # Usage: json_exporter.py port endpoint
    start_http_server(int(sys.argv[1]))
    REGISTRY.register(JsonCollector(sys.argv[2]))

    while True:
        time.sleep(1)
Example #37
        url = st2_host + '/api/v1/executions?status=' + status + '&parent=null'
        r = requests.get(url, headers=headers, verify=False)
        metric = Metric('jobs_running', 'Current Jobs Running', 'gauge')
        metric.add_sample('jobs_running',
                          value=float(len(r.json())),
                          labels={})
        yield metric

        status = 'failed'
        url = st2_host + '/api/v1/executions?status=' + status + '&parent=null'
        r = requests.get(url, headers=headers, verify=False)
        metric = Metric('jobs_failed', 'Number of failed jobs', 'gauge')
        metric.add_sample('jobs_failed', value=float(len(r.json())), labels={})
        yield metric

        status = 'succeeded'
        url = st2_host + '/api/v1/executions?status=' + status + '&parent=null'
        r = requests.get(url, headers=headers, verify=False)
        metric = Metric('jobs_succeeded', 'Number of succeeded jobs', 'gauge')
        metric.add_sample('jobs_succeeded',
                          value=float(len(r.json())),
                          labels={})
        yield metric


if __name__ == "__main__":
    start_http_server(8000)
    REGISTRY.register(JsonCollector())
    while True:
        time.sleep(1)
Example #38
 def run(self):
     start_http_server(self.exporter_port)
     REGISTRY.register(JsonCollector())
     while not self.stop_network_exporter.is_set():
         time.sleep(1)