Example #1
def includeme(config):
    settings = config.registry.settings
    port = settings.get('prometheus.port', None)
    if port:
        # if you don't specify port, you have to expose the metrics yourself somehow
        start_http_server(int(port))
    config.add_tween('pyramid_prometheus.tween_factory', over=EXCVIEW)
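The comment above notes that if 'prometheus.port' is unset you must expose the metrics yourself. A minimal sketch of doing that from within the Pyramid app, assuming a hypothetical /metrics route wired up in the same includeme():

from prometheus_client import REGISTRY, generate_latest
from pyramid.response import Response

def metrics_view(request):
    # Render the default registry in the Prometheus text exposition format.
    return Response(body=generate_latest(REGISTRY), content_type='text/plain')

# Hypothetical wiring inside includeme():
#   config.add_route('metrics', '/metrics')
#   config.add_view(metrics_view, route_name='metrics')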
Example #2
def start_exporter(config, port, interval):
    """ run the exporter every <interval> seconds """
    REGISTRY.register(NovaCollector(config))
    start_http_server(port)
    while True:
        # collection also happens on every scrape via the HTTP server; this
        # loop keeps the process alive and repeats every <interval> seconds
        generate_latest(REGISTRY)
        time.sleep(interval)
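NovaCollector is not shown above. A minimal sketch of the custom-collector interface that this and the later REGISTRY.register() examples rely on, using a hypothetical metric name:

from prometheus_client.core import GaugeMetricFamily

class ExampleCollector(object):
    def collect(self):
        # Called by the registry on every scrape of the exposed HTTP endpoint.
        g = GaugeMetricFamily('example_value', 'A constant example value')
        g.add_metric([], 1.0)
        yield g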
Example #3
def main():
    parser = create_parser()
    args = parser.parse_args()

    # log configuration
    loginipath = os.path.join(os.path.dirname(__file__), "logconf.ini")
    logging.config.fileConfig(
        loginipath,
        defaults={'logfilename': args.logfile}
        )
    logger = logging.getLogger("zvmExporter")

    # split address and port
    addr_rx = re.compile(
        r'(?P<addr>[a-zA-Z0-9][a-zA-Z0-9\-]*(\.[a-zA-Z0-9][a-zA-Z0-9\-]*)+)'
        r'(:(?P<port>\d+))?')
    match = addr_rx.match(args.server)
    if match:
        xcat_addr = match.group('addr')
        xcat_port = match.group('port') or '443'
    else:
        logger.info("Invalid address")
        return 1

    logger.info("Program started")

    # start collector
    REGISTRY.register(ZVMCollector(args.zhcpnode, args.username,
                                   args.password, xcat_addr, xcat_port,
                                   args.cert))
    start_http_server(args.port)
    while True:
        sleep(1)
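A hedged alternative to the address regex above using only the standard library; it assumes the same 'host[:port]' input format and the same 443 default:

from urllib.parse import urlsplit

def split_host_port(server, default_port='443'):
    # Prefix '//' so urlsplit treats the whole string as a network location.
    parts = urlsplit('//' + server)
    return parts.hostname, str(parts.port or default_port)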
Example #4
def main():
    parser = argparse.ArgumentParser(description='Feed Apache Samza metrics into Prometheus.')
    parser.add_argument('--brokers', metavar='BROKERS', type=str, required=True,
                        help='list of comma-separated kafka brokers: host[:port],host[:port],...')
    parser.add_argument('--port', metavar='PORT', type=int, nargs='?', default=8080,
                        help='port to serve metrics to Prometheus (default: 8080)')
    parser.add_argument('--topic', metavar='TOPIC', type=str, nargs='?',default='samza-metrics',
                        help='name of topic to consume (default: "samza-metrics")')
    parser.add_argument('--from-beginning', action='store_const', const=True,
                        help='consume topic from offset 0')
    parser.add_argument('--ttl', metavar='GAUGES_TTL', type=int, nargs='?',
                        help='time in seconds after which a metric (or label set) is no longer reported when not updated (default: 60s)')
    args = parser.parse_args()
    brokers = args.brokers.split(',')
    consumer = KafkaConsumer(args.topic, group_id=KAFKA_GROUP_ID, bootstrap_servers=brokers)
    start_http_server(args.port)

    set_gauges_ttl(args.ttl)

    if args.from_beginning:
        consumer.set_topic_partitions((args.topic, 0, 0)) # FIXME: beginning may not be offset 0

    start_ttl_watchdog_thread()

    try:
        consume_topic(consumer, args.brokers)
    except KeyboardInterrupt:
        pass # FIXME : should we close consumer ?

    print('Shutting down')
Example #5
def main():
    os.environ['SDL_VIDEODRIVER'] = 'dummy'
    if '--debug' in sys.argv:
        logging.basicConfig(level=logging.DEBUG)

    logger = logging.getLogger()
    settings.from_files(cfg='yaml', verbose=True)
    settings.from_args(sys.argv[1:])
    if settings.prometheus:
        start_http_server(settings.prometheus_port)

    b = Board(settings)
    http = HTTPThread(b, settings)
    http.start()
    for file in get_files(settings.yaml_directory, 'yaml'):
        sound_set = SoundSet.from_yaml(file, settings=settings)
        b.register_sound_set(sound_set)

    joystick = Joystick(settings.device_path, backend=settings.input_type,
                        mapping=settings.physical_mapping,
                        offset=settings.scancode_offset)

    b.register_joystick(joystick)
    http_joystick = Joystick(http.queue, backend='queue')
    b.register_joystick(http_joystick)
    b.run()
Example #6
def exporter_start():
    print('starting server http://{}:{}/metrics'.format(
        EXPORTER_LISTEN_HOST, EXPORTER_LISTEN_PORT))
    REGISTRY.register(CustomCollector())
    start_http_server(EXPORTER_LISTEN_PORT, addr=EXPORTER_LISTEN_HOST)
    while True:
        time.sleep(5)
Example #7
 def _on_open(self, ws):
     global prometheus_stream_port
     # start local http export server
     start_http_server(prometheus_stream_port)
     # make Prometheus scrape this server
     self._config_prometheus()
     LOG.info('websocket opened: {}'.format(self.url))
Example #8
def main():
    """
    Main method
    """
    args = create_parser().parse_args()

    log_format = '%(asctime)s %(message)s'
    logging_args = dict(format=log_format,
                        level=args.log_level)
    if args.log_file:
        logging_args['filename'] = args.log_file
        logging_args['filemode'] = 'a'

    logging.basicConfig(**logging_args)

    scheme = "https" if args.use_tls else "http"
    use_ts = args.use_device_data_timestamp
    collector = SunPowerPVSupervisorCollector(hostname=args.hostname,
                                              port=args.port,
                                              scheme=scheme,
                                              timeout=args.timeout,
                                              use_device_data_timestamp=use_ts,
                                             )

    logging.info("Listening on port %d...", args.listen_on)
    start_http_server(args.listen_on)

    REGISTRY.register(collector)

    # Sleep indefinitely until we receive a signal
    while True:
        time.sleep(10)
Example #9
 def _start_prometheus_server(self):
     if not self.prometheus_server_base_port:
         logger.error("Can't start prometheus server: missing prometheus_server_base_port configuration")
         return
     server_port = int(self.prometheus_server_base_port) + self.worker_id
     start_http_server(server_port)
     logger.info('Prometheus server started on port %s', server_port)
Example #10
def start_httpd(addr):  # pragma: no cover
    """
    Starts the exposing HTTPD using the addr provided in a separate
    thread.
    """
    host, port = addr.split(':')
    logging.info('Starting HTTPD on {}:{}'.format(host, port))
    prometheus_client.start_http_server(int(port), host)
Example #11
 def init(self):
     """Init the Prometheus Exporter"""
     try:
         start_http_server(port=int(self.port), addr=self.host)
     except Exception as e:
         logger.critical("Can not start Prometheus exporter on {}:{} ({})".format(self.host, self.port, e))
         sys.exit(2)
     else:
         logger.info("Start Prometheus exporter on {}:{}".format(self.host, self.port))
Example #12
def main():
    parser = argparse.ArgumentParser(description='Export ES query results to Prometheus.')
    parser.add_argument('-e', '--es-cluster', default='localhost',
        help='addresses of nodes in an Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)')
    parser.add_argument('-p', '--port', type=int, default=8080,
        help='port to serve the metrics endpoint on. (default: 8080)')
    parser.add_argument('-c', '--config-file', default='exporter.cfg',
        help='path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='turn on verbose logging.')
    args = parser.parse_args()

    logging.basicConfig(
        format='[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s',
        level=logging.DEBUG if args.verbose else logging.INFO
    )
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    config = configparser.ConfigParser()
    config.read_file(open(args.config_file))

    query_prefix = 'query_'
    queries = {}
    for section in config.sections():
        if section.startswith(query_prefix):
            query_name = section[len(query_prefix):]
            query_interval = config.getfloat(section, 'QueryIntervalSecs')
            query_indices = config.get(section, 'QueryIndices', fallback='_all')
            query = json.loads(config.get(section, 'QueryJson'))

            queries[query_name] = (query_interval, query_indices, query)

    if queries:
      es_client = Elasticsearch(es_cluster)

      scheduler = sched.scheduler()

      logging.info('Starting server...')
      start_http_server(port)
      logging.info('Server started on port %s', port)

      for name, (interval, indices, query) in queries.items():
          run_scheduler(scheduler, es_client, name, interval, indices, query)

      try:
          scheduler.run()
      except KeyboardInterrupt:
          pass

    else:
      logging.warning('No queries found in config file %s', args.config_file)

    logging.info('Shutting down')
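The query config file read above is not shown. A minimal sketch of what an exporter.cfg section might look like, with a hypothetical section name and query body, embedded as a Python string for illustration:

EXAMPLE_EXPORTER_CFG = """
[query_all_documents]
QueryIntervalSecs = 30
QueryIndices = _all
QueryJson = {"query": {"match_all": {}}}
"""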
Example #13
    def __init__(self, cdb):
        super().__init__(cdb)

        cdb.reserve_keywords(["prometheus"], NAME)
        cdb.add_plugin_description(DESCRIPTION, NAME)
        cdb.add_plugin_usage(USAGE, NAME)

        start_http_server(9100)
        # Maybe too many labels, which can overload Prometheus: https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels
        self.reserved_keywords_calls = Counter("reserved_keywords_calls", "Reserved Keywords Calls", ["keyword", "author_id", "server_id", "channel_id"])
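The comment above warns that per-author and per-channel labels can overload Prometheus. A hedged sketch of a lower-cardinality alternative that keeps only the keyword label:

from prometheus_client import Counter

reserved_keywords_calls = Counter(
    "reserved_keywords_calls", "Reserved Keywords Calls", ["keyword"])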
Example #14
def listen_metrics(bind_addresses, port):
    """
    Start Prometheus metrics server.
    """
    from synapse.metrics import RegistryProxy
    from prometheus_client import start_http_server

    for host in bind_addresses:
        logger.info("Starting metrics listener on %s:%d", host, port)
        start_http_server(port, addr=host, registry=RegistryProxy)
Example #15
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(CouchbaseCollector(args.couchbase))
        start_http_server(port)
        print("Serving at port:", port)
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #16
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(AzureStatusCollector())
        start_http_server(port)
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #17
def main():

    """
    Symbols list contain a list of pairs which describes stock symbols as used by Google API.
    Each element should be 'EXCHANGE:SYMBOL' examples:
 
         [ 'NASDAQ:GOOG', 'NASDAQ:CSCO', 'NYSE:IBM', 'BCBA:YPFD' ]
    """
    start_http_server(int(sys.argv[1]))
    REGISTRY.register(QuoteCollector())
    while True: time.sleep(1)
Example #18
def SetupPrometheusEndpointOnPort(port, addr=''):
    """Exports Prometheus metrics on an HTTPServer running in its own thread.

    The server runs on the given port and is by default listenning on
    all interfaces. This HTTPServer is fully independent of Django and
    its stack. This offers the advantage that even if Django becomes
    unable to respond, the HTTPServer will continue to function and
    export metrics. However, this also means that none of the features
    offered by Django (like middlewares or WSGI) can't be used.
    """
    prometheus_client.start_http_server(port, addr=addr)
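A hedged sketch of calling SetupPrometheusEndpointOnPort (the helper above) from a Django project; the 'metrics' app label is hypothetical, and AppConfig.ready() runs once the app registry is loaded:

from django.apps import AppConfig

class MetricsConfig(AppConfig):
    name = 'metrics'

    def ready(self):
        # SetupPrometheusEndpointOnPort is the module-level helper above.
        # Start the standalone exporter thread alongside the Django process.
        SetupPrometheusEndpointOnPort(8001, addr='127.0.0.1')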
Example #19
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(JenkinsCollector(args.jenkins, args.user, args.password, args.insecure))
        start_http_server(port)
        print("Polling {}. Serving at port: {}".format(args.jenkins, port))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #20
def main():
    print "Start listening on :%s" % (METRICS_PORT)
    start_http_server(METRICS_PORT)

    dht11 = sensor_dht11.DHT11()
    dht11.start()

    light = sensor_light.LightSensor()
    light.start()

    light.join()
    dht11.join()
Example #21
def spawn_on_socket(fd):
    worker_id = uwsgi.worker_id()
    application = make_app(debug=options.debug)
    server = HTTPServer(application, xheaders=True, max_body_size=options.max_body_size)
    sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
    server.add_sockets([sock])

    if options.prometheus_port:
        prometheus_port = options.prometheus_port + worker_id
        uwsgi.log('starting prometheus server on port %d' % prometheus_port)
        start_http_server(prometheus_port)
    uwsgi.log('tornado plumber reporting for duty on uWSGI worker %s' % worker_id)
Example #22
def cli(**settings):
    """Notify about new reviews in AppStore and Google Play in slack.

       Launch command using supervisor or using screen/tmux/etc.
       Reviews are fetched for multiple apps and languages in --beat=300 interval.
    """

    setup_logging(settings)
    settings = setup_languages(settings)
    channels = setup_channel_map(settings)
    app = CriticApp(**dict(settings, channels=channels))

    if settings['version']:
        click.echo('Version %s' % critics.__version__)
        return
    if not (settings['ios'] or settings['android']):
        click.echo('Please choose either --ios or --android')
        return

    loop = tornado.ioloop.IOLoop.instance()

    if app.load_model():
        logger.debug('Model loaded OK, not skipping notify on first run')
        notify = True
    else:
        notify = False

    if settings['ios']:
        logger.info('Tracking IOS apps: %s', ', '.join(settings['ios']))
        itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'),
                                                 1000 * settings['beat'], loop)
        itunes.start()
    if settings['android']:
        logger.info('Tracking Android apps: %s', ', '.join(settings['android']))
        google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'),
                                                      1000 * settings['beat'], loop)
        google_play.start()

    echo_channel_map(channels)

    if settings['ios']:
        app.poll_store('ios', notify=notify)
    if settings['android']:
        app.poll_store('android', notify=notify)

    if settings['stats']:
        port = int(settings['stats'])
        logger.debug('Serving metrics server on port %s' % port)
        start_http_server(port)

    if settings['daemonize']:
        loop.start()
Example #23
def deadman_exporter(argv=sys.argv):
    """This daemon monitors the local zgres-deadman daemon running on this machine.

    It works by using the deadman configuration to look into the DCS to find
    statistics for this machine. We build it as a separate daemon to lessen the
    risk that monitoring and statistics collection inside the zgres-deadman
    will cause errors.

    We run it on the same machine as this provides:
        * reusability of the existing deadman configuration
        * easier prometheus configuration
        * automatic HA
    """
    parser = argparse.ArgumentParser(description="Prometheus statistics daemon for zgres-deadman")
    config = parse_args(parser, argv, config_file='deadman.ini')
    # this sleep prevents us from restarting too fast and systemd failing to restart us
    # we use a fail-always architecture here, any exception causes a daemon restart
    sleep(10)
    start_http_server(9163)
    # use only one plugin and zookeeper connection, otherwise we get memory leaks :(
    plugins = App(config)._plugins
    plugins.initialize()
    while True:
        dcs_has_conn_info = 0
        dcs_is_willing_replica = 0
        # HACK, we only need the plugins, really
        all_state = list(plugins.dcs_list_state())
        my_id = plugins.get_my_id()
        for id, state in all_state:
            if id == my_id:
                if 'master' == state.get('replication_role'):
                    metric_dcs_is_master.set(1)
                else:
                    metric_dcs_is_master.set(0)
                break
        for id, state in willing_replicas(all_state):
            if id == my_id:
                dcs_is_willing_replica = 1
                metric_dcs_willing_since.set(state['willing'])
                break
        for id, conn_info in plugins.dcs_list_conn_info():
            if id == my_id:
                dcs_has_conn_info = 1
                break
        metric_dcs_has_conn_info.set(dcs_has_conn_info)
        metric_dcs_is_willing_replica.set(dcs_is_willing_replica)
        sleep(60)
Example #24
def main():
    try:
        args = parse_args()
        port = int(args.port)
        while True:
            try:
                if requests.get(args.odl_inventory):
                    REGISTRY.register(OpenDaylightCollector(args.opendaylight, args.odl_inventory))
                    start_http_server(port)
                    print "Polling data from OpenDaylight: %s. Starting OpenDaylight exporter on port: %s" % (args.opendaylight, port)
                    while True:
                        time.sleep(1)
            except ConnectionError:
                print "OpenDaylight is either not running or it is unreachable."
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #25
def main():
    parser = argparse.ArgumentParser(description='Feed Maxwell metrics into Prometheus.')
    parser.add_argument('--config', metavar='CONFIG', type=str, required=True,
                        help='path to config file')
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read(args.config)

    validate_config(config)

    start_http_server(config.getint('exporter', 'port'))

    print "Starting loop"
    while True:
        for section in config.sections():
            if section == 'exporter': continue
            host_config = {
                'user': config.get(section, 'username'),
                'password': config.get(section, 'password'),
                'host': config.get(section, 'hostname'),
                'port': config.getint(section, 'port'),
                'database': 'maxwell',
                'raise_on_warnings': True,
            }
            connection = connector.connect(**host_config)
            cursor = connection.cursor()
            cursor.execute("SHOW MASTER STATUS")
            row = cursor.fetchall()[0]
            binlog_name = row[0]
            binlog_position = row[1]

            cursor.execute("SELECT * from maxwell.positions")
            row = cursor.fetchall()[0]
            maxwell_binlog_name = row[1]
            maxwell_binlog_position = row[2]

            cursor.close()
            connection.close()

            backlog = calculateBacklog(binlog_name, binlog_position, maxwell_binlog_name, maxwell_binlog_position)
            setGaugeValue('maxwell:master_binlog_position_bytes', ['host'], [section], binlog_position)
            setGaugeValue('maxwell:maxwell_binlog_position_bytes', ['host'], [section], maxwell_binlog_position)
            setGaugeValue('maxwell:backlog_bytes', ['host'], [section], backlog)

        time.sleep(config.getint('exporter', 'refresh_interval_ms') / 1000.0)
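setGaugeValue and calculateBacklog are not shown above. A hedged sketch of what setGaugeValue might look like, caching Gauge objects so each metric is only registered once:

from prometheus_client import Gauge

_gauges = {}

def setGaugeValue(name, label_names, label_values, value):
    # Create the gauge on first use, then update the labelled time series.
    if name not in _gauges:
        _gauges[name] = Gauge(name, name, label_names)
    _gauges[name].labels(*label_values).set(value)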
Example #26
    def _handle_config(self, msg):
        """
        Handle config message from Felix.

        Called from the reader thread.
        """
        complete_logging(msg[MSG_KEY_LOG_FILE],
                         file_level=msg[MSG_KEY_SEV_FILE],
                         syslog_level=msg[MSG_KEY_SEV_SYSLOG],
                         stream_level=msg[MSG_KEY_SEV_SCREEN],
                         gevent_in_use=False)
        if msg[MSG_KEY_PROM_PORT]:
            _log.info("Prometheus metrics enabled, starting driver metrics"
                      "server on port %s", msg[MSG_KEY_PROM_PORT])
            start_http_server(msg[MSG_KEY_PROM_PORT])

        self._config_received.set()
        _log.info("Received config from Felix: %s", msg)
Example #27
def start_metrics_server(port=9389):
    """
    https://github.com/prometheus/prometheus/wiki/Default-port-allocations
    Occupied port 9389 for SCT
    """
    hostname = socket.gethostname()
    if not network.is_port_free(port, hostname):
        port = network.find_free_port(8001, 10000)

    try:
        logger.debug('Try to start prometheus API server on port: %s', port)
        prometheus_client.start_http_server(port)
        ip = socket.gethostbyname(hostname)
        return '{}:{}'.format(ip, port)
    except Exception as ex:
        logger.error('Cannot start local http metrics server: %s', ex)

    return None
Example #28
def main():
    parser = argparse.ArgumentParser(description='Prometheus statistics for a Mumble ICE interface')
    parser.add_argument('-l', '--listen', help='Port to listen on', default=9123, type=int)
    parser.add_argument('-H', '--host', help='Host of the Ice interface', default='127.0.0.1')
    parser.add_argument('-p', '--port', help='Port of the Ice interface', default=6502, type=int)
    parser.add_argument('-i', '--interval', help='Interval in seconds', default=60, type=int)
    parser.add_argument('--secret', help='The read secret', default=None)
    parser.add_argument('-v', '--verbose', help='Verbose', action='store_true')
    args = parser.parse_args()

    node.start_http_server(args.listen)

    gauges = {
        'users': node.Gauge('mumble_users_connected', 'Number of connected users',
            ['ice_server', 'server_id']),
        'uptime': node.Gauge('mumble_uptime', 'Virtual uptime',
            ['ice_server', 'server_id']),
    }

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    ice_server = '%s:%d' % (args.host, args.port)
    with ice_connect(args.host, args.port, args.secret) as meta:
        while True:
            logger.info('gathering info')
            t1 = time.time()
            for server in meta.getBootedServers():
                g_user = len(server.getUsers())
                g_uptime = server.getUptime()
                logger.debug('mumble_user_connected: %d' % g_user)
                logger.debug('mumble_uptime: %d' % g_uptime)
                labels = {'server_id': server.id(), 'ice_server': ice_server}
                gauges['users'].labels(**labels).set(g_user)
                gauges['uptime'].labels(**labels).set(g_uptime)

            time_to_wait = args.interval - (time.time() - t1)
            if time_to_wait > 0:
                time.sleep(time_to_wait)
    return 0
Example #29
async def main(ctx, bind, port, token, account, key, reuse_port, tls_ciphers,
               tls_certificate, metrics_port, table):
    # Start up our metrics server in another thread.
    prometheus_client.start_http_server(metrics_port)

    bqc = BigQueryClient(*table.split(":"), client_id=account, key=key.read())

    if tls_certificate is not None:
        ssl_context = tls.create_context(tls_certificate, tls_ciphers)
    else:
        ssl_context = None

    with Linehaul(token=token, bigquery=bqc, loop=ctx.event_loop) as lh:
        async with Server(lh, bind, port,
                          reuse_port=reuse_port,
                          ssl=ssl_context,
                          loop=ctx.event_loop) as s:
            try:
                await s.wait_closed()
            except asyncio.CancelledError:
                click.echo(click.style("Shutting Down...", fg="yellow"))
Example #30
def main():
    application = make_app(debug=options.debug)

    with open(options.logging_config, 'r') as conf:
        conf_dictionary = json.load(conf)
        logging.config.dictConfig(conf_dictionary)

    if options.prometheus_port:
        start_http_server(options.prometheus_port)

    if options.debug:
        application.listen(address=options.address, port=options.port)
    else:
        server = tornado.httpserver.HTTPServer(application,
                                               xheaders=True,
                                               max_body_size=options.max_body_size)
        server.bind(options.port)
        server.start()
    logger.info('Using asyncio')
    from tornado.platform.asyncio import AsyncIOMainLoop
    AsyncIOMainLoop.current().start()
Example #31
def main(unparsed_args=None):
    parser = argparse.ArgumentParser(
        description="Start deployment api and make request to it.")

    parser.add_argument("--deployment",
                        default="periodic-test",
                        type=str,
                        help="Deployment name.")
    parser.add_argument("--email",
                        default="*****@*****.**",
                        type=str,
                        help="Email used during e2e test")
    parser.add_argument("--project",
                        default="kubeflow-ci-deploy",
                        type=str,
                        help="e2e test project id")
    parser.add_argument("--project_number",
                        default="453914067825",
                        type=str,
                        help="e2e test project number")
    parser.add_argument("--namespace",
                        default="",
                        type=str,
                        help="namespace where deployment service is running")
    parser.add_argument("--target_url",
                        default="deploy.kubeflow.cloud",
                        type=str,
                        help="target url which accept deployment request")
    parser.add_argument("--wait_sec",
                        default=120,
                        type=int,
                        help="oauth client secret")
    parser.add_argument("--zone",
                        default="us-east1-d",
                        type=str,
                        help="GKE cluster zone")
    parser.add_argument("--sa_client_id",
                        default="111670663612681935351",
                        type=str,
                        help="Service account client id")
    parser.add_argument("--kfverison",
                        default="v0.3.1",
                        type=str,
                        help="Service account client id")
    parser.add_argument(
        "--mode",
        default="e2e",
        type=str,
        help="offer three test mode: e2e, prober, and load_test")

    args = parser.parse_args(args=unparsed_args)

    util_run(('gcloud auth activate-service-account --key-file=' +
              may_get_env_var("GOOGLE_APPLICATION_CREDENTIALS")).split(' '),
             cwd=FILE_PATH)
    if args.mode == "e2e":
        sleep(args.wait_sec)
        make_e2e_call(args)
        insert_ssl_cert(args)
        if check_deploy_status(args) != 200:
            raise RuntimeError(
                "IAP endpoint not ready after 30 minutes, time out...")

    if args.mode == "prober":
        start_http_server(8000)
        SERVICE_HEALTH.set(0)
        PROBER_HEALTH.set(0)
        service_account_credentials = get_service_account_credentials(
            "SERVICE_CLIENT_ID")
        while True:
            sleep(args.wait_sec)
            if not prober_clean_up_resource(args):
                PROBER_HEALTH.set(1)
                FAILURE_COUNT.inc()
                logging.error("request cleanup failed, retry in %s seconds" %
                              args.wait_sec)
                continue
            PROBER_HEALTH.set(0)
            if make_prober_call(args, service_account_credentials):
                if insert_ssl_cert(args):
                    PROBER_HEALTH.set(0)
                else:
                    PROBER_HEALTH.set(1)
                    FAILURE_COUNT.inc()
                    logging.error(
                        "request insert_ssl_cert failed, retry in %s seconds" %
                        args.wait_sec)
                    continue
                if check_deploy_status(args) == 200:
                    SERVICE_HEALTH.set(0)
                    SUCCESS_COUNT.inc()
                else:
                    SERVICE_HEALTH.set(1)
                    FAILURE_COUNT.inc()
            else:
                SERVICE_HEALTH.set(2)
                FAILURE_COUNT.inc()
                logging.error("prober request failed, retry in %s seconds" %
                              args.wait_sec)
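The SERVICE_HEALTH, PROBER_HEALTH, SUCCESS_COUNT and FAILURE_COUNT metrics are not defined in this snippet. A hedged sketch of definitions consistent with how they are used above; the names and help strings are guesses:

from prometheus_client import Counter, Gauge

SERVICE_HEALTH = Gauge('service_health', 'Deployment service health (0 = healthy)')
PROBER_HEALTH = Gauge('prober_health', 'Prober health (0 = healthy)')
SUCCESS_COUNT = Counter('prober_success_total', 'Successful prober checks')
FAILURE_COUNT = Counter('prober_failure_total', 'Failed prober checks')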
Example #32
app_memberarea_staging = Gauge('app_memberarea_staging_status', 'Status of app-memberarea port 6767')
app_publicapi_staging = Gauge('app_publicapi_staging_status', 'Status of app-publicapi port 7070')
app_publicapi_production = Gauge('app_publicapi_production_stage_status', 'Status of app-production port 5050')
app_payment_staging = Gauge('app_payment_staging_status', 'Status of npm port 9000')

@app_memberarea_staging.time()
@app_publicapi_staging.time()
@app_publicapi_production.time()
@app_payment_staging.time()
def process_request(t):

	PORTS = ['6767', '7070', '5050', '9000']
	PROCESS = [app_memberarea_staging, app_publicapi_staging, app_publicapi_production, app_payment_staging]

	# Check all the services status
	for i in range(len(PROCESS)):
		checkPort = os.popen('sudo netstat -tulpn | grep ' + PORTS[i])
		if (len(checkPort.readlines())):
			PROCESS[i].set(100)
		else:
			PROCESS[i].set(0)
	time.sleep(t)

if __name__ == '__main__':
	# Start up the server to expose the metrics.
	start_http_server(9101)
	# Generate some requests.
	while True:
		process_request(5)
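A hedged sketch of checking the same ports without shelling out to sudo netstat; connect_ex() returning 0 means something is accepting connections on that localhost port:

import socket

def port_is_listening(port, host='127.0.0.1'):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1)
        return s.connect_ex((host, int(port))) == 0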
Example #33
    if pelorus.missing_configs(REQUIRED_CONFIG):
        print("This program will exit.")
        sys.exit(1)

    pelorus.load_kube_config()
    k8s_config = client.Configuration()
    k8s_client = client.api_client.ApiClient(configuration=k8s_config)
    dyn_client = DynamicClient(k8s_client)

    username = os.environ.get("GIT_USER")
    token = os.environ.get("GIT_TOKEN")
    git_api = os.environ.get("GIT_API")
    git_provider = os.environ.get("GIT_PROVIDER", pelorus.DEFAULT_GIT)
    tls_verify = bool(
        strtobool(os.environ.get("TLS_VERIFY", pelorus.DEFAULT_TLS_VERIFY)))
    namespaces = None
    if os.environ.get("NAMESPACES") is not None:
        namespaces = [
            proj.strip() for proj in os.environ.get("NAMESPACES").split(",")
        ]
    apps = None
    start_http_server(8080)

    collector = GitFactory.getCollector(dyn_client, username, token,
                                        namespaces, apps, git_api,
                                        git_provider)
    REGISTRY.register(collector)

    while True:
        time.sleep(1)
Example #34
def main():  # pylint: disable=too-many-statements
    """Main kafka listener entrypoint."""
    start_http_server(int(PROMETHEUS_PORT))
    init_logging()
    LOGGER.info("Starting upload listener.")

    loop = asyncio.get_event_loop()
    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
    for sig in signals:
        loop.add_signal_handler(
            sig, lambda sig=sig: loop.create_task(terminate(sig, loop)))
    executor = BoundedExecutor(MAX_QUEUE_SIZE, max_workers=WORKER_THREADS)

    def process_message(msg):  # pylint: disable=too-many-return-statements,too-many-branches
        """Message processing logic"""
        PROCESS_MESSAGES.inc()
        LOGGER.debug('Received message from topic %s: %s', msg.topic,
                     msg.value)

        try:
            msg_dict = json.loads(msg.value.decode("utf8"))
        except json.decoder.JSONDecodeError:
            MESSAGE_PARSE_ERROR.inc()
            LOGGER.exception("Unable to parse message: ")
            return
        FailedCache.process_failed_cache(FailedCache.upload_cache, executor,
                                         process_upload, loop)
        FailedCache.process_failed_cache(FailedCache.delete_cache, executor,
                                         process_delete, loop)

        if msg.topic == mqueue.UPLOAD_TOPIC:
            if not validate_msg(msg_dict, "upload",
                                REQUIRED_UPLOAD_MESSAGE_FIELDS):
                return
            LOGGER.info("Received upload msg, inventory_id: %s, type: %s",
                        msg_dict["host"]["id"], msg_dict["type"])
            # send message to payload tracker
            send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                        msg_dict,
                                        'received',
                                        loop=loop)
            # process only archives from smart_management accounts
            identity = get_identity(
                msg_dict["platform_metadata"]["b64_identity"])
            if identity is None:
                INVALID_IDENTITY.inc()
                error_msg = "Skipped upload due to invalid identity header."
                LOGGER.warning(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            if not is_entitled_smart_management(identity,
                                                allow_missing_section=True):
                MISSING_SMART_MANAGEMENT.inc()
                error_msg = "Skipped upload due to missing smart_management entitlement."
                LOGGER.debug(error_msg)
                send_msg_to_payload_tracker(PAYLOAD_TRACKER_PRODUCER,
                                            msg_dict,
                                            'error',
                                            status_msg=error_msg,
                                            loop=loop)
                return
            process_func = process_upload
        elif msg.topic == mqueue.EVENTS_TOPIC:
            if not validate_msg(msg_dict, "event",
                                REQUIRED_EVENT_MESSAGE_FIELDS):
                return
            LOGGER.info("Received event msg, inventory_id: %s, type: %s",
                        msg_dict["id"], msg_dict["type"])
            if msg_dict['type'] == 'delete':
                process_func = process_delete
            else:
                UNKNOWN_EVENT_TYPE.inc()
                LOGGER.error("Received unknown event type: %s",
                             msg_dict['type'])
                return
        else:
            UNKNOWN_TOPIC.inc()
            LOGGER.error("Received message on unsupported topic: %s",
                         msg.topic)
            return

        future = executor.submit(process_func, msg_dict, loop=loop)
        future.add_done_callback(on_thread_done)

    with DatabasePool(WORKER_THREADS):
        # prepare repo name to id cache
        db_init_repo_cache()
        LISTENER_QUEUE.listen(process_message)

        # wait until loop is stopped from terminate callback
        loop.run_forever()

        LOGGER.info("Shutting down.")
        executor.shutdown()
Example #35
    print('Error while setting logger configs::%s' % e)
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logger = logging.getLogger('netscaler_metrics_exporter')

    if args.config_file:
        args = parseConfig(args)

    # Wait for other containers to start.
    logger.info('Sleeping for %s seconds.' % args.start_delay)
    time.sleep(args.start_delay)

    # Start the server to expose the metrics.
    logger.info('Starting the exporter on port %s.' % args.port)
    try:
        start_http_server(args.port)
        print("Exporter is running...")
    except Exception as e:
        logger.critical('Error while opening port::%s', e)
        print(e)

    # Get username and password of NetScalers.
    ns_user = os.environ.get("NS_USER")
    if ns_user == None:
        ns_user = args.username
    ns_password = os.environ.get("NS_PASSWORD")
    if ns_password == None:
        ns_password = args.password
    else:
        logger.warning(
            'Using NS_PASSWORD Environment variable is insecure. Consider using config.yaml file and --config-file option to define password'
Example #36
import http.server
import random
import time

from prometheus_client import start_http_server
from prometheus_client import Counter
from prometheus_client import Gauge

SUM = Counter('demo_sum', 'Sum Demo requests duration.')
REQUESTS = Counter('demo_count', 'Count Demo requests.')
EXCEPTIONS = Counter('demo_exceptions_count', 'Exceptions serving Demo.')
LAST = Gauge('demo_last_time_seconds', 'The last time a Demo was served.')


class MyHandler(http.server.BaseHTTPRequestHandler):
    '''Example handler class.'''
    def do_GET(self):
        '''Handler exercising all of the proposed examples.'''
        rand = random.randrange(10)
        time.sleep(rand)
        SUM.inc(rand)
        REQUESTS.inc()
        with EXCEPTIONS.count_exceptions():
            if random.random() < 0.2:
                raise Exception
        self.send_response(200)
        self.end_headers()
        self.wfile.write(bytes("Hello World (after %ss)" % rand, "utf-8"))
        LAST.set_to_current_time()


if __name__ == "__main__":
    start_http_server(8001)
    SERVER = http.server.HTTPServer(('', 8002), MyHandler)
    SERVER.serve_forever()
Example #37
    # Draining the subprocess STDOUT to the logger as the
    # subprocess is executed
    while True:
        output = process.stdout.readline().decode()
        # Print all the lines while they are not empty
        if output:
            LOG.info(output.strip())
            continue
        # With an empty line, check if the process is still running
        if process.poll() is not None:
            return process.poll()


if __name__ == "__main__":
    start_http_server(9090)

    run_time = Gauge(name='qontract_reconcile_last_run_seconds',
                     documentation='Last run duration in seconds',
                     labelnames=['integration', 'shards', 'shard_id'])

    run_status = Counter(
        name='qontract_reconcile_run_status',
        documentation='Status of the runs',
        labelnames=['integration', 'status', 'shards', 'shard_id'])

    while True:
        start_time = time.monotonic()
        return_code = run_cmd()
        time_spent = time.monotonic() - start_time
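        # A hedged sketch of how the loop might record each run; the fragment
        # above ends before these metrics are updated, and the label values
        # here are hypothetical.
        run_time.labels(integration='example-integration',
                        shards='1', shard_id='0').set(time_spent)
        run_status.labels(integration='example-integration',
                          status=str(return_code),
                          shards='1', shard_id='0').inc()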
Example #38
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(description='Export ES query results to Prometheus.')
    parser.add_argument('-e', '--es-cluster', default='localhost',
                        help='addresses of nodes in an Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)')
    parser.add_argument('--ca-certs',
                        help='path to a CA certificate bundle. Can be absolute, or relative to the current working directory. If not specified, SSL certificate verification is disabled.')
    parser.add_argument('-p', '--port', type=int, default=9206,
                        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument('--basic-user',
                        help='User for authentication. (default: no user)')
    parser.add_argument('--basic-password',
                        help='Password for authentication. (default: no password)')
    parser.add_argument('--query-disable', action='store_true',
                        help='disable query monitoring. Config file does not need to be present if query monitoring is disabled.')
    parser.add_argument('-c', '--config-file', default='exporter.cfg',
                        help='path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)')
    parser.add_argument('--cluster-health-disable', action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument('--cluster-health-timeout', type=float, default=10.0,
                        help='request timeout for cluster health monitoring, in seconds. (default: 10)')
    parser.add_argument('--cluster-health-level', default='indices', choices=['cluster', 'indices', 'shards'],
                        help='level of detail for cluster health monitoring.  (default: indices)')
    parser.add_argument('--nodes-stats-disable', action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument('--nodes-stats-timeout', type=float, default=10.0,
                        help='request timeout for nodes stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--nodes-stats-metrics', type=nodes_stats_metrics_parser,
                        help='limit nodes stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-disable', action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument('--indices-stats-timeout', type=float, default=10.0,
                        help='request timeout for indices stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--indices-stats-mode', default='cluster', choices=['cluster', 'indices'],
                        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument('--indices-stats-metrics', type=indices_stats_metrics_parser,
                        help='limit indices stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-fields', type=indices_stats_fields_parser,
                        help='include fielddata info for specific fields. Fields should be separated by commas e.g. indices,fs. Use \'*\' for all.')
    parser.add_argument('-j', '--json-logging', action='store_true',
                        help='turn on json logging.')
    parser.add_argument('--log-level', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        help='detail level to log. (default: INFO)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    if args.basic_user and args.basic_password is None:
        parser.error('Username provided with no password.')
    elif args.basic_user is None and args.basic_password:
        parser.error('Password provided with no username.')
    elif args.basic_user:
        http_auth = (args.basic_user, args.basic_password)
    else:
        http_auth = None

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(
        handlers=[log_handler],
        level=logging.DEBUG if args.verbose else log_level
    )
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    if args.ca_certs:
        es_client = Elasticsearch(es_cluster, verify_certs=True, ca_certs=args.ca_certs, http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster, verify_certs=False, http_auth=http_auth)

    scheduler = None

    if not args.query_disable:
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section, 'QueryIntervalSecs', fallback=15)
                query_timeout = config.getfloat(section, 'QueryTimeoutSecs', fallback=10)
                query_indices = config.get(section, 'QueryIndices', fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))

                queries[query_name] = (query_interval, query_timeout, query_indices, query)

        if queries:
            for name, (interval, timeout, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query, timeout)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warning('No queries found in config file %s', args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(ClusterHealthCollector(es_client,
                                                 args.cluster_health_timeout,
                                                 args.cluster_health_level))

    if not args.nodes_stats_disable:
        REGISTRY.register(NodesStatsCollector(es_client,
                                              args.nodes_stats_timeout,
                                              metrics=args.nodes_stats_metrics))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(IndicesStatsCollector(es_client,
                                                args.indices_stats_timeout,
                                                parse_indices=parse_indices,
                                                metrics=args.indices_stats_metrics,
                                                fields=args.indices_stats_fields))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
Example #39
        self.dictionary = json.loads(get_table_dict)
        pass

    def collect(self):
        for tab, v in self.dictionary['Tables'].items():
            key_list = []
            val_list = []
            for key, val in v[0].items():
                key_list.append(key.replace(" ", "_"))
                val_list.append(val)
            labels_names = key_list + ['Table']
            labels_values = val_list + [tab]
            g = GaugeMetricFamily("summary", 'dl-monitoring', labels=labels_names)
            g.add_metric(labels_values, 1)
            yield g


def __getstatus__():
    gc, sc = s3v.init_session(profile)
    db_list = s3v.get_db(gc)
    unhealthy_datasource = s3v.get_tab(sc, gc, db_list)
    return unhealthy_datasource


if __name__ == '__main__':
    start_http_server(8000)
    ds = jsonRenderer.json_response(__getstatus__())
    REGISTRY.register(dl_exporter(ds))
    while True:
        time.sleep(1)
Example #40
import http.server
import json
import sys
import threading
from os import getenv

from livenessserver import LivenessServer
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY
from jiracollector import JiraCollector

if __name__ == '__main__':

    REGISTRY.register(JiraCollector())

    # First we collect the environment variables that were set in either
    # the Dockerfile or the Kubernetes Pod specification.
    listen_port = int(getenv('LISTEN_PORT', 8090))
    prom_listen_port = int(getenv('PROM_LISTEN_PORT', 8080))

    # Let the Prometheus client export its metrics on a separate port.
    start_http_server(prom_listen_port)
    # Let our web application run and listen on the specified port.
    httpd = http.server.HTTPServer(('localhost', listen_port), LivenessServer)
    # Mark the webserver as ready so the liveness handler can report it.
    httpd.ready = True

    # Simple handler function to show that we're handling the SIGTERM
    def do_shutdown(signum, frame):
        global httpd

        log = {'jira-metrics': {'message': 'Graceful shutdown.'}}
        print(json.dumps(log))
        threading.Thread(target=httpd.shutdown).start()
        sys.exit(0)

    # We catch the SIGTERM signal here and shut down the HTTPServer
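    # A hedged completion of the truncated snippet above (assumed, not part of
    # the original): register the handler and serve until shutdown.
    import signal
    signal.signal(signal.SIGTERM, do_shutdown)
    httpd.serve_forever()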
Example #41
  pass

with h_transaction.labels(method='POST',operation='transaction').time():
  pass


#from prometheus_client import Enum
e = Enum('app_status', 'Application status ',
        states=['starting', 'running', 'stopped'])
e.state('running')
  
#
g_transaction_last_seen = Gauge('transaction_last_seen', 'last seen transactions ',['method','operation'])
g_enroll_last_seen = Gauge('enroll_last_seen', 'last seen enroll ',['method','operation'])






# start the http server to expose the prometheus metrics
logging.info("Starting web-server...")
start_http_server(metrics_port, "0.0.0.0")
logging.info("Server started and listening at 0.0.0.0:{0}".format(metrics_port))
  
if __name__ == '__main__':
#	main()
	while True:
#		process_request(random.uniform(0.1, 20.0))
		process_request(random.random())
Example #42
def run_prometheus_server(port, collector_config, openstack_config):
    start_http_server(int(port))
    load_and_register_collectors(collector_config, openstack_config)
    while True:
        time.sleep(1)
Example #43
    resp = (service.spreadsheets().values().batchGet(
        spreadsheetId=FINANCE_SPREADSHEET_ID, ranges=titles).execute())
    all_transactions = deserialize_sheets(titles, resp["valueRanges"], days=30)

    timestamps = Gauge(
        "transaction_timestamp_epoch_seconds",
        "When transactions occured",
        ["account", "name", "category", "date"],
    )
    amounts = Gauge(
        "transaction_amount_cents",
        "The transaction value",
        ["account", "name", "category", "date"],
    )
    for t in all_transactions:
        labels = get_labels(t)
        timestamps.labels(**labels).set(t["date"].timestamp())
        amounts.labels(**labels).set(round(float(t["amount"]) * 100))


if __name__ == "__main__":
    start_http_server(PORT)

    creds = get_google_creds()
    service = build("sheets", "v4", credentials=creds, cache_discovery=False)

    while True:
        get_metrics(service)
        time.sleep(60 * 60 * 24)
Example #44
from kubernetes import client, config
from prometheus_client import start_http_server, Gauge
from time import sleep

start_http_server(8849)
g = Gauge('pvc_mapping', 'fetching the mapping between pod and pvc',
          ['persistentvolumeclaim', 'mountedby'])
pool = {}
while 1:
    config.load_incluster_config()
    k8s_api_obj = client.CoreV1Api()
    ret = k8s_api_obj.list_namespace()
    ret = ret.to_dict()
    ret = ret['items']
    for i in ret:
        na = i['metadata']['name']
        print(na)
        pods = k8s_api_obj.list_namespaced_pod(na)
        pods = pods.to_dict()
        pods = pods['items']
        for p in pods:
            for v in p['spec']['volumes']:
                if v['persistent_volume_claim']:
                    pvc = v['persistent_volume_claim']['claim_name']
                    pod = p['metadata']['name']
                    print(pvc, pod)
                    #g.labels(pvc,pod).set(1)
                    if pvc in pool.keys():
                        g.remove(pvc, pool[pvc])
                        g.labels(pvc, pod)
                        pool[pvc] = pod
Example #45
SCOPE = config_get('monitor',
                   'user_scope',
                   raise_exception=False,
                   default='rucio')
CLIENT = StatsClient(host=SERVER, port=PORT, prefix=SCOPE)

ENABLE_METRICS = config_get_bool('monitor',
                                 'enable_metrics',
                                 raise_exception=False,
                                 default=False)
if ENABLE_METRICS:
    METRICS_PORT = config_get_int('monitor',
                                  'metrics_port',
                                  raise_exception=False,
                                  default=8080)
    start_http_server(METRICS_PORT)


def record_counter(counters, delta=1):
    """
    Log one or more counters by arbitrary amounts

    :param counters: The counter or a list of counters to be updated.
    :param delta: The increment for the counter, by default increment by 1.
    """
    if isinstance(counters, list):
        for counter in counters:
            if delta > 0:
                CLIENT.incr(counter, delta)
            else:
                CLIENT.decr(counter, delta)
Example #46
        print(ex)
    finally:
        stdout = json.loads(data.text)
        print(stdout)
        data_dict = {'Manufacturer': str(stdout['Manufacturer']),
                   'Status': str(stdout['Status']['Health']),
                   'Model': str(stdout['Model']),
                   'PowerState': str(stdout['PowerState']),
                   'hostName': str(stdout['HostName']),
                   'SerialNumber': str(stdout['SerialNumber'])}
        if 'SKU' in stdout:
            data_dict['SKU'] = str(stdout['SKU']) #service tag
        server_general.labels(serverip=item).info(data_dict)


def get_servers_data(cnf):
    for srv_type in cnf:
        for ip in srv_type['ips']:
            t = threading.Thread(target=srv_stats, args=(ip, srv_type['id'], srv_type['user_id'], srv_type['user_pass'],))
            t.start()


if __name__ == '__main__':
    config = conf_loader()
    # Start up the server to expose the metrics.
    start_http_server(config['config']['web_port'])
    # Generate some requests.
    while True:
        get_servers_data(config['config']['servers'])
        time.sleep(60 * 5)
Example #47
    PARSER.add_argument(
        '-server_port',
        metavar='server_port',
        type=int,
        default=9111,
        help='Port to serve the metrics on.'
    )

    ARGS = PARSER.parse_args()

    NOTIFIER = telegram_notifier.NotificationHandler()

    LOGGER = results_manager.LogManager(level="debug", output="file",
        filename="pendulum_export_"+str(ARGS.server_port)+".log")

    start_http_server(ARGS.server_port)

    TAIL = LogTailer("LogTailer", 1, ARGS.fname, LOGGER)
    EXPORT = PendulumExporter("PendulumExporter", 2, LOGGER)

    # Start new Threads
    TAIL.start()
    EXPORT.start()

    EXPORT.join()

    TAIL.join()

    NOTIFIER.emit(
        "ALERT! Stopped exporting pendulum metrics from {}".format(ARGS.fname)
    )
Example #48
           #metric = data.pop()
            metric = data[i]
            labels = list(metric['labels'].keys())
            labels_values = [ metric['labels'][k] for k in labels ]
            if metric['metric_name'] not in to_yield:
                setattr(self, metric['metric_name'], gauge(metric['metric_name'], metric['description'], labels=labels))
            if labels:
                getattr(self, metric['metric_name']).add_metric(labels_values, metric['value'])
                to_yield.add(metric['metric_name'])
        for metric in to_yield:
            yield getattr(self, metric)

registry = prometheus_client.core.REGISTRY
registry.register(Collector())

prometheus_client.start_http_server(conf['listen_port'])

# endless loop
while True:
    try:
        while True:
            try:
                get_data()
                docker_exporter_up.set(1)
                time.sleep(conf['check_interval'])
            except KeyboardInterrupt:
                break
            except:
                trace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
                for line in trace:
                    log.error(line[:-1])
Example #49
app_options = AppOptions(options.app_port, options.app_interval)
artifactory_metrics = ArtifactoryMetrics()
artifactory_options = ArtifactoryOptions(options.artifactory_url,
                                         options.artifactory_user,
                                         options.artifactory_password)
artifactory_api_client = ArtifactoyApiClient(artifactory_options)
artifactory_metrics_updater = ArtifactoryMetricsUpdater(artifactory_api_client)


async def update_metrics():
    while True:
        logger.info('Updating metrics')
        artifactory_metrics_updater.update(artifactory_metrics)

        await asyncio.sleep(app_options.interval())


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    start_http_server(app_options.port())

    loop.create_task(update_metrics())
    try:
        loop.run_forever()

    except KeyboardInterrupt:
        loop.close()
    finally:
        loop.close()
Example #50
def check_the_topic(server, topic_name='input'):
    kafka_connector = kafka.KafkaConsumer(bootstrap_servers=server)
    set_of_topics = kafka_connector.topics()
    if topic_name not in set_of_topics:
        create_topic(server)
        print('Topic "{}" has been created.'.format(topic_name), flush=True)
    return True


def write_to_topic(server, topic_name='input'):
    epoch_time = str(time())
    producer = kafka.KafkaProducer(bootstrap_servers=server)
    producer.send(topic_name, key=b'epoch', value=epoch_time.encode())
    return epoch_time


start_http_server(5000)
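# The gauge below is updated in the produce loop and served from the endpoint started above.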
prometheus_gauge = Gauge('producer_sleep_time', 'Sleep for N sec')

while True:
    try:
        if check_the_topic(server):
            for i in range(100):
                print(write_to_topic(server), flush=True)
                time_to_sleep = randrange(10, 15)
                prometheus_gauge.set(time_to_sleep)
                sleep(time_to_sleep)
    except Exception as err:
        print(err, flush=True)
        sleep(60)
Example #51
0
def run_metrics_server():
    start_http_server(8080)
Example #52
0
import docker
from prometheus_client import start_http_server, Counter

APP_NAME = "Docker events prometheus exporter"
EVENTS = Counter('docker_events',
                 'Docker events',
                 ['event', 'type', 'image', 'name'])


def watch_events():
    client = docker.DockerClient(version='auto',
                                 base_url='unix://var/run/docker.sock')
    for event in client.events(decode=True):
        type_event = event['Type']
        try:
            actor = event['Actor']
            attributes = actor['Attributes']
            action = event['status'].strip()
            EVENTS.labels(event=action, type=type_event, image=attributes['image'], name=attributes['name']).inc()
        except Exception:
            # Events without a 'status' or container attributes are logged and skipped.
            print(event)


if __name__ == '__main__':
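    # Bind to all interfaces so the metrics port is reachable from outside the container.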
    start_http_server(9000, addr='0.0.0.0')
    try:
        watch_events()
    except docker.errors.APIError:
        pass
Example #53
0
def run():
    start_http_server(9101)
    register_temps()
Example #54
0
def start_prometheus_serving(port: int = 8080) -> None:
    """Initialize Prometheus metric server"""
    start_http_server(port)
Example #55
0
def prometheus_monitor(app, port):
    start_http_server(port, "")
Example #56
0
    def __init__(self,
                 redis_address,
                 autoscaling_config,
                 redis_password=None,
                 prefix_cluster_info=False,
                 monitor_ip=None,
                 stop_event: Optional[Event] = None):
        # Initialize the Redis clients.
        ray.state.state._initialize_global_state(
            redis_address, redis_password=redis_password)
        self.redis = ray._private.services.create_redis_client(
            redis_address, password=redis_password)
        if monitor_ip:
            self.redis.set("AutoscalerMetricsAddress",
                           f"{monitor_ip}:{AUTOSCALER_METRIC_PORT}")
        (ip, port) = redis_address.split(":")
        self.gcs_client = connect_to_gcs(ip, int(port), redis_password)
        # Initialize the gcs stub for getting all node resource usage.
        gcs_address = self.redis.get("GcsServerAddress").decode("utf-8")

        options = (("grpc.enable_http_proxy", 0), )
        gcs_channel = grpc.insecure_channel(gcs_address, options=options)
        self.gcs_node_resources_stub = \
            gcs_service_pb2_grpc.NodeResourceInfoGcsServiceStub(gcs_channel)

        # Set the redis client and mode so _internal_kv works for autoscaler.
        worker = ray.worker.global_worker
        worker.redis_client = self.redis
        worker.gcs_client = self.gcs_client
        worker.mode = 0
        head_node_ip = redis_address.split(":")[0]
        self.redis_address = redis_address
        self.redis_password = redis_password
        if os.environ.get("RAY_FAKE_CLUSTER"):
            self.load_metrics = LoadMetrics(local_ip=FAKE_HEAD_NODE_ID)
        else:
            self.load_metrics = LoadMetrics(local_ip=head_node_ip)
        self.last_avail_resources = None
        self.event_summarizer = EventSummarizer()
        self.prefix_cluster_info = prefix_cluster_info
        # Can be used to signal graceful exit from monitor loop.
        self.stop_event = stop_event  # type: Optional[Event]
        self.autoscaling_config = autoscaling_config
        self.autoscaler = None
        # If set, we are in a manually created cluster (non-autoscaling) and
        # simply mirroring what the GCS tells us the cluster node types are.
        self.readonly_config = None

        self.prom_metrics = AutoscalerPrometheusMetrics()
        if monitor_ip and prometheus_client:
            # If monitor_ip wasn't passed in, then don't attempt to start the
            # metric server to keep behavior identical to before metrics were
            # introduced
            try:
                logger.info(
                    "Starting autoscaler metrics server on port {}".format(
                        AUTOSCALER_METRIC_PORT))
                prometheus_client.start_http_server(
                    AUTOSCALER_METRIC_PORT,
                    registry=self.prom_metrics.registry)
            except Exception:
                logger.exception(
                    "An exception occurred while starting the metrics server.")
        elif not prometheus_client:
            logger.warning("`prometheus_client` not found, so metrics will "
                           "not be exported.")

        logger.info("Monitor: Started")
Example #57
0
def main():
    # set up prometheus
    log.info(
        f"Starting Prometheus events server at http://localhost:{PROMETHEUS_PORT_FORWARDED}"
    )

    block_gauge = Gauge(name="block_info",
                        documentation="block_info",
                        labelnames=["info"])

    token_flow_counter = Counter(
        name="token_flow",
        documentation="Token flow by token, event and direction",
        labelnames=["token", "event", "direction"],
    )
    fees_counter = Counter(
        name="fees",
        documentation="Fees by entity",
        labelnames=["entity"],
    )

    start_http_server(PROMETHEUS_PORT)

    # set up event filters
    # filter bridge contract events
    # bridge_abi = open("interfaces/Bridge.json", "r").read()
    # bridge = w3.eth.contract(address=ADDRESSES["bridge_v2"], abi=bridge_abi)
    # console.log(f"Read Badger BTC Bridge contract at address {ADDRESSES['bridge_v2']}")
    # filters = [
    #     bridge.events.Mint.createFilter(fromBlock=BLOCK_START, toBlock="latest"),
    #     bridge.events.Burn.createFilter(fromBlock=BLOCK_START, toBlock="latest"),
    # ]

    # watch events
    # chain = Chain()
    # console.log(
    #     f"Processing prior events from block {BLOCK_START} to {w3.eth.blockNumber}"
    # )
    # process_prior_events(chain, filters, block_gauge, token_flow_counter, fees_counter)

    # console.log("Listening for new events in latest blocks...")
    # listen_new_events(
    #     chain, filters, block_gauge, token_flow_counter, fees_counter, POLL_INTERVAL
    # )

    # ---------------------------------------------------------------------------------

    # set up scanner and scanner state
    # scan all blocks for Mint/Burn events with `eth_getLog`
    # works with nodes where `eth_newFilter` is not supported

    # erc20_abi = json.loads("interfaces/ERC20.json")
    # erc20 = web3.eth.contract(abi=abi)
    # wbtc = w3.eth.contract(address=ADDRESSES["WBTC"], abi=erc20_abi)
    # renbtc = w3.eth.contract(address=ADDRESSES["renBTC"], abi=erc20_abi)

    log.info(
        f"Reading Badger BTC Bridge contract at address {ADDRESSES['bridge_v2']}"
    )
    with open("interfaces/Bridge.json", "r") as f:
        bridge_abi = json.load(f)
    bridge = w3.eth.contract(address=ADDRESSES["bridge_v2"], abi=bridge_abi)

    state = BridgeScannerState()
    state.restore()

    scanner = EventScanner(
        web3=w3,
        contract=bridge,
        state=state,
        events=[bridge.events.Mint, bridge.events.Burn],
        filters={},
        num_blocks_rescan_for_forks=CHAIN_REORG_SAFETY_BLOCKS,
        max_chunk_scan_size=10000,
    )

    while True:
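        # Each pass scans new blocks and updates the counters served by the metrics endpoint.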
        run_scan(scanner, state, block_gauge, token_flow_counter, fees_counter)
        time.sleep(POLL_INTERVAL)
Example #58
0
# Decorate function with metric.
@COINEX_REQUEST_TIME.time()
def process_coinex_assets():
    url = "https://api.coinex.com/v1/common/asset/config?coin_type=ADA"
    json_obj = urllib.request.urlopen(url)
    crypto_asset = json.loads(json_obj.read().decode('utf-8'))
    print("Processing Coinex assets")
    if crypto_asset['code'] == 0:
        coinex_deposits.set(crypto_asset['data']['ADA']['can_deposit'])
        coinex_withdraws.set(crypto_asset['data']['ADA']['can_withdraw'])
    sys.stdout.flush()


if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(EXPORTER_PORT)
    # Main Loop: Process all API's and sleep for a certain amount of time
    while True:
        try:
            process_binance_assets()
        except Exception:
            print("failed to process binance assets")
            binance_deposits.set(False)
            binance_withdraw.set(False)
        try:
            process_bittrex_assets()
        except Exception:
            print("failed to process bittrex assets")
            bittrex_active.set(False)
            bittrex_withdraw_queue_depth.set(False)
        try:
Example #59
0
        for endpoint_name in data_type['endpoints']:
            endpoint = data_type['endpoints'][endpoint_name]

            url = root_url + endpoint['url']
            interval = endpoint['interval']

            prom_key = "%s:%s" % (data_type_name, endpoint_name)
            labels = endpoint['labels']
            values = endpoint['values']
            print(data_type_name, endpoint_name)
            pc = PrometheusClient(c['prometheus'], prom_key, labels, values)
            csv = CSVWriter(c['csvwriter'], prom_key, labels + values)

            if data_type_name == 'queue' and endpoint_name == 'config':
                worker = QueueConfigCollector(c['dpid'], url, interval, pc,
                                              csv, filter_dict)
                worker.start()
                continue

            worker = Collector(c['dpid'], url, interval, pc, csv, filter_dict)
            worker.start()


if __name__ == '__main__':
    c = get_config()
    spawn_collectors(c)
    print("Starting http server...")
    start_http_server(c['prometheus']['port'])
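    # Keep the main thread alive; the collector threads and the metrics server do the work.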
    while True:
        time.sleep(10)
Example #60
0
            "pqshell --summary |awk '/Total queue size:/ {print $4}'"
        ).readline().strip()
        metric = Metric('total_queue_size', 'Size of the queue', 'summary')
        metric.add_sample('total_queue_size',
                          value=float(total_size),
                          labels={})
        yield metric

        actv = os.popen(
            "pqshell --summary |awk '/Active/ {print $2}'").readline().strip()
        metric = Metric('active_mails', 'Number of mails with active status',
                        'summary')
        metric.add_sample('active_mails', value=float(actv), labels={})
        yield metric

        defered = os.popen("pqshell --summary |awk '/Deferred/ {print $2}'"
                           ).readline().strip()
        metric = Metric('defered_mails', 'Number of mails with deferred status',
                        'summary')
        metric.add_sample('defered_mails', value=float(defered), labels={})
        yield metric


if __name__ == '__main__':
    # pass port number as argument
    start_http_server(int(sys.argv[1]))
    REGISTRY.register(CustomCollector())
    # The registered collector is invoked on every scrape, so the main thread only
    # needs to stay alive (assumes `time` is imported at the top of this module).
    while True:
        time.sleep(60)