Example #1
def start_exporter(config, port, interval):
    """ run the exporter every <interval> seconds """
    REGISTRY.register(NovaCollector(config))
    start_http_server(port)
    while True:
        generate_latest(REGISTRY)
        time.sleep(interval)
Example #2
def exporter_start():
    print('starting server http://{}:{}/metrics'.format(
        EXPORTER_LISTEN_HOST, EXPORTER_LISTEN_PORT))
    REGISTRY.register(CustomCollector())
    start_http_server(EXPORTER_LISTEN_PORT, addr=EXPORTER_LISTEN_HOST)
    while True:
        time.sleep(5)
Example #3
def main():
    parser = create_parser()
    args = parser.parse_args()

    # log configuration
    loginipath = os.path.join(os.path.dirname(__file__), "logconf.ini")
    logging.config.fileConfig(
        loginipath,
        defaults={'logfilename': args.logfile}
        )
    logger = logging.getLogger("zvmExporter")

    # split address and port
    addr_rx = re.compile(
        r'(?P<addr>[a-zA-Z0-9][a-zA-Z0-9\-]*(\.[a-zA-Z0-9][a-zA-Z0-9\-]*)+)'
        r'(:(?P<port>\d+))?')
    match = addr_rx.match(args.server)
    if match:
        xcat_addr = match.group('addr')
        xcat_port = match.group('port') or '443'
    else:
        logger.info("Invalid address")
        return 1

    logger.info("Program started")

    # start collector
    REGISTRY.register(ZVMCollector(args.zhcpnode, args.username,
                                   args.password, xcat_addr, xcat_port,
                                   args.cert))
    start_http_server(args.port)
    while True:
        sleep(1)
Example #4
def main():
    """
    Main method
    """
    args = create_parser().parse_args()

    log_format = '%(asctime)s %(message)s'
    logging_args = dict(format=log_format,
                        level=args.log_level)
    if args.log_file:
        logging_args['filename'] = args.log_file
        logging_args['filemode'] = 'a'

    logging.basicConfig(**logging_args)

    scheme = "https" if args.use_tls else "http"
    use_ts = args.use_device_data_timestamp
    collector = SunPowerPVSupervisorCollector(hostname=args.hostname,
                                              port=args.port,
                                              scheme=scheme,
                                              timeout=args.timeout,
                                              use_device_data_timestamp=use_ts,
                                             )

    logging.info("Listening on port %d...", args.listen_on)
    start_http_server(args.listen_on)

    REGISTRY.register(collector)

    # Sleep indefinitely until we receive a signal
    while True:
        time.sleep(10)
Example #5
    def _register(self):
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering" % (self.name,))
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
Example #6
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(AzureStatusCollector())
        start_http_server(port)
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #7
def main():
	try:
		args = parse_args()
		port = int(args.port)
		REGISTRY.register(CouchbaseCollector(args.couchbase))
		start_http_server(port)
		print "Serving at port: ", port
		while True: time.sleep(1)
	except KeyboardInterrupt:
		print(" Interrupted")
		exit(0)
Example #8
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(JenkinsCollector(args.jenkins, args.user, args.password, args.insecure))
        start_http_server(port)
        print("Polling {}. Serving at port: {}".format(args.jenkins, port))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #9
def register_cache(cache_type, cache_name, cache):

    # Check if the metric is already registered. Unregister it, if so.
    # This usually happens during tests, as at runtime these caches are
    # effectively singletons.
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    if metric_name in collectors_by_name.keys():
        REGISTRY.unregister(collectors_by_name[metric_name])

    class CacheMetric(object):

        hits = 0
        misses = 0
        evicted_size = 0

        def inc_hits(self):
            self.hits += 1

        def inc_misses(self):
            self.misses += 1

        def inc_evictions(self, size=1):
            self.evicted_size += size

        def describe(self):
            return []

        def collect(self):
            try:
                if cache_type == "response_cache":
                    response_cache_size.labels(cache_name).set(len(cache))
                    response_cache_hits.labels(cache_name).set(self.hits)
                    response_cache_evicted.labels(cache_name).set(self.evicted_size)
                    response_cache_total.labels(cache_name).set(self.hits + self.misses)
                else:
                    cache_size.labels(cache_name).set(len(cache))
                    cache_hits.labels(cache_name).set(self.hits)
                    cache_evicted.labels(cache_name).set(self.evicted_size)
                    cache_total.labels(cache_name).set(self.hits + self.misses)
            except Exception as e:
                logger.warn("Error calculating metrics for %s: %s", cache_name, e)
                raise

            yield GaugeMetricFamily("__unused", "")

    metric = CacheMetric()
    REGISTRY.register(metric)
    caches_by_name[cache_name] = cache
    collectors_by_name[metric_name] = metric
    return metric
Example #10
    def install_colletor(self):
        class Collector(object):
            def collect(self):
                try:
                    ret = []
                    for c in kvmagent.metric_collectors:
                        ret.extend(c())

                    return ret
                except Exception as e:
                    content = traceback.format_exc()
                    err = '%s\n%s\n' % (str(e), content)
                    logger.warn(err)
                    return []

        REGISTRY.register(Collector())
Example #11
def main():
    try:
        args = parse_args()
        port = int(args.port)
        while True:
            try:
                if requests.get(args.odl_inventory):
                    REGISTRY.register(OpenDaylightCollector(args.opendaylight, args.odl_inventory))
                    start_http_server(port)
                    print "Polling data from OpenDaylight: %s. Starting OpenDaylight exporter on port: %s" % (args.opendaylight, port)
                    while True:
                        time.sleep(1)
            except ConnectionError:
                print "OpenDaylight is either not running or it is unreachable."
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #12
def run_server(test, port):
    """
    This script provides monitoring information about the postgraas server.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")

    config = cfg.get_config()

    collector = CustomCollector(config)
    if test:
        click.echo("TEST MODE")
        for metric in collector.collect():
            for sample in metric.samples:
                click.echo(sample)
    else:
        click.echo("Running web server at port {}".format(port))
        REGISTRY.register(collector)
        app = Flask(__name__)
        app.register_blueprint(blueprint)
        app.run(host='0.0.0.0', port=port, threaded=True)
Example #13
    exporter_configuration = load_file('/etc/bb_exporter/bb_exporter.yml')
    plugins_path = exporter_configuration['plugins_path']

    print(bcolors.OKBLUE+'[INFO] Loading plugins'+bcolors.ENDC)
    modules = {}
    for f in os.listdir(plugins_path+'/'):
        if os.path.isfile(plugins_path+'/'+f) and f.endswith('.py') and f != 'main.py' and f[:-3] in exporter_configuration['collectors']:
            modname = f[:-3]  # remove '.py' extension
            spec = importlib.util.spec_from_file_location(modname, plugins_path+'/'+f)
            modules[modname] = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(modules[modname])

    print(bcolors.OKBLUE+'  Found:'+bcolors.ENDC)
    for plugin in modules:
        print(bcolors.OKBLUE+'    - '+plugin+bcolors.ENDC)

    print(bcolors.OKBLUE+'[INFO] Starting http server'+bcolors.ENDC)
    start_http_server(9777)

    print(bcolors.OKBLUE+'[INFO] Registering collector plugins...'+bcolors.ENDC)
    for coll in exporter_configuration['collectors']:
        if coll in modules:
            print(bcolors.OKBLUE+'    - Registering '+coll+bcolors.ENDC)
            REGISTRY.register(modules[coll].Collector(exporter_configuration['collectors'][coll]))
        else:
            print('Collector '+coll+' was defined in configuration file but could not be found.')

    while True:
        time.sleep(1)
Example #14
            logger.info('%s received score of %d on %s', target,
                        scan_results.get('score', 0),
                        scan_results.get('end_time'))

        logger.info('Scraping completed')


if __name__ == '__main__':
    logger.info(
        'observatory-exporter (https://github.com/anroots/observatory-exporter) starting up...'
    )
    api_url = os.environ.get(
        'OBSERVATORY_API_URL',
        'https://http-observatory.security.mozilla.org/api/v1')

    targets = os.environ.get('OBSERVATORY_TARGETS', '').strip()
    if not targets:
        logger.fatal(
            'No targets to scan, please set environment variable OBSERVATORY_TARGETS'
        )
        sys.exit(1)

    REGISTRY.register(ObservatoryCollector(api_url, targets.split(',')))
    start_http_server(8080)
    logger.info(
        'Collector started, listening on port :8080; waiting for scrapes...')

    while True:
        time.sleep(1)
Example #15
                match = HAPROXY_ANNOTATION_RE.match(key)
                if match and value:
                    label_name = 'haproxy_' + re.sub(r'[^a-zA-Z_]', '_',
                                                     match.group(1))
                    route_annotations[label_name] = value
            route_metric_family.add_metric([
                namespace, name, host, service, tls_termination,
                insecure_edge_termination, ip_whitelist
            ], route_annotations)

        return image_metric_family, route_metric_family, env_metric_family


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        level=logging.INFO)

    # Disable SSL warnings: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
    urllib3.disable_warnings()

    interval = int(os.getenv('IMAGE_METRICS_INTERVAL', '300'))
    customCollector = CustomCollector()
    REGISTRY.register(customCollector)
    prometheus_client.start_http_server(8080)
    while True:
        try:
            customCollector.update()
        except Exception as e:
            logging.exception(e)
        time.sleep(interval)
Example #16
        yield dev_info_metric
        yield from (x[0] for x in metrics)
        yield from (x[0] for x in attr_metrics.values())
        yield from (x[0] for x in nvme_metrics)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Prometheus exporter for S.M.A.R.T. metrics.')
    parser.add_argument(
        '--listen-address',
        '-a',
        metavar='ADDRESS',
        type=str,
        default='',
        help='Address the exporter should listen on. Default: all')
    parser.add_argument(
        '--listen-port',
        '-p',
        metavar='PORT',
        type=int,
        default=9541,
        help='Port the exporter should listen on. Default: 9541')
    args = parser.parse_args()

    REGISTRY.register(SmartmonCollector())
    start_http_server(args.listen_port, args.listen_address)
    while True:
        time.sleep(60)
Example #17
        'pulp_in_progress_tasks':
        pulp_in_progress_tasks_family,
        'pulp_waiting_tasks':
        pulp_waiting_tasks_family,
        'pulp_task_duration_seconds':
        pulp_task_duration_seconds_family,
    })


class Expositor(object):
    """ Responsible for exposing metrics to prometheus """
    def collect(self):
        logging.info("Serving prometheus data")
        for key in sorted(metrics):
            yield metrics[key]


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for collector in list(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
    REGISTRY.register(Expositor())

    # Populate data before exposing over http
    scrape()
    start_http_server(8000)

    while True:
        time.sleep(int(os.environ.get('pulp_POLL_INTERVAL', '3')))
        scrape()
Example #18
def setup():
    m = Monitis(settings.APIKEY, settings.SECRETKEY)
    REGISTRY.register(MonitisCollector(m))
    start_http_server(8000)
Example #19
def sanitise_name(s):
    return re.sub(r"[^a-zA-Z0-9:_]", "_", s)

class ConsulCollector(object):
  def collect(self):
    out = urlopen("http://localhost:8500/v1/agent/metrics").read()
    metrics = json.loads(out.decode("utf-8"))

    for g in metrics["Gauges"]:
      yield GaugeMetricFamily(sanitise_name(g["Name"]),
          "Consul metric " + g["Name"], g["Value"])

    for c in metrics["Counters"]:
      yield CounterMetricFamily(sanitise_name(c["Name"]) + "_total",
          "Consul metric " + c["Name"], c["Count"])

    for s in metrics["Samples"]:
      yield SummaryMetricFamily(sanitise_name(s["Name"]) + "_seconds",
          "Consul metric " + s["Name"],
          count_value=s["Count"], sum_value=s["Sum"] / 1000)

if __name__ == '__main__':
  REGISTRY.register(ConsulCollector())
  start_http_server(8000)
  while True:
    time.sleep(1)



Example #20
def run_prometheus_server(port, collectors, *args):
    start_http_server(int(port))
    for c in collectors:
        REGISTRY.register(c)
    while True:
        time.sleep(1)
Example #21
        'amortized_upfront_fee': amortized_upfront_fee
    }

    return riValues


class awsRIUtilizationExporter(object):
    def collect(self):
        metric = GaugeMetricFamily(
            'reservation_utilization', 'Daily Reserved Instance Data', labels=["ri_metric"])
        for key, value in getAWSRIMetrics().items():
            metric.add_sample('reservation_utilization',
                              value=value, labels={'ri_metric': key})
        yield metric


if __name__ == '__main__':
    port = int(os.getenv('PORT', 9250))
    if len(sys.argv) > 2:
        print("Usage: awsRIUtilizationExporter.py [PORT]")
    else:
        if len(sys.argv) == 2:
            port = int(sys.argv[1])
            print('starting AWS RI Utilization Exporter on port %d' % port)
        else:
            print('starting AWS RI Utilization Exporter on default port 9250')
        start_http_server(port)
        REGISTRY.register(awsRIUtilizationExporter())
        while True:
            time.sleep(1)
Example #22
File: app.py  Project: deweya/pelorus
        for rc in replicationcontrollers.items:
            images = [
                image_sha(c.image) for c in rc.spec.template.spec.containers
            ]

            # Since a commit will be built into a particular image and there could be multiple
            # containers (images) per pod, we will push one metric per image/container in the
            # pod template
            for i in images:
                if i is not None:
                    metric = DeployTimeMetric(rc.metadata.name, namespace)
                    metric.labels = rc.metadata.labels
                    metric.deploy_time = rc.metadata.creationTimestamp
                    metric.image_sha = i
                    metrics.append(metric)

    return metrics


if __name__ == "__main__":
    namespaces = None
    if os.environ.get('NAMESPACES') is not None:
        namespaces = [
            proj.strip() for proj in os.environ.get('NAMESPACES').split(",")
        ]
    start_http_server(8080)
    REGISTRY.register(DeployTimeCollector(namespaces))
    while True:
        time.sleep(1)
Example #23
    parser.add_argument('-i',
                        '--update-interval-seconds',
                        type=int,
                        default=0,
                        help='Update interval in seconds.')
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        default=False,
                        help='Enable debug logging.')

    args = parser.parse_args()
    if args.debug:
        sanic_logger.setLevel(logging.DEBUG)

    sanic_logger.info("Starting exchange-meeting-room-exporter...")
    collector = ExchangeMeetingRoomCollector(
        logger=sanic_logger,
        server=args.exchange_server,
        username=args.username,
        password=args.password_file.read().strip(),
        room_name_regex=args.room_name_regex,
        room_list_regex=args.room_list_regex,
        update_interval_seconds=args.update_interval_seconds,
    )

    REGISTRY.register(collector)
    if collector.use_cache:
        app.add_task(collector.start_cache_update)
    app.run(host='0.0.0.0', port=args.port)
Example #24
def main():
    """Puts the exporter together."""
    # If the session and context keys are not created, their destruction
    # should not be attempted.
    session = False
    context = False
    try:
        args = parse_args(sys.argv[1:])
        try:
            raw_yaml_creds = parse_yaml_file(args.c)
        # These will be thrown upon wrong user input
        # The user should not see a traceback then
        except (PermissionError, FileNotFoundError) as error_message:
            raise ImproperExit(error_message)
        try:
            yaml_creds = parse_yaml_sections(raw_yaml_creds, ("metrics", ),
                                             args.c)[0]
        except (AttributeError, YAMLInfoNotFoundError) as error_message:
            raise ImproperExit(error_message)
        try:
            check_creds_yaml(yaml_creds, args.c)
        except YAMLInfoNotFoundError as error_message:
            raise ImproperExit(error_message)
        try:
            raw_yaml_metrics = parse_yaml_file(args.m)
        except (PermissionError, FileNotFoundError) as error_message:
            raise ImproperExit(error_message)
        try:
            parsed_yaml_sections = parse_yaml_sections(
                raw_yaml_metrics, ("metric_groups", "metrics"), args.m)
            yaml_metric_groups = parsed_yaml_sections[0]
            yaml_metrics = parsed_yaml_sections[1]
        except (AttributeError, YAMLInfoNotFoundError) as error_message:
            raise ImproperExit(error_message)
        try:
            check_metrics_yaml(yaml_metric_groups, yaml_metrics, args.m)
        except YAMLInfoNotFoundError as error_message:
            raise ImproperExit(error_message)
        session = create_session(yaml_creds)
        try:
            context = create_metrics_context(session, yaml_metric_groups,
                                             args.c)
        except (ConnectTimeout, ServerAuthError) as error_message:
            raise ImproperExit(error_message)
        REGISTRY.register(
            ZHMCUsageCollector(yaml_creds, session, context,
                               yaml_metric_groups, yaml_metrics, args.m,
                               args.c))
        start_http_server(int(args.p))
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                raise ProperExit
    except KeyboardInterrupt:
        print("Operation interrupted before server start.")
        delete_metrics_context(session, context)
        sys.exit(1)
    except ImproperExit as error_message:
        print(error_message)
        delete_metrics_context(session, context)
        sys.exit(1)
    except ProperExit:
        print("Operation interrupted after server start.")
        delete_metrics_context(session, context)
        sys.exit(0)
Example #25
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(description='Export ES query results to Prometheus.')
    parser.add_argument('-e', '--es-cluster', default='localhost',
                        help='addresses of nodes in a Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)')
    parser.add_argument('--ca-certs',
                        help='path to a CA certificate bundle. Can be absolute, or relative to the current working directory. If not specified, SSL certificate verification is disabled.')
    parser.add_argument('-p', '--port', type=int, default=9206,
                        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument('--basic-user',
                        help='User for authentication. (default: no user)')
    parser.add_argument('--basic-password',
                        help='Password for authentication. (default: no password)')
    parser.add_argument('--query-disable', action='store_true',
                        help='disable query monitoring. Config file does not need to be present if query monitoring is disabled.')
    parser.add_argument('-c', '--config-file', default='exporter.cfg',
                        help='path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)')
    parser.add_argument('--cluster-health-disable', action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument('--cluster-health-timeout', type=float, default=10.0,
                        help='request timeout for cluster health monitoring, in seconds. (default: 10)')
    parser.add_argument('--cluster-health-level', default='indices', choices=['cluster', 'indices', 'shards'],
                        help='level of detail for cluster health monitoring.  (default: indices)')
    parser.add_argument('--nodes-stats-disable', action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument('--nodes-stats-timeout', type=float, default=10.0,
                        help='request timeout for nodes stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--nodes-stats-metrics', type=nodes_stats_metrics_parser,
                        help='limit nodes stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-disable', action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument('--indices-stats-timeout', type=float, default=10.0,
                        help='request timeout for indices stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--indices-stats-mode', default='cluster', choices=['cluster', 'indices'],
                        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument('--indices-stats-metrics', type=indices_stats_metrics_parser,
                        help='limit indices stats to specific metrics. Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-fields', type=indices_stats_fields_parser,
                        help='include fielddata info for specific fields. Fields should be separated by commas e.g. indices,fs. Use \'*\' for all.')
    parser.add_argument('-j', '--json-logging', action='store_true',
                        help='turn on json logging.')
    parser.add_argument('--log-level', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        help='detail level to log. (default: INFO)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    if args.basic_user and args.basic_password is None:
        parser.error('Username provided with no password.')
    elif args.basic_user is None and args.basic_password:
        parser.error('Password provided with no username.')
    elif args.basic_user:
        http_auth = (args.basic_user, args.basic_password)
    else:
        http_auth = None

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(
        handlers=[log_handler],
        level=logging.DEBUG if args.verbose else log_level
    )
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    if args.ca_certs:
        es_client = Elasticsearch(es_cluster, verify_certs=True, ca_certs=args.ca_certs, http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster, verify_certs=False, http_auth=http_auth)

    scheduler = None

    if not args.query_disable:
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section, 'QueryIntervalSecs', fallback=15)
                query_timeout = config.getfloat(section, 'QueryTimeoutSecs', fallback=10)
                query_indices = config.get(section, 'QueryIndices', fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))

                queries[query_name] = (query_interval, query_timeout, query_indices, query)

        if queries:
            for name, (interval, timeout, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query, timeout)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warn('No queries found in config file %s', args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(ClusterHealthCollector(es_client,
                                                 args.cluster_health_timeout,
                                                 args.cluster_health_level))

    if not args.nodes_stats_disable:
        REGISTRY.register(NodesStatsCollector(es_client,
                                              args.nodes_stats_timeout,
                                              metrics=args.nodes_stats_metrics))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(IndicesStatsCollector(es_client,
                                                args.indices_stats_timeout,
                                                parse_indices=parse_indices,
                                                metrics=args.indices_stats_metrics,
                                                fields=args.indices_stats_fields))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
Example #26
def cli(**options):
    """Export MySQL query results to Prometheus."""

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(
        log_format) if options['json_logging'] else logging.Formatter(
            log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, options['log_level'])
    logging.basicConfig(
        handlers=[log_handler],
        level=logging.DEBUG if options['verbose'] else log_level)
    logging.captureWarnings(True)

    port = options['port']
    mysql_host, mysql_port = options['mysql_server']

    username = options['mysql_user']
    password = options['mysql_password']
    timezone = options['mysql_local_timezone']

    config = configparser.ConfigParser(converters=CONFIGPARSER_CONVERTERS)
    config.read_file(options['config_file'])

    config_dir_file_pattern = os.path.join(options['config_dir'], '*.cfg')
    config_dir_sorted_files = sorted(glob.glob(config_dir_file_pattern))
    config.read(config_dir_sorted_files)

    query_prefix = 'query_'
    queries = {}
    for section in config.sections():
        if section.startswith(query_prefix):
            query_name = section[len(query_prefix):]
            interval = config.getfloat(section,
                                       'QueryIntervalSecs',
                                       fallback=15)
            db_name = config.get(section, 'QueryDatabase')
            query = config.get(section, 'QueryStatement')
            value_columns = config.get(section, 'QueryValueColumns').split(',')
            on_error = config.getenum(section, 'QueryOnError', fallback='drop')
            on_missing = config.getenum(section,
                                        'QueryOnMissing',
                                        fallback='drop')

            queries[query_name] = (interval, db_name, query, value_columns,
                                   on_error, on_missing)

    scheduler = sched.scheduler()

    mysql_kwargs = dict(
        host=mysql_host,
        port=mysql_port,
        user=username,
        password=password,
        # Use autocommit mode to avoid keeping the same transaction across query
        # runs when the connection is reused. Using the same transaction would
        # prevent changes from being reflected in results, and therefore metrics.
        # Note: Queries could theoretically change data...
        autocommit=True)
    if timezone:
        mysql_kwargs['init_command'] = "SET time_zone = '{}'".format(timezone)

    mysql_client = PersistentDB(creator=pymysql, **mysql_kwargs)

    if queries:
        for query_name, (interval, db_name, query, value_columns, on_error,
                         on_missing) in queries.items():
            schedule_job(scheduler, interval, run_query, mysql_client,
                         query_name, db_name, query, value_columns, on_error,
                         on_missing)
    else:
        log.warning('No queries found in config file(s)')

    REGISTRY.register(QueryMetricCollector())

    log.info('Starting server...')
    start_http_server(port)
    log.info('Server started on port %(port)s', {'port': port})

    scheduler.run()
Example #27
                metric_name, metric_path, value))
            metric = GaugeMetricFamily(metric_name,
                                       metric_description,
                                       labels=label_keys)
            metric.add_metric(tuple(str(v) for v in label_values), value)

            yield metric


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Expose metrics by jsonpath for configured url')
    parser.add_argument('config_file_path',
                        help='Path of the config file',
                        nargs='?',
                        default='/etc/prometheus-jsonpath-exporter/config.yml')
    args = parser.parse_args()
    with open(args.config_file_path) as config_file:
        config = yaml.load(config_file)
        log_level = config.get('log_level', DEFAULT_LOG_LEVEL)
        logging.basicConfig(
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            level=logging.getLevelName(log_level.upper()))
        exporter_port = config.get('exporter_port', DEFAULT_PORT)
        logging.debug("Config %s", config)
        logging.info('Starting server on port %s', exporter_port)
        start_http_server(exporter_port)
        REGISTRY.register(JsonPathCollector(config))
    while True:
        time.sleep(1)
Example #28
def main():
    REGISTRY.register(IpmiCollector())
    start_http_server(8000)
    while True:
        time.sleep(5)
Example #29
            for project in projects:
                asset = project.get('asset', None)
                duration = project.get('duration', None)
                lotSize = project.get('lotSize', None)
                lotsPurchased = project.get('lotsPurchased', None)
                lotsUpLimit = project.get('lotsUpLimit', None)
                projectId = project.get('projectId', None)
                interestRate = project.get('interestRate', None)

                purchased = int(lotsPurchased) * float(lotSize)
                uplimit = int(lotsUpLimit) * float(lotSize)

                customized_fixed_purchased_metrics.add_metric(
                    [projectId, str(duration), asset], purchased)
                customized_fixed_uplimit_metrics.add_metric(
                    [projectId, str(duration), asset], uplimit)
                customized_fixed_rate_metrics.add_metric(
                    [projectId, str(duration), asset], interestRate)

            yield customized_fixed_purchased_metrics
            yield customized_fixed_uplimit_metrics
            yield customized_fixed_rate_metrics


if __name__ == "__main__":

    REGISTRY.register(BinanceAPICollector())
    start_http_server(5000)
    while True:
        time.sleep(10)
Example #30
import os
import logging
from flask import Flask, redirect, Response
from collector import MarathonAppCollector
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

MARATHON_URL = os.environ.get(
        'MARATHON_URL',
        'http://leader.mesos:8080/')
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.register(MarathonAppCollector(MARATHON_URL))
app = Flask(__name__)


@app.route('/')
def home():
    return redirect('/metrics')


@app.route('/metrics')
def metrics():
    prom_metrics = generate_latest(REGISTRY)
    return Response(prom_metrics, content_type='text/plain')


if __name__ == '__main__':
    log_format = u'[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d}' \
                 u' %(levelname)s - %(message)s'
    logging.basicConfig(
Example #31
            return

        with open("/proc/self/stat") as s:
            line = s.read()
            raw_stats = line.split(") ", 1)[1].split(" ")

            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())

#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
             5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
)

Example #32
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export Kafka consumer offsets to Prometheus.')
    parser.add_argument(
        '-b',
        '--bootstrap-brokers',
        help='Addresses of brokers in a Kafka cluster to talk to.' +
        ' Brokers should be separated by commas e.g. broker1,broker2.' +
        ' Ports can be provided if non-standard (9092) e.g. brokers1:9999.' +
        ' (default: localhost)')
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=9208,
        help='Port to serve the metrics endpoint on. (default: 9208)')
    parser.add_argument(
        '-s',
        '--from-start',
        action='store_true',
        help='Start from the beginning of the `__consumer_offsets` topic.')
    parser.add_argument(
        '--topic-interval',
        type=float,
        default=30.0,
        help='How often to refresh topic information, in seconds. (default: 30)'
    )
    parser.add_argument(
        '--high-water-interval',
        type=float,
        default=10.0,
        help=
        'How often to refresh high-water information, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--low-water-interval',
        type=float,
        default=10.0,
        help=
        'How often to refresh low-water information, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--consumer-config',
        action='append',
        default=[],
        help=
        'Provide additional Kafka consumer config as a consumer.properties file. Multiple files will be merged, later files having precedence.'
    )
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='Turn on json logging.')
    parser.add_argument(
        '--log-level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Detail level to log. (default: INFO)')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) \
        if args.json_logging \
        else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port

    consumer_config = {
        'bootstrap_servers': 'localhost',
        'auto_offset_reset': 'latest',
        'group_id': None,
        'consumer_timeout_ms': 500
    }

    for filename in args.consumer_config:
        with open(filename) as f:
            raw_config = javaproperties.load(f)
            for k, v in raw_config.items():
                if v == '':
                    # Treat empty values as if they weren't set
                    continue

                if v.lower() in ['true', 'false']:
                    # Convert boolean values
                    v = True if v.lower() == 'true' else False

                else:
                    # Try and convert numeric values
                    try:
                        v = int(v)
                    except ValueError:
                        try:
                            v = float(v)
                        except ValueError:
                            pass

                consumer_config[k.replace('.', '_')] = v

    if args.bootstrap_brokers:
        consumer_config['bootstrap_servers'] = args.bootstrap_brokers

    consumer_config['bootstrap_servers'] = consumer_config[
        'bootstrap_servers'].split(',')

    if args.from_start:
        consumer_config['auto_offset_reset'] = 'earliest'

    consumer = KafkaConsumer('__consumer_offsets', **consumer_config)
    client = consumer._client

    topic_interval = args.topic_interval
    high_water_interval = args.high_water_interval
    low_water_interval = args.low_water_interval

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    REGISTRY.register(collectors.HighwaterCollector())
    REGISTRY.register(collectors.LowwaterCollector())
    REGISTRY.register(collectors.ConsumerOffsetCollector())
    REGISTRY.register(collectors.ConsumerLagCollector())
    REGISTRY.register(collectors.ConsumerLeadCollector())
    REGISTRY.register(collectors.ConsumerCommitsCollector())
    REGISTRY.register(collectors.ConsumerCommitTimestampCollector())
    REGISTRY.register(collectors.ExporterOffsetCollector())
    REGISTRY.register(collectors.ExporterLagCollector())
    REGISTRY.register(collectors.ExporterLeadCollector())

    scheduled_jobs = setup_fetch_jobs(topic_interval, high_water_interval,
                                      low_water_interval, client)
    scheduler.run_scheduled_jobs(scheduled_jobs)

    try:
        while True:
            for message in consumer:
                offsets = collectors.get_offsets()
                commits = collectors.get_commits()
                commit_timestamps = collectors.get_commit_timestamps()
                exporter_offsets = collectors.get_exporter_offsets()

                # Commits store the offset a consumer should read from next,
                # so we need to add one to the current offset for semantic parity
                exporter_partition = message.partition
                exporter_offset = message.offset + 1
                exporter_offsets = ensure_dict_key(exporter_offsets,
                                                   exporter_partition,
                                                   exporter_offset)
                exporter_offsets[exporter_partition] = exporter_offset
                collectors.set_exporter_offsets(exporter_offsets)

                if message.key:
                    key_dict = parse_key(message.key)
                    # Only key versions 0 and 1 are offset commit messages.
                    # Ignore other versions.
                    if key_dict is not None and key_dict['version'] in (0, 1):

                        if message.value:
                            value_dict = parse_value(message.value)
                            if value_dict is not None:
                                group = key_dict['group']
                                topic = key_dict['topic']
                                partition = key_dict['partition']
                                offset = value_dict['offset']
                                commit_timestamp = value_dict[
                                    'commit_timestamp'] / 1000

                                offsets = ensure_dict_key(offsets, group, {})
                                offsets[group] = ensure_dict_key(
                                    offsets[group], topic, {})
                                offsets[group][topic] = ensure_dict_key(
                                    offsets[group][topic], partition, offset)
                                offsets[group][topic][partition] = offset
                                collectors.set_offsets(offsets)

                                commits = ensure_dict_key(commits, group, {})
                                commits[group] = ensure_dict_key(
                                    commits[group], topic, {})
                                commits[group][topic] = ensure_dict_key(
                                    commits[group][topic], partition, 0)
                                commits[group][topic][partition] += 1
                                collectors.set_commits(commits)

                                commit_timestamps = ensure_dict_key(
                                    commit_timestamps, group, {})
                                commit_timestamps[group] = ensure_dict_key(
                                    commit_timestamps[group], topic, {})
                                commit_timestamps[group][
                                    topic] = ensure_dict_key(
                                        commit_timestamps[group][topic],
                                        partition, 0)
                                commit_timestamps[group][topic][
                                    partition] = commit_timestamp
                                collectors.set_commit_timestamps(
                                    commit_timestamps)

                        else:
                            # The group has been removed, so we should not report metrics
                            group = key_dict['group']
                            topic = key_dict['topic']
                            partition = key_dict['partition']

                            if group in offsets:
                                if topic in offsets[group]:
                                    if partition in offsets[group][topic]:
                                        del offsets[group][topic][partition]

                            if group in commits:
                                if topic in commits[group]:
                                    if partition in commits[group][topic]:
                                        del commits[group][topic][partition]

                            if group in commit_timestamps:
                                if topic in commit_timestamps[group]:
                                    if partition in commit_timestamps[group][
                                            topic]:
                                        del commit_timestamps[group][topic][
                                            partition]

                # Check if we need to run any scheduled jobs
                # each message.
                scheduled_jobs = scheduler.run_scheduled_jobs(scheduled_jobs)

            # Also check if we need to run any scheduled jobs
            # each time the consumer times out, in case there
            # aren't any messages to consume.
            scheduled_jobs = scheduler.run_scheduled_jobs(scheduled_jobs)

    except KeyboardInterrupt:
        pass

    shutdown()
Example #33
        except KeyError:
            logging.error("Could not retrieve metrics from: " + self.metrics)
            logging.error("Check argument sonar_metrics")


if __name__ == "__main__":
    parser = configargparse.ArgumentParser()
    parser.add_argument('--sonar_url', type=str, required=True, env_var='sonar_url')
    parser.add_argument('--sonar_metrics', type=str, env_var='sonar_metrics', default='ncloc,coverage')
    parser.add_argument('--sonar_user', type=str, required=True, env_var='sonar_user')
    parser.add_argument('--sonar_password', type=str, required=True, env_var='sonar_password')
    parser.add_argument('--run_once', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')

    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    REGISTRY.register(SonarCollector(args.sonar_url, args.sonar_user, args.sonar_password, args.sonar_metrics))

    if args.run_once:
        for x in REGISTRY.collect():
            logging.info(x)
            for y in x.samples:
                logging.info(y)
        sys.exit("runonce")

    start_http_server(9118)
    while True:
        time.sleep(1)
Example #34
    if os.path.exists('settings.json'):
        with open('settings.json', 'r') as fh:
            configuration = json.loads(fh.read())

        if configuration is not None:
            if type(configuration) is list:
                for entry in configuration:
                    if 'host' in entry and 'username' in entry and 'password' in entry:
                        collectors.append(
                            FritzBoxCollector(entry['host'], entry['username'],
                                              entry['password']))

    if os.getenv('FRITZ_USER') is not None and os.getenv(
            'FRITZ_PASS') is not None:
        collectors.append(
            FritzBoxCollector(os.getenv('FRITZ_HOST', 'fritz.box'),
                              os.getenv('FRITZ_USER'),
                              os.getenv('FRITZ_PASS')))

    return collectors


if __name__ == '__main__':
    REGISTRY.register(FritzBoxCollector('settings.json'))

    # Start up the server to expose the metrics.
    print("Starting Server at " + str(os.getenv('FRITZ_EXPORTER_PORT', 8765)))
    prometheus_client.start_http_server(int(os.getenv('FRITZ_EXPORTER_PORT', 8765)))
    while True:
        time.sleep(10000)
Example #35
        for process in _background_processes_copy:
            process.update_metrics()

        # now we need to run collect() over each of the static Counters, and
        # yield each metric they return.
        for m in (
            _background_process_ru_utime,
            _background_process_ru_stime,
            _background_process_db_txn_count,
            _background_process_db_txn_duration,
            _background_process_db_sched_duration,
        ):
            yield from m.collect()


REGISTRY.register(_Collector())


class _BackgroundProcess:
    def __init__(self, desc: str, ctx: LoggingContext):
        self.desc = desc
        self._context = ctx
        self._reported_stats: Optional[ContextResourceUsage] = None

    def update_metrics(self) -> None:
        """Updates the metrics with values from this process."""
        new_stats = self._context.get_resource_usage()
        if self._reported_stats is None:
            diff = new_stats
        else:
            diff = new_stats - self._reported_stats
Example #36
        ], i['org.eclipse.jetty.webapp.WebAppContext.trace-requests']['count'])
        yield hq

    def _mount_point(self, description):
        return description.split('(')[0].strip()

    def _request_data(self):
        info_request = urllib2.Request(
            "{0}/service/rest/atlas/system-information".format(self._target))
        info_request.add_header("Authorization", "Basic %s" % self._auth)
        self._info = json.loads(urllib2.urlopen(info_request).read())

        data_request = urllib2.Request("{0}/service/metrics/data".format(
            self._target))
        data_request.add_header("Authorization", "Basic %s" % self._auth)
        self._data = json.loads(urllib2.urlopen(data_request).read())


def fatal(msg):
    print(msg)
    os._exit(1)  # hard exit without throwing exception


if __name__ == "__main__":
    print("starting...")
    args = parse()
    REGISTRY.register(NexusCollector(args.host, args.user, args.password))
    start_http_server(9184)
    while True:
        time.sleep(1)
Example #37
                                   'Target Replicas',
                                   labels=['service_name', 'service_id'])
        for serv in serviceList:
            try:
                target = serviceStatus[serv.id]['Target']
            except KeyError:
                target = 0
            metric.add_metric(labels=[serv.attrs['Spec']['Name'], serv.id],
                              value=target)
        yield metric

        #
        metric = GaugeMetricFamily('docker_swarm_service_running_replicas',
                                   'Running Replicas',
                                   labels=['service_name', 'service_id'])
        for serv in serviceList:
            try:
                rr = serviceStatus[serv.id]['Running']
            except KeyError:
                rr = 0
            metric.add_metric(labels=[serv.attrs['Spec']['Name'], serv.id],
                              value=rr)
        yield metric


if __name__ == '__main__':
    start_http_server(5051)
    REGISTRY.register(SwarmServiceCollector())

    while True:
        time.sleep(1)
Example #38
def run_prometheus_server(port, vcenters):
    start_http_server(int(port))
    REGISTRY.register(VmonCollector(vcenters))
    REGISTRY.register(LoggingCollector(vcenters))
    while True:
        time.sleep(1)
Example #39
    parser.add_argument('--bamboo_url', type=str, required=True, env_var='bamboo_url')
    parser.add_argument('--bamboo_user', type=str, required=True, env_var='bamboo_user')
    parser.add_argument('--bamboo_password', type=str, required=True, env_var='bamboo_password')
    parser.add_argument('--bamboo_test_jobs', type=str, env_var='bamboo_test_jobs') # CSV of PRJ-XX-JOB, eg COS-COS1-DTOL
#    parser.add_argument('--dashing_event_url', type=str, required=True, env_var='dashing_event_url')
#    parser.add_argument('--redis_host', type=str, required=True, env_var='redis_host')
#    parser.add_argument('--redis_port', type=int, env_var='redis_port', default=6379)
    parser.add_argument('--run_once', action='store_true')
    parser.add_argument('--sonar_url', type=str, required=True, env_var='sonar_url')
    parser.add_argument('--sonar_user', type=str, required=True, env_var='sonar_user')
    parser.add_argument('--sonar_password', type=str, required=True, env_var='sonar_password')


    args = parser.parse_args()

    REGISTRY.register(BambooCollector(args.bamboo_url, args.bamboo_user, args.bamboo_password, args.bamboo_test_jobs))
#    REGISTRY.register(EventStreamCollector(args.dashing_event_url))  # http://192.168.99.100:3030/events
#    REGISTRY.register(RedisCollector(args.redis_host, args.redis_port))
    REGISTRY.register(SonarCollector(args.sonar_url, args.sonar_user, args.sonar_password, []))

    if args.run_once:
        # time.sleep(5) # wait for async
        pp = pprint.PrettyPrinter(indent=4)
        for collector in REGISTRY._collectors:
            # collector = BambooCollector(args.bamboo_url, args.bamboo_user, args.bamboo_password)
            print(collector)
            for x in collector.collect():
                pp.pprint(x.samples)
        sys.exit("runonce")

    start_http_server(9118)
Example #40
                # add stellar-core calculated quantiles to our summary
                summary.add_sample(underscores,
                                   labels={'quantile': '0.75'},
                                   value=metrics[k]['75%'])
                summary.add_sample(underscores,
                                   labels={'quantile': '0.95'},
                                   value=metrics[k]['95%'])
                summary.add_sample(underscores,
                                   labels={'quantile': '0.99'},
                                   value=metrics[k]['99%'])
                yield summary
            elif metrics[k]['type'] == 'counter':
                # we have a counter, this is a Prometheus Gauge
                yield GaugeMetricFamily(underscores,
                                        'libmedida metric type: ' +
                                        metrics[k]['type'],
                                        value=metrics[k]['count'])
            elif metrics[k]['type'] == 'meter':
                # we have a meter, this is a Prometheus Counter
                yield CounterMetricFamily(underscores,
                                          'libmedida metric type: ' +
                                          metrics[k]['type'],
                                          value=metrics[k]['count'])


if __name__ == "__main__":
    REGISTRY.register(StellarCoreCollector())
    start_http_server(args.port)
    while True:
        time.sleep(1)
Example #41
    parser.add_argument('--secure', default='no', type=str)
    args = parser.parse_args()

    # Wait for other containers to start.
    print('>>> Sleeping for: %s seconds ...' % args.start_delay)
    time.sleep(args.start_delay)

    # Start the server to expose the metrics.
    print('>>> Starting the exporter on port: %s' % args.port)
    start_http_server(args.port)
    
    # Get username and password of NetScalers.
    ns_user = os.environ.get("NS_USER")
    ns_password = os.environ.get("NS_PASSWORD")
    if ns_user is None:
        ns_user = args.username
    if ns_password is None:
        ns_password = args.password
    
    # Load the metrics file specifying stats to be collected
    f = open('/exporter/metrics.json', 'r')
    metrics_json = json.load(f)

    # Register the exporter as a stat collector.
    print('>>> Registering collector for: %s' % (args.target_nsip))
    REGISTRY.register(NetscalerCollector(nsips=args.target_nsip, metrics=metrics_json, username=ns_user, password=ns_password, secure=args.secure.lower()))

    # Forever
    while True:
        time.sleep(1)
Example #42
            if os.path.islink(path) and "virtual" not in os.readlink(path):
                yield file

    def convert_size(self, size_bytes, unit):
        if size_bytes == 0:
            return 0
        size_name = ["b", "Kb", "Mb", "Gb", "Tb", "Pb", "Eb", "Zb", "Yb"]

        power = 0
        i = 0
        for dictunit in size_name:
            if dictunit == unit:
                power = i
                break
            i = i + 1

        p = math.pow(1024, power)
        return size_bytes * p


if __name__ == "__main__":
    try:
        REGISTRY.register(EthtoolCollector())
        httpd = http.server.HTTPServer(
            ("", 9417),
            MetricsHandler,
        )
        httpd.serve_forever()
    except KeyboardInterrupt:
        sys.exit()
Example #43
        yield GaugeMetricFamily(
            'fdb_coordinators_quorum_state',
            'Quorum status',
            value=1
            if data['client']['coordinators']['quorum_reachable'] else 0)

        yield GaugeMetricFamily(
            'fdb_database_status_health_state',
            'Database health status',
            value=1 if data['client']['database_status']['healthy'] else 0)

        yield GaugeMetricFamily(
            'fdb_database_status_avail_state',
            'Database availability',
            value=1 if data['client']['database_status']['available'] else 0)


REGISTRY.register(FdbCollector())

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', default=9444, type=int)
    args = parser.parse_args()

    print "Listening on 0.0.0.0:%d" % args.port

    # TODO: this starts a thread :(
    start_http_server(args.port)
    while True:
        time.sleep(1)
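
The TODO above notes that start_http_server() spawns a background server thread. If that is unwanted, one alternative (a sketch in Python 3, reusing the example's default port; not the project's actual code) is to serve /metrics from the main thread with the handler prometheus_client already provides, as Example #42 does:

from http.server import HTTPServer
from prometheus_client.exposition import MetricsHandler

if __name__ == '__main__':
    # Blocks in the main thread instead of spawning a daemon server thread.
    httpd = HTTPServer(("", 9444), MetricsHandler)
    httpd.serve_forever()
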
Example #44
0
                current_payment_detail[asset]['sum'] += float(resp['amount'])

    except requests.exceptions.HTTPError as e:
        log.info(str(e))
        log.info('http exception, restarting')
        return


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--port',
                        type=int,
                        help='The TCP port to listen on.',
                        default=9101)
    parser.add_argument('--host',
                        nargs='?',
                        help='The Horizon URL the exporter will connect to.',
                        default="https://horizon.stellar.org")
    args = parser.parse_args()
    log.info(args.port)
    log.info(args.host)

    start_http_server(args.port)
    REGISTRY.register(StatsCollector())
    while True:
        main_loop(args.host)
        time.sleep(10)
Example #45
0
                      port=pg_port,
                      database=pg_db,
                      pool_max_size=pg_pool)
    if db.ping() == 0:
        logger.info("Database connection pool initialized successfully")

    if work_mode == 'WORKER':
        while True:
            worker = Worker(db)
            worker.process()

            forecaster = Forecaster(db)
            forecaster.process()

            time.sleep(interval)
    else:
        exporter = Collector(db)
        REGISTRY.register(exporter)

        # Unregister default metrics
        REGISTRY.unregister(PROCESS_COLLECTOR)
        REGISTRY.unregister(PLATFORM_COLLECTOR)
        REGISTRY.unregister(GC_COLLECTOR)

        application = tornado.web.Application([(r"/metrics", MetricHandler, {
            "ref_object": exporter
        })])

        application.listen(9345)
        logger.info("Starting Nostradamus metric exporter")
        tornado.ioloop.IOLoop.instance().start()
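
Unregistering PROCESS_COLLECTOR, PLATFORM_COLLECTOR and GC_COLLECTOR, as above, strips the default metrics from the global registry. An equivalent approach (a sketch with a stand-in collector, not the project's own code) is to build a fresh CollectorRegistry that contains only what you register:

from prometheus_client import generate_latest
from prometheus_client.core import CollectorRegistry, GaugeMetricFamily

class DemoCollector(object):          # hypothetical stand-in for Collector(db)
    def collect(self):
        yield GaugeMetricFamily('demo_up', 'Demo gauge', value=1)

registry = CollectorRegistry()        # starts empty: no process/platform/gc metrics
registry.register(DemoCollector())
print(generate_latest(registry).decode())
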
Example #46
0
import os
import logging
from flask import Flask, redirect, Response
from collector import MarathonCollector
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

MARATHON_METRICS_URL = os.environ.get("MARATHON_METRICS_URL", "http://marathon.mesos:8080/metrics")
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.register(MarathonCollector(MARATHON_METRICS_URL))
app = Flask(__name__)


@app.route("/")
def home():
    return redirect("/metrics")


@app.route("/metrics")
def metrics():
    prom_metrics = generate_latest(REGISTRY)
    return Response(prom_metrics, content_type="text/plain")


if __name__ == "__main__":
    log_format = u"[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d}" u" %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
    app.run(debug=True)
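
One small refinement to the /metrics route above (a sketch, not a required change): prometheus_client ships a CONTENT_TYPE_LATEST constant that includes the exposition-format version parameter some scrapers and tooling expect, instead of the bare "text/plain":

from flask import Flask, Response
from prometheus_client import CONTENT_TYPE_LATEST
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

app = Flask(__name__)

@app.route("/metrics")
def metrics():
    return Response(generate_latest(REGISTRY), content_type=CONTENT_TYPE_LATEST)
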
Example #47
0
                                  labels=labels)
            for device in data[field_key].keys():
                c.add_metric([APPID, device], data[field_key][device][0])
            yield c

        c2 = GaugeMetricFamily('ttn_rssi',
                               'Help text',
                               labels=['device', 'gateway'])
        for device in rssi.keys():
            for gw in rssi[device].keys():
                c2.add_metric([device, gw], rssi[device][gw])

        yield c2


REGISTRY.register(CustomCollector())

if __name__ == '__main__':
    mqttc = mqtt.Client()
    # Assign event callbacks
    mqttc.on_connect = on_connect
    mqttc.on_message = on_message

    mqttc.username_pw_set(APPID, PSW)
    mqttc.connect("eu.thethings.network", 1883, 60)

    start_http_server(port)
    logger.info('*** Exporter exposes data localhost:%d' % port)

    # and listen to server
    run = True
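    # The snippet breaks off here; a typical continuation (an assumption, since
    # the original tail is not shown) keeps the paho-mqtt network loop running:
    while run:
        mqttc.loop(timeout=1.0)
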
Example #48
0
        for x in range(get_db['replic_status']):
            replic_usesysid.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replic_usesysid'][x])
            replic_pid.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replic_pid'][x])
            replica_lags.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replica_lags'][x])



        yield size
        yield max_connections
        yield total_connections
        yield left_connections
        yield db_deadlocks
        yield replica_lags
        yield replic_usesysid
        yield replic_pid
        yield replic_status
      

if __name__ == '__main__':

    # Start up the server to expose the metrics.
    start_http_server(server_port)
    # Generate some requests.
    get_db = postgres(host, dbname, user, password, dbname_postgres)
    REGISTRY.register(CustomCollector())
    while True:
        time.sleep(1)
        get_db = postgres(host, dbname, user, password, dbname_postgres)

    # while True: time.sleep(1)
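
The example above refreshes a global get_db snapshot once per second and registers a collector that reads it. A common alternative (a sketch with a hypothetical fetch function, port, and metric names, not the original exporter's code) is to query inside collect(), so the database is hit once per Prometheus scrape:

import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY

def fetch_stats():
    # hypothetical stand-in for the snippet's postgres(...) helper
    return {'total_connections': 42, 'max_connections': 100}

class ScrapeTimeCollector(object):
    def collect(self):
        stats = fetch_stats()          # runs once per scrape
        yield GaugeMetricFamily('pg_total_connections', 'Open connections',
                                value=stats['total_connections'])
        yield GaugeMetricFamily('pg_max_connections', 'Configured max connections',
                                value=stats['max_connections'])

if __name__ == '__main__':
    REGISTRY.register(ScrapeTimeCollector())
    start_http_server(9187)            # illustrative port
    while True:
        time.sleep(1)
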
Example #49
0
                for o_num in range (0, len(sessions_stats)):
                    if sessions_stats[o_num][0] == ch_name and sessions_stats[o_num][1] == ch_type and sessions_stats[o_num][2] == flussonic_host:
                        sessions_stats[o_num][3] += 1
                        #print(sessions_stats[o_num][0] + " " + sessions_stats[o_num][1])
                        is_added = 1

                if is_added == 0:
                    sessions_stats.append([ch_name, ch_type, flussonic_host, 1])
                num += 1

        ### Add the per-session counts as metric samples ###
        for s_num in range(0, len(sessions_stats)):
            c.add_metric([sessions_stats[s_num][0], sessions_stats[s_num][1], sessions_stats[s_num][2]], sessions_stats[s_num][3])
        yield c
    ### end for ###


if __name__ == "__main__":
    try:
        REGISTRY.register(FlussonicCollector())
        start_http_server(9228)
        while True: 
            time.sleep(1)
            #print("DateTime " + time.strftime("%c"))
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
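
The session loop in this example deduplicates with a quadratic scan over sessions_stats. For reference, collections.Counter collapses the same per-session list into counts in a single pass (a sketch; the metric and label names are illustrative, not the exporter's own):

from collections import Counter
from prometheus_client.core import GaugeMetricFamily

def sessions_metric(sessions):
    """sessions: iterable of (channel, type, host) tuples (illustrative shape)."""
    c = GaugeMetricFamily('flussonic_sessions', 'Active sessions per channel',
                          labels=['channel', 'type', 'host'])
    for (channel, stream_type, host), count in Counter(sessions).items():
        c.add_metric([channel, stream_type, host], count)
    return c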

Example #50
0
        yield tape_drive_state
        yield tape_status
        yield gauge_storagepools_used
        yield gauge_storagepools_total


if __name__ == '__main__':
    if 'CONFIG' in os.environ:
        config_path = os.environ['CONFIG']
    else:
        config_path = 'config.ini'
    config = configparser.ConfigParser()
    config.read(config_path)

    if config.get('tsm', 'insecure') == 'true':
        verify = False
        import urllib3
        urllib3.disable_warnings()
    else:
        verify = True

    tsm = IBM_API(
        (config.get('tsm', 'username'), config.get('tsm', 'password')),
        config.get('tsm', 'url'), verify)

    start_http_server(int(config.get('exporter', 'port')))
    REGISTRY.register(TSMCollector(config.get('tsm', 'servers').split(',')))
    while True:
        time.sleep(1)
Example #51
0
        yield background_process_in_flight_count

        # now we need to run collect() over each of the static Counters, and
        # yield each metric they return.
        for m in (
                _background_process_ru_utime,
                _background_process_ru_stime,
                _background_process_db_txn_count,
                _background_process_db_txn_duration,
                _background_process_db_sched_duration,
        ):
            for r in m.collect():
                yield r


REGISTRY.register(_Collector())


class _BackgroundProcess(object):
    def __init__(self, desc, ctx):
        self.desc = desc
        self._context = ctx
        self._reported_stats = None

    def update_metrics(self):
        """Updates the metrics with values from this process."""
        new_stats = self._context.get_resource_usage()
        if self._reported_stats is None:
            diff = new_stats
        else:
            diff = new_stats - self._reported_stats
Example #52
0
File: __init__.py  Project: m64s/synapse
            return

        with open("/proc/self/stat") as s:
            line = s.read()
            raw_stats = line.split(") ", 1)[1].split(" ")
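            # raw_stats indexes into /proc/self/stat after the "comm" field:
            # index 11 is utime (field 14) and index 12 is stime (field 15),
            # both expressed in clock ticks.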

            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())

#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects",
                       ["gen"])
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[
        0.0025,
        0.005,
        0.01,
Example #53
0
                  'Jenkins build timestamp in unixtime for {0}'.format(s), labels=["jobname"]),
          }

    # Request exactly the information we need from Jenkins
    result = json.loads(urllib2.urlopen(
        "{0}/api/json?tree=jobs[name,{1}]".format(
              self._target, ",".join([s + "[number,timestamp,duration]" for s in statuses])))
        .read().decode("utf-8"))

    for job in result['jobs']:
      name = job['name']
      for s in statuses:
        # If there's a null result, we want to export zeros.
        status = job[s] or {}
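        # Jenkins reports duration and timestamp in milliseconds; divide by
        # 1000.0 to export seconds / Unix time.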
        metrics[s]['number'].add_metric([name], status.get('number', 0))
        metrics[s]['duration'].add_metric([name], status.get('duration', 0) / 1000.0)
        metrics[s]['timestamp'].add_metric([name], status.get('timestamp', 0) / 1000.0)

    for s in statuses:
      for m in metrics[s].values():
        yield m


if __name__ == "__main__":
  if len(sys.argv) < 2:
    sys.stderr.write("Usage: jenkins_exporter.py http://jenkins:8080\n")
    sys.exit(1)
  REGISTRY.register(JenkinsCollector(sys.argv[1]))
  start_http_server(9118)
  while True: time.sleep(1)