Example #1
    def _register(self):
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering" % (self.name,))
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
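Note: the snippet above is a method on a custom collector. Below is a minimal, self-contained sketch of how such a collector might wrap this re-registration pattern; the LaterGauge name, the caller callback, and the module-level all_gauges dict are assumptions for illustration, not taken from the source.

import logging

from prometheus_client.core import GaugeMetricFamily, REGISTRY

logger = logging.getLogger(__name__)
all_gauges = {}  # assumed module-level map of metric name -> registered collector


class LaterGauge:
    """Hypothetical gauge whose value is produced by a callback at scrape time."""

    def __init__(self, name, desc, caller):
        self.name = name
        self.desc = desc
        self.caller = caller  # zero-argument callable returning a float
        self._register()

    def collect(self):
        g = GaugeMetricFamily(self.name, self.desc)
        g.add_metric([], self.caller())
        yield g

    def _register(self):
        # Same pattern as above: drop any previously registered collector of this name.
        if self.name in all_gauges:
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))
        REGISTRY.register(self)
        all_gauges[self.name] = self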
Example #2
def start_exporter(config, port, interval):
    """ run the exporter every <interval> seconds """
    REGISTRY.register(NovaCollector(config))
    start_http_server(port)
    while True:
        generate_latest(REGISTRY)
        time.sleep(interval)
Example #3
def main():
    """
    Main method
    """
    args = create_parser().parse_args()

    log_format = '%(asctime)s %(message)s'
    logging_args = dict(format=log_format,
                        level=args.log_level)
    if args.log_file:
        logging_args['filename'] = args.log_file
        logging_args['filemode'] = 'a'

    logging.basicConfig(**logging_args)

    scheme = "https" if args.use_tls else "http"
    use_ts = args.use_device_data_timestamp
    collector = SunPowerPVSupervisorCollector(hostname=args.hostname,
                                              port=args.port,
                                              scheme=scheme,
                                              timeout=args.timeout,
                                              use_device_data_timestamp=use_ts,
                                             )

    logging.info("Listening on port %d...", args.listen_on)
    start_http_server(args.listen_on)

    REGISTRY.register(collector)

    # Sleep indefinitely until we receive a signal
    while True:
        time.sleep(10)
Example #4
def main():
    parser = create_parser()
    args = parser.parse_args()

    # log configuration
    loginipath = os.path.join(os.path.dirname(__file__), "logconf.ini")
    logging.config.fileConfig(
        loginipath,
        defaults={'logfilename': args.logfile}
        )
    logger = logging.getLogger("zvmExporter")

    # split address and port
    addr_rx = re.compile(
        r'(?P<addr>[a-zA-Z0-9][a-zA-Z0-9\-]*(\.[a-zA-Z0-9][a-zA-Z0-9\-]*)+)'
        r'(:(?P<port>\d+))?')
    match = addr_rx.match(args.server)
    if match:
        xcat_addr = match.group('addr')
        xcat_port = match.group('port') or '443'
    else:
        logger.info("Invalid address")
        return 1

    logger.info("Program started")

    # start collector
    REGISTRY.register(ZVMCollector(args.zhcpnode, args.username,
                                   args.password, xcat_addr, xcat_port,
                                   args.cert))
    start_http_server(args.port)
    while True:
        sleep(1)
Example #5
def exporter_start():
    print('starting server http://{}:{}/metrics'.format(
        EXPORTER_LISTEN_HOST, EXPORTER_LISTEN_PORT))
    REGISTRY.register(CustomCollector())
    start_http_server(EXPORTER_LISTEN_PORT, addr=EXPORTER_LISTEN_HOST)
    while True:
        time.sleep(5)
Example #6
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(AzureStatusCollector())
        start_http_server(port)
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #7
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(CouchbaseCollector(args.couchbase))
        start_http_server(port)
        print("Serving at port:", port)
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #8
def main():
    try:
        args = parse_args()
        port = int(args.port)
        REGISTRY.register(JenkinsCollector(args.jenkins, args.user, args.password, args.insecure))
        start_http_server(port)
        print("Polling {}. Serving at port: {}".format(args.jenkins, port))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #9
def register_cache(cache_type, cache_name, cache):

    # Check if the metric is already registered. Unregister it, if so.
    # This usually happens during tests, as at runtime these caches are
    # effectively singletons.
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    if metric_name in collectors_by_name.keys():
        REGISTRY.unregister(collectors_by_name[metric_name])

    class CacheMetric(object):

        hits = 0
        misses = 0
        evicted_size = 0

        def inc_hits(self):
            self.hits += 1

        def inc_misses(self):
            self.misses += 1

        def inc_evictions(self, size=1):
            self.evicted_size += size

        def describe(self):
            return []

        def collect(self):
            try:
                if cache_type == "response_cache":
                    response_cache_size.labels(cache_name).set(len(cache))
                    response_cache_hits.labels(cache_name).set(self.hits)
                    response_cache_evicted.labels(cache_name).set(self.evicted_size)
                    response_cache_total.labels(cache_name).set(self.hits + self.misses)
                else:
                    cache_size.labels(cache_name).set(len(cache))
                    cache_hits.labels(cache_name).set(self.hits)
                    cache_evicted.labels(cache_name).set(self.evicted_size)
                    cache_total.labels(cache_name).set(self.hits + self.misses)
            except Exception as e:
                logger.warn("Error calculating metrics for %s: %s", cache_name, e)
                raise

            yield GaugeMetricFamily("__unused", "")

    metric = CacheMetric()
    REGISTRY.register(metric)
    caches_by_name[cache_name] = cache
    collectors_by_name[metric_name] = metric
    return metric
Example #10
    def install_collector(self):
        class Collector(object):
            def collect(self):
                try:
                    ret = []
                    for c in kvmagent.metric_collectors:
                        ret.extend(c())

                    return ret
                except Exception as e:
                    content = traceback.format_exc()
                    err = '%s\n%s\n' % (str(e), content)
                    logger.warning(err)
                    return []

        REGISTRY.register(Collector())
Example #11
def main():
    try:
        args = parse_args()
        port = int(args.port)
        while True:
            try:
                if requests.get(args.odl_inventory):
                    REGISTRY.register(OpenDaylightCollector(args.opendaylight, args.odl_inventory))
                    start_http_server(port)
                    print "Polling data from OpenDaylight: %s. Starting OpenDaylight exporter on port: %s" % (args.opendaylight, port)
                    while True:
                        time.sleep(1)
            except ConnectionError:
                print "OpenDaylight is either not running or it is unreachable."
    except KeyboardInterrupt:
        print(" Interrupted")
        exit(0)
Example #12
def run_server(test, port):
    """
    This script provides monitoring information about the postgraas server.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")

    config = cfg.get_config()

    collector = CustomCollector(config)
    if test:
        click.echo("TEST MODE")
        for metric in collector.collect():
            for sample in metric.samples:
                click.echo(sample)
    else:
        click.echo("Running web server at port {}".format(port))
        REGISTRY.register(collector)
        app = Flask(__name__)
        app.register_blueprint(blueprint)
        app.run(host='0.0.0.0', port=port, threaded=True)
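The blueprint registered in the snippet above is defined elsewhere; here is a minimal sketch of what such a metrics blueprint could look like (the blueprint name and route are assumptions, not from the source):

from flask import Blueprint, Response
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from prometheus_client.core import REGISTRY

blueprint = Blueprint('metrics', __name__)


@blueprint.route('/metrics')
def metrics():
    # Render everything registered on the global REGISTRY in the text exposition format.
    return Response(generate_latest(REGISTRY), content_type=CONTENT_TYPE_LATEST)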
Example #13
                        type=int,
                        default=9683,
                        help='Collector http port, default is 9683')
    parser.add_argument(
        '--can-reset-counter',
        dest='can_reset_counter',
        help='Will reset counter as required when maxed out. Can also be \
set with env variable CAN_RESET_COUNTER',
        action='store_true')
    parser.add_argument(
        '--from-file',
        action='store',
        dest='input_file',
        help='Read a file containing the output of ibqueryerrors, if left \
empty, ibqueryerrors will be launched as needed by this collector')
    parser.add_argument(
        '--node-name-map',
        action='store',
        dest='node_name_map',
        help='Node name map used by ibqueryerrors. Can also be set with env \
var NODE_NAME_MAP')

    args = parser.parse_args()

    start_http_server(args.port)
    REGISTRY.register(
        InfinibandCollector(args.can_reset_counter, args.input_file,
                            args.node_name_map))
    while True:
        time.sleep(1)
Example #14
                    data["ports"][number]["packetsRxErrors"])
                self.metric['packetsTxErrors'].add_metric(
                    [data["device"],
                     str(data["ports"][number]['port'])],
                    data["ports"][number]["packetsTxErrors"])

            tables = device.get_table_statistic()["statistics"][0]
            tables_length = len(tables['table'])
            for number in range(0, tables_length):
                if tables["table"][number]["activeEntries"] > 0:
                    self.metric['packetsMatched'].add_metric([
                        tables["device"],
                        str(tables["table"][number]['tableId'])
                    ], tables["table"][number]["packetsMatched"])
                    self.metric['packetsLookedUp'].add_metric([
                        tables["device"],
                        str(tables["table"][number]['tableId'])
                    ], tables["table"][number]["packetsLookedUp"])

        for metric in self.metric.values():
            yield metric


if __name__ == "__main__":

    REGISTRY.register(prometheusCollector())
    start_http_server(9091)

    while True:
        time.sleep(1)
Example #15
def gyroscope_metric():
    raw = sense.get_gyroscope_raw()

    family = GaugeMetricFamily(name='sense_hat_gyroscope_raw',
                               documentation='radians/second',
                               labels=["axis"])
    family.add_metric(["x"], raw["x"])
    family.add_metric(["y"], raw["y"])
    family.add_metric(["z"], raw["z"])
    return family


def accelerometer_metric():
    raw = sense.get_accelerometer_raw()

    family = GaugeMetricFamily(
        name='sense_hat_accelerometer_raw',
        documentation='Measured by Raspberry Pi Sense Hat',
        labels=["axis"])
    family.add_metric(["x"], raw["x"])
    family.add_metric(["y"], raw["y"])
    family.add_metric(["z"], raw["z"])
    return family


REGISTRY.register(SenseHatCollector())
# Registered at https://github.com/prometheus/prometheus/wiki/Default-port-allocations
start_http_server(9607)
while True:
    time.sleep(60)
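The SenseHatCollector class registered above is not shown in this snippet; a plausible minimal sketch (an assumption, not the original class) would simply yield the metric families built by the helper functions:

class SenseHatCollector:
    """Assumed collector wiring the helper functions into a single collect()."""

    def collect(self):
        yield gyroscope_metric()
        yield accelerometer_metric()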
Example #16
        # kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group clk --describe | grep dtss-platform-cl | awk 'BEGIN{sum=0}{sum+=$5}END{print sum}'
        lags['dtss_platform_clk'] = cmdout("kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group {} "
                                           "--describe | grep {} | awk 'BEGIN{{sum=0}}{{sum+=$5}}END{{print "
                                           "sum}}'".format('clk', 'dtss-platform-clk'))
        lags['dtss_realtime_base'] = cmdout("kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group "
                                           "{} --describe | grep {} | awk 'BEGIN{{sum=0}}{{"
                                           "sum+=$5}}END{{print sum}}'".format('base', 'dtss-realtime-base'))
        lags['dtss_stats_cvt'] = cmdout("kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group "
                                           "{} --describe | grep {} | awk 'BEGIN{{sum=0}}{{"
                                           "sum+=$5}}END{{print sum}}'".format('cvt', 'dtss-stats-cvt'))
        lags['dtss_stats_cvt_my'] = cmdout("kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group "
                                           "{} --describe | grep {} | awk 'BEGIN{{sum=0}}{{"
                                           "sum+=$5}}END{{print sum}}'".format('my', 'dtss-stats-cvt'))
        lags['dtss_realtime_account'] = cmdout("kafka-consumer-groups.sh --bootstrap-server 172.28.9.1:9092 --group "
                                           "{} --describe | grep {} | awk 'BEGIN{{sum=0}}{{"
                                           "sum+=$5}}END{{print sum}}'".format('account', 'dtss-realtime-account'))
        for key in lags:
          # __init__(self, name, documentation, value=None, labels=None, unit=''):
            lags_gauges[key] = GaugeMetricFamily('td_{}'.format(key), 'metric of {}'.format(key), value=None, labels=['proj', 'service'])
            lags_gauges[key].add_metric(['td', 'kafka'], lags[key])
        for metric in lags_gauges:
            yield lags_gauges[metric]


if __name__ == '__main__':
    start_http_server(23333)
    REGISTRY.register(lagCollector())

    while True:
        time.sleep(10)
Example #17
            if 'lastSuccessfulBuildId' in self.data[repo]:
                lsbi = CounterMetricFamily(
                    'dronedb_repo_last_successful_build_id',
                    'Last successful build id',
                    labels=["repo"])
                lsbi.add_metric([repo],
                                str(self.data[repo]['lastSuccessfulBuildId']))
                yield (lsbi)

            if 'lastBuildId' in self.data[repo]:
                lbi = CounterMetricFamily('dronedb_repo_last_build_id',
                                          'Last build id',
                                          labels=["repo"])
                lbi.add_metric([repo], str(self.data[repo]['lastBuildId']))
                yield (lbi)

        self.drone.close_db()
        return (self.data)


if __name__ == "__main__":
    args = setup_parser()
    LOG.info('Starting dronedb exporter on http://localhost:%s' %
             args.listen_port)
    REGISTRY.register(
        DroneCollector(args.host, args.dbport, args.database, args.username,
                       args.password))
    start_http_server(int(args.listen_port))
    while True:
        time.sleep(1)
Example #18
Imports

:time: control sleep time in main function
"""
import time
from datetime import datetime, timedelta
from prometheus_client import PLATFORM_COLLECTOR
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client import start_http_server
from prometheus_client.core import CounterMetricFamily
from prometheus_client.core import REGISTRY
import lib.wg_parser as wg_parser

# unregister not used metrics
# pylint: disable=protected-access
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.unregister(PLATFORM_COLLECTOR)
REGISTRY.unregister(
    REGISTRY._names_to_collectors["python_gc_objects_collected_total"])


# pylint: disable=too-few-public-methods
class CollectSendBytesTotal:
    """
    Custom collector class for bytes sent to wireguard peer
    """
    def __init__(self, wgparser: wg_parser.WGParser):
        self.parser = wgparser

    def collect(self):
        """
Example #19
        except KeyError:
            logging.error("Could not retrieve metrics from: " + self.metrics)
            logging.error("Check argument sonar_metrics")


if __name__ == "__main__":
    parser = configargparse.ArgumentParser()
    parser.add_argument('--sonar_url', type=str, required=True, env_var='sonar_url')
    parser.add_argument('--sonar_metrics', type=str, env_var='sonar_metrics', default='ncloc,coverage')
    parser.add_argument('--sonar_user', type=str, required=True, env_var='sonar_user')
    parser.add_argument('--sonar_password', type=str, required=True, env_var='sonar_password')
    parser.add_argument('--run_once', action='store_true')
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')

    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

    REGISTRY.register(SonarCollector(args.sonar_url, args.sonar_user, args.sonar_password, args.sonar_metrics))

    if args.run_once:
        for x in REGISTRY.collect():
            logging.info(x)
            for y in x.samples:
                logging.info(y)
        sys.exit("runonce")

    start_http_server(9118)
    while True:
        time.sleep(1)
Example #20
logger = logging.getLogger(__name__)

try:
    config = Config()
except ValueError as e:
    logger.critical(e)
    logger.critical("Invalid configuration. Exiting.")
    sys.exit(1)

logger.info("Starting Kibana Prometheus exporter version %s\n" %
            config.version + config.description())

REGISTRY.register(
    KibanaCollector(
        config.kibana_url,
        kibana_login=config.kibana_login,
        kibana_password=config.kibana_password,
        ignore_ssl=config.ignore_ssl,
    ))

try:
    start_http_server(config.listen_port)
except PermissionError as e:
    logger.critical("Cannot bind to port %s. Permission denied.",
                    config.listen_port)
    sys.exit(2)

loop = asyncio.new_event_loop()
try:
    loop.run_forever()
except KeyboardInterrupt:
Example #21
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export ES query results to Prometheus.')
    parser.add_argument(
        '-e',
        '--es-cluster',
        default='localhost',
        help='addresses of nodes in a Elasticsearch cluster to run queries on. '
        'Nodes should be separated by commas e.g. es1,es2. '
        'Ports can be provided if non-standard (9200) e.g. es1:9999. '
        'Include the scheme for non-http nodes e.g. https://es1:9200. '
        '--ca-certs must be provided for SSL certificate verification. '
        '(default: localhost)')
    parser.add_argument(
        '--cluster-name',
        help='cluster name, if given a value, it will be added to labels.')
    parser.add_argument(
        '--ca-certs',
        help='path to a CA certificate bundle. '
        'Can be absolute, or relative to the current working directory. '
        'If not specified, SSL certificate verification is disabled.')
    parser.add_argument(
        '--client-cert',
        help='path to a SSL client certificate. '
        'Can be absolute, or relative to the current working directory. '
        'If not specified, SSL client authentication is disabled.')
    parser.add_argument(
        '--client-key',
        help='path to a SSL client key. '
        'Can be absolute, or relative to the current working directory. '
        'Must be specified if "--client-cert" is provided.')
    parser.add_argument('--basic-user',
                        help='username for basic authentication with nodes. '
                        'If not specified, basic authentication is disabled.')
    parser.add_argument('--basic-password',
                        help='password for basic authentication with nodes. '
                        'Must be specified if "--basic-user" is provided.')
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=9206,
        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument(
        '--query-disable',
        action='store_true',
        help='disable query monitoring. '
        'Config file does not need to be present if query monitoring is disabled.'
    )
    parser.add_argument(
        '-c',
        '--config-file',
        default='exporter.cfg',
        help='path to query config file. '
        'Can be absolute, or relative to the current working directory. '
        '(default: exporter.cfg)')
    parser.add_argument(
        '--config-dir',
        default='./config',
        help='path to query config directory. '
        'Besides including the single config file specified by "--config-file" at first, '
        'all config files in the config directory will be sorted, merged, then included. '
        'Can be absolute, or relative to the current working directory. '
        '(default: ./config)')
    parser.add_argument('--cluster-health-disable',
                        action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument(
        '--cluster-health-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for cluster health monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--cluster-health-level',
        default='indices',
        choices=['cluster', 'indices', 'shards'],
        help=
        'level of detail for cluster health monitoring.  (default: indices)')
    parser.add_argument('--nodes-stats-disable',
                        action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument(
        '--nodes-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for nodes stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--nodes-stats-metrics',
        type=nodes_stats_metrics_parser,
        help='limit nodes stats to specific metrics. '
        'Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-disable',
                        action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument(
        '--indices-stats-timeout',
        type=float,
        default=10.0,
        help=
        'request timeout for indices stats monitoring, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--indices-stats-mode',
        default='cluster',
        choices=['cluster', 'indices'],
        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument(
        '--indices-stats-metrics',
        type=indices_stats_metrics_parser,
        help='limit indices stats to specific metrics. '
        'Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument(
        '--indices-stats-fields',
        type=indices_stats_fields_parser,
        help='include fielddata info for specific fields. '
        'Fields should be separated by commas e.g. indices,fs. '
        'Use \'*\' for all.')
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='turn on json logging.')
    parser.add_argument(
        '--log-level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='detail level to log. (default: INFO)')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    parser.add_argument('--indices-query-disable',
                        action='store_true',
                        help='disable indices query.')
    args = parser.parse_args()

    predefined_labels = OrderedDict()
    if args.cluster_name is not None:
        predefined_labels['cluster_name'] = [args.cluster_name]

    if args.basic_user and args.basic_password is None:
        parser.error('Username provided with no password.')
    elif args.basic_user is None and args.basic_password:
        parser.error('Password provided with no username.')
    elif args.basic_user:
        http_auth = (args.basic_user, args.basic_password)
    else:
        http_auth = None

    if not args.ca_certs and (args.client_cert or args.client_key):
        parser.error(
            '--client-cert and --client-key can only be used when --ca-certs is provided.'
        )
    elif args.client_cert and not args.client_key:
        parser.error(
            '--client-key must be provided when --client-cert is used.')
    elif not args.client_cert and args.client_key:
        parser.error(
            '--client-cert must be provided when --client-key is used.')

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(
        log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    if args.ca_certs:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=True,
                                  ca_certs=args.ca_certs,
                                  client_cert=args.client_cert,
                                  client_key=args.client_key,
                                  http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=False,
                                  http_auth=http_auth)

    scheduler = None

    print("query disable:", args.indices_query_disable)
    if not args.indices_query_disable:
        print("preparing query....")
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        config_dir_file_pattern = os.path.join(args.config_dir, '*.cfg')
        config_dir_sorted_files = sorted(glob.glob(config_dir_file_pattern))
        config.read(config_dir_sorted_files)

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section,
                                                 'QueryIntervalSecs',
                                                 fallback=15)
                query_timeout = config.getfloat(section,
                                                'QueryTimeoutSecs',
                                                fallback=10)
                query_indices = config.get(section,
                                           'QueryIndices',
                                           fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))

                queries[query_name] = (query_interval, query_timeout,
                                       query_indices, query)

        if queries:
            for name, (interval, timeout, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query,
                               timeout, predefined_labels)
                run_scheduler(scheduler, interval, func)
            print("prepared query")
        else:
            logging.warning('No queries found in config file %s',
                            args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(
            ClusterHealthCollector(es_client,
                                   args.cluster_health_timeout,
                                   args.cluster_health_level,
                                   predefined_labels=predefined_labels))

    if not args.nodes_stats_disable:
        REGISTRY.register(
            NodesStatsCollector(es_client,
                                args.nodes_stats_timeout,
                                metrics=args.nodes_stats_metrics,
                                predefined_labels=predefined_labels))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(
            IndicesStatsCollector(es_client,
                                  args.indices_stats_timeout,
                                  parse_indices=parse_indices,
                                  metrics=args.indices_stats_metrics,
                                  fields=args.indices_stats_fields,
                                  predefined_labels=predefined_labels))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
Example #22
def sanitise_name(s):
    return re.sub(r"[^a-zA-Z0-9:_]", "_", s)

class ConsulCollector(object):
  def collect(self):
    out = urlopen("http://localhost:8500/v1/agent/metrics").read()
    metrics = json.loads(out.decode("utf-8"))

    for g in metrics["Gauges"]:
      yield GaugeMetricFamily(sanitise_name(g["Name"]),
          "Consul metric " + g["Name"], g["Value"])

    for c in metrics["Counters"]:
      yield CounterMetricFamily(sanitise_name(c["Name"]) + "_total",
          "Consul metric " + c["Name"], c["Count"])

    for s in metrics["Samples"]:
      yield SummaryMetricFamily(sanitise_name(s["Name"]) + "_seconds",
          "Consul metric " + s["Name"],
          count_value=s["Count"], sum_value=s["Sum"] / 1000)

if __name__ == '__main__':
  REGISTRY.register(ConsulCollector())
  start_http_server(8000)
  while True:
    time.sleep(1)



Example #23
        for x in range(get_db['replic_status']):
            replic_usesysid.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replic_usesysid'][x])
            replic_pid.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replic_pid'][x])
            replica_lags.add_metric([get_db['db_name'], get_db['replic_ip'][x]], get_db['replica_lags'][x])



        yield size
        yield max_connections
        yield total_connections
        yield left_connections
        yield db_deadlocks
        yield replica_lags
        yield replic_usesysid
        yield replic_pid
        yield replic_status
      

if __name__ == '__main__':

    # Start up the server to expose the metrics.
    start_http_server(server_port)
    # Generate some requests.
    get_db = postgres(host, dbname, user, password, dbname_postgres)
    REGISTRY.register(CustomCollector())
    while True:
        time.sleep(1)
        get_db = postgres(host, dbname, user, password, dbname_postgres)

    # while True: time.sleep(1)
Example #24
def main():
    REGISTRY.register(IpmiCollector())
    start_http_server(8000)
    while True:
        time.sleep(5)
Example #25
    parser.add_argument('--secure', default='no', type=str)
    args = parser.parse_args()

    # Wait for other containers to start.
    print('>>> Sleeping for: %s seconds ...' % args.start_delay)
    time.sleep(args.start_delay)

    # Start the server to expose the metrics.
    print('>>> Starting the exporter on port: %s' % args.port)
    start_http_server(args.port)
    
    # Get username and password of NetScalers.
    ns_user = os.environ.get("NS_USER")
    ns_password = os.environ.get("NS_PASSWORD")
    if ns_user is None:
        ns_user = args.username
    if ns_password is None:
        ns_password = args.password
    
    # Load the metrics file specifying stats to be collected
    f = open('/exporter/metrics.json', 'r')
    metrics_json = json.load(f)

    # Register the exporter as a stat collector.
    print('>>> Registering collector for: %s' % (args.target_nsip))
    REGISTRY.register(NetscalerCollector(nsips=args.target_nsip, metrics=metrics_json, username=ns_user, password=ns_password, secure=args.secure.lower()))

    # Forever
    while True:
        time.sleep(1)
Example #26
File: base_proc.py  Project: akpw/mktxp
 def start():
     REGISTRY.register(
         CollectorHandler(RouterEntriesHandler(), CollectorRegistry()))
     ExportProcessor.run(port=config_handler.system_entry().port)
Example #27
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export Kafka consumer offsets to Prometheus.')
    parser.add_argument(
        '-b',
        '--bootstrap-brokers',
        default='localhost',
        help='Addresses of brokers in a Kafka cluster to talk to.' +
        ' Brokers should be separated by commas e.g. broker1,broker2.' +
        ' Ports can be provided if non-standard (9092) e.g. brokers1:9999.' +
        ' (default: localhost)')
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=9208,
        help='Port to serve the metrics endpoint on. (default: 9208)')
    parser.add_argument(
        '-s',
        '--from-start',
        action='store_true',
        help='Start from the beginning of the `__consumer_offsets` topic.')
    parser.add_argument(
        '--topic-interval',
        type=float,
        default=30.0,
        help='How often to refresh topic information, in seconds. (default: 30)'
    )
    parser.add_argument(
        '--high-water-interval',
        type=float,
        default=10.0,
        help=
        'How often to refresh high-water information, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--low-water-interval',
        type=float,
        default=10.0,
        help=
        'How often to refresh low-water information, in seconds. (default: 10)'
    )
    parser.add_argument(
        '--consumer-config',
        action='append',
        default=[],
        help=
        'Provide additional Kafka consumer config as a consumer.properties file. Multiple files will be merged, later files having precedence.'
    )
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='Turn on json logging.')
    parser.add_argument(
        '--log-level',
        default='INFO',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='detail level to log. (default: INFO)')
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) \
        if args.json_logging \
        else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port

    consumer_config = {
        'bootstrap_servers': 'localhost',
        'auto_offset_reset': 'latest',
        'group_id': None,
        'consumer_timeout_ms': 500
    }

    for filename in args.consumer_config:
        with open(filename) as f:
            raw_config = javaproperties.load(f)
            converted_config = {
                k.replace('.', '_'): v
                for k, v in raw_config.items()
            }
            consumer_config.update(converted_config)

    if args.bootstrap_brokers:
        consumer_config['bootstrap_servers'] = args.bootstrap_brokers.split(
            ',')

    if args.from_start:
        consumer_config['auto_offset_reset'] = 'earliest'

    consumer = KafkaConsumer('__consumer_offsets', **consumer_config)
    client = consumer._client

    topic_interval = args.topic_interval
    high_water_interval = args.high_water_interval
    low_water_interval = args.low_water_interval

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    REGISTRY.register(collectors.HighwaterCollector())
    REGISTRY.register(collectors.LowwaterCollector())
    REGISTRY.register(collectors.ConsumerOffsetCollector())
    REGISTRY.register(collectors.ConsumerLagCollector())
    REGISTRY.register(collectors.ConsumerLeadCollector())
    REGISTRY.register(collectors.ConsumerCommitsCollector())
    REGISTRY.register(collectors.ExporterOffsetCollector())
    REGISTRY.register(collectors.ExporterLagCollector())
    REGISTRY.register(collectors.ExporterLeadCollector())

    scheduled_jobs = setup_fetch_jobs(topic_interval, high_water_interval,
                                      low_water_interval, client)

    try:
        while True:
            for message in consumer:
                offsets = collectors.get_offsets()
                commits = collectors.get_commits()
                exporter_offsets = collectors.get_exporter_offsets()

                exporter_partition = message.partition
                exporter_offset = message.offset
                exporter_offsets = ensure_dict_key(exporter_offsets,
                                                   exporter_partition,
                                                   exporter_offset)
                exporter_offsets[exporter_partition] = exporter_offset
                collectors.set_exporter_offsets(exporter_offsets)

                if message.key and message.value:
                    key = parse_key(message.key)
                    if key:
                        value = parse_value(message.value)

                        group = key[1]
                        topic = key[2]
                        partition = key[3]
                        offset = value[1]

                        offsets = ensure_dict_key(offsets, group, {})
                        offsets[group] = ensure_dict_key(
                            offsets[group], topic, {})
                        offsets[group][topic] = ensure_dict_key(
                            offsets[group][topic], partition, offset)
                        offsets[group][topic][partition] = offset
                        collectors.set_offsets(offsets)

                        commits = ensure_dict_key(commits, group, {})
                        commits[group] = ensure_dict_key(
                            commits[group], topic, {})
                        commits[group][topic] = ensure_dict_key(
                            commits[group][topic], partition, 0)
                        commits[group][topic][partition] += 1
                        collectors.set_commits(commits)

                # Check if we need to run any scheduled jobs
                # each message.
                scheduled_jobs = scheduler.run_scheduled_jobs(scheduled_jobs)

            # Also check if we need to run any scheduled jobs
            # each time the consumer times out, in case there
            # aren't any messages to consume.
            scheduled_jobs = scheduler.run_scheduled_jobs(scheduled_jobs)

    except KeyboardInterrupt:
        pass

    shutdown()
Example #28
 def collect():
     for metric in REGISTRY.collect():
         if not metric.name.startswith("__"):
             yield metric
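The filter above drops metrics whose names start with "__" (such as the "__unused" placeholder gauge in Examples #9 and #31). A hedged usage sketch, with an assumed wrapper class name, is to serve that filtered view through generate_latest(), which only requires an object exposing collect():

from prometheus_client import generate_latest
from prometheus_client.core import REGISTRY


class FilteredRegistry:
    """Assumed wrapper: any object with a collect() method can be passed to generate_latest()."""

    def collect(self):
        for metric in REGISTRY.collect():
            if not metric.name.startswith("__"):
                yield metric


output = generate_latest(FilteredRegistry())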
Example #29
                bom_pressure_pascals.add_metric(labels, latest_obs["press"] *
                                                100)  # hPa to Pa
            if latest_obs["rel_hum"] != None:
                bom_relative_humidity.add_metric(labels, latest_obs["rel_hum"])
            if latest_obs["wind_spd_kmh"] != None:
                bom_wind_speed.add_metric(labels, latest_obs["wind_spd_kmh"])
            if latest_obs["apparent_t"] != None:
                bom_apparent_temperature_celsius.add_metric(
                    labels, latest_obs["apparent_t"])

            wind_dir = latest_obs["wind_dir"]
            if wind_dir in WIND_DIR_TO_DEGREES:
                bom_wind_direction_degrees.add_metric(
                    labels, WIND_DIR_TO_DEGREES[wind_dir])

        yield bom_utctimestamp
        yield bom_air_temperature
        yield bom_pressure_pascals
        yield bom_relative_humidity
        yield bom_wind_speed
        yield bom_wind_direction_degrees
        yield bom_apparent_temperature_celsius


# Query the BOM
REGISTRY.register(BOMCollector())
start_http_server(8000)

while True:
    time.sleep(60)
Example #30
                metric.build_name = build.metadata.name
                metric.build_config_name = build.metadata.labels.buildconfig
                metric.namespace = build.metadata.namespace
                labels = build.metadata.labels
                metric.labels = json.loads(str(labels).replace("\'", "\""))

                metric.commit_hash = build.spec.revision.git.commit
                metric.name = app + '-' + build.spec.revision.git.commit
                metric.commiter = build.spec.revision.git.author.name
                metric.image_location = build.status.outputDockerImageReference
                metric.image_hash = build.status.output.to.imageDigest
                metric.getCommitTime()
                metrics.append(metric)

    return metrics


if __name__ == "__main__":
    username = os.environ.get('GITHUB_USER')
    token = os.environ.get('GITHUB_TOKEN')
    namespaces = None
    if os.environ.get('NAMESPACES') is not None:
        namespaces = [
            proj.strip() for proj in os.environ.get('NAMESPACES').split(",")
        ]
    apps = None
    start_http_server(8080)
    REGISTRY.register(CommitCollector(username, token, namespaces, apps))
    while True:
        time.sleep(1)
Example #31
def register_cache(cache_type, cache_name, cache, collect_callback=None):
    """Register a cache object for metric collection.

    Args:
        cache_type (str):
        cache_name (str): name of the cache
        cache (object): cache itself
        collect_callback (callable|None): if not None, a function which is called during
            metric collection to update additional metrics.

    Returns:
        CacheMetric: an object which provides inc_{hits,misses,evictions} methods
    """

    # Check if the metric is already registered. Unregister it, if so.
    # This usually happens during tests, as at runtime these caches are
    # effectively singletons.
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    if metric_name in collectors_by_name.keys():
        REGISTRY.unregister(collectors_by_name[metric_name])

    class CacheMetric(object):

        hits = 0
        misses = 0
        evicted_size = 0

        def inc_hits(self):
            self.hits += 1

        def inc_misses(self):
            self.misses += 1

        def inc_evictions(self, size=1):
            self.evicted_size += size

        def describe(self):
            return []

        def collect(self):
            try:
                if cache_type == "response_cache":
                    response_cache_size.labels(cache_name).set(len(cache))
                    response_cache_hits.labels(cache_name).set(self.hits)
                    response_cache_evicted.labels(cache_name).set(
                        self.evicted_size)
                    response_cache_total.labels(cache_name).set(self.hits +
                                                                self.misses)
                else:
                    cache_size.labels(cache_name).set(len(cache))
                    cache_hits.labels(cache_name).set(self.hits)
                    cache_evicted.labels(cache_name).set(self.evicted_size)
                    cache_total.labels(cache_name).set(self.hits + self.misses)
                if collect_callback:
                    collect_callback()
            except Exception as e:
                logger.warn("Error calculating metrics for %s: %s", cache_name,
                            e)
                raise

            yield GaugeMetricFamily("__unused", "")

    metric = CacheMetric()
    REGISTRY.register(metric)
    caches_by_name[cache_name] = cache
    collectors_by_name[metric_name] = metric
    return metric
Example #32
        return getattr(self._poller, item)


class ReactorLastSeenMetric(Collector):
    def __init__(self, epoll_wrapper: EpollWrapper):
        self._epoll_wrapper = epoll_wrapper

    def collect(self) -> Iterable[Metric]:
        cm = GaugeMetricFamily(
            "python_twisted_reactor_last_seen",
            "Seconds since the Twisted reactor was last seen",
        )
        cm.add_metric([], time.time() - self._epoll_wrapper.last_polled)
        yield cm


try:
    # if the reactor has a `_poller` attribute, which is an `epoll` object
    # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will
    # measure the time between ticks
    from select import epoll  # type: ignore[attr-defined]

    poller = reactor._poller  # type: ignore[attr-defined]
except (AttributeError, ImportError):
    pass
else:
    if isinstance(poller, epoll):
        poller = EpollWrapper(poller)
        reactor._poller = poller  # type: ignore[attr-defined]
        REGISTRY.register(ReactorLastSeenMetric(poller))
Example #33
        conn.close()

    def assign_location(self):
        for jail in self.jails:
            for entry in jail.ip_list:
                entry.update(self.geo_provider.annotate(entry['ip']))

    def collect(self):
        self.get_jailed_ips()
        self.assign_location()

        metric_labels = ['jail','ip'] + self.extra_labels
        ip_gauge = GaugeMetricFamily('fail2ban_banned_ip', 'IP banned by fail2ban', labels=metric_labels)

        for jail in self.jails:
            for entry in jail.ip_list:
                values = [jail.name, entry['ip']] + [entry[x] for x in self.extra_labels]
                ip_gauge.add_metric(values, 1)

        yield ip_gauge

if __name__ == '__main__':
    with open('conf.yml') as f:
        conf = yaml.load(f, Loader=yaml.FullLoader)

    REGISTRY.register(F2bCollector(conf))

    app = make_wsgi_app()
    httpd = make_server('', conf['server']['port'], app)
    httpd.serve_forever()
Example #34
    # Verify ADC stats access
    verify_ns_stats_access(args.target_nsip, ns_protocol, ns_user, ns_password,
                           args.timeout)

    # Start the server to expose the metrics.
    start_exporter_server(args.port)

    if not args.k8sCICprefix.isalnum():
        logger.error('Invalid k8sCICprefix : non-alphanumeric not accepted')

    # Register the exporter as a stat collector
    logger.info('Registering collector for %s' % args.target_nsip)

    try:
        REGISTRY.register(
            CitrixAdcCollector(nsip=args.target_nsip,
                               metrics=metrics_json,
                               username=ns_user,
                               password=ns_password,
                               protocol=ns_protocol,
                               nitro_timeout=args.timeout,
                               k8s_cic_prefix=args.k8sCICprefix))
    except Exception as e:
        logger.error(
            'Invalid arguments! could not register collector for {}::{}'.
            format(args.target_nsip, e))

    # Forever
    while True:
        signal.pause()
Example #35
]

try:
    LINKY_EXPORTER_PORT = int(os.environ.get('LINKY_EXPORTER_PORT', '8123'))
except ValueError:
    logging.error("LINKY_EXPORTER_PORT must be int !")
    sys.exit(1)

LINKY_EXPORTER_MODE = os.environ.get('LINKY_EXPORTER_MODE', 'HISTORIQUE')
VALID_MODE = [i['name'] for i in LINKY_MODE]
if LINKY_EXPORTER_MODE not in VALID_MODE:
    logging.error("LINKY_EXPORTER_MODE must be : %s", ' or '.join(VALID_MODE))
    sys.exit(1)

# REGISTRY Configuration
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.unregister(PLATFORM_COLLECTOR)
REGISTRY.unregister(REGISTRY._names_to_collectors['python_gc_objects_collected_total'])

# Linky Collector Class
class LinkyCollector():
    '''Linky Collector Class'''
    def __init__(self):
        self.ser = self._check_for_valid_frame()

    def teleinfo(self):
        '''Read Teleinfo And Return Linky Frame Dict'''
        logging.debug("Reading Linky Teleinfo on %s.", LINKY_EXPORTER_INTERFACE)

        with self.ser:
            # Wait For New Linky Frame (Start with 0x02)
    parser.add_argument("-s",
                        "--control-socket",
                        help="Tor control socket",
                        default="/var/run/tor/control")
    parser.add_argument("-p",
                        "--listen-port",
                        help="Listen on this port",
                        type=int,
                        default=9099)
    parser.add_argument("-b",
                        "--bind-addr",
                        help="Bind this address",
                        default="localhost")
    args = parser.parse_args()

    if args.mode == 'unix':
        torctl = stem.control.Controller.from_socket_file(args.control_socket)
    else:
        torctl = stem.control.Controller.from_port(args.address,
                                                   port=args.control_port)
    coll = StemCollector(torctl)
    REGISTRY.register(coll)

    print("Starting on %s:%s" % (args.bind_addr, args.listen_port))
    prom.start_http_server(args.listen_port, addr=args.bind_addr)

    # We can't exit as start_http_server starts a daemon thread which would get
    # killed.
    while True:
        time.sleep(1000)
Example #37
        yield background_process_in_flight_count

        # now we need to run collect() over each of the static Counters, and
        # yield each metric they return.
        for m in (
                _background_process_ru_utime,
                _background_process_ru_stime,
                _background_process_db_txn_count,
                _background_process_db_txn_duration,
                _background_process_db_sched_duration,
        ):
            for r in m.collect():
                yield r


REGISTRY.register(_Collector())


class _BackgroundProcess(object):
    def __init__(self, desc, ctx):
        self.desc = desc
        self._context = ctx
        self._reported_stats = None

    def update_metrics(self):
        """Updates the metrics with values from this process."""
        new_stats = self._context.get_resource_usage()
        if self._reported_stats is None:
            diff = new_stats
        else:
            diff = new_stats - self._reported_stats
Example #38
def _collect_to_http():
    REGISTRY.register(BitfinexCollector())
    start_http_server(int(settings['bitfinex_exporter']['listen_port']))
    while True:
        time.sleep(int(settings['bitfinex_exporter']['interval']))
Example #39
            process.update_metrics()

        # now we need to run collect() over each of the static Counters, and
        # yield each metric they return.
        for m in (
                _background_process_ru_utime,
                _background_process_ru_stime,
                _background_process_db_txn_count,
                _background_process_db_txn_duration,
                _background_process_db_sched_duration,
        ):
            for r in m.collect():
                yield r


REGISTRY.register(_Collector())


class _BackgroundProcess:
    def __init__(self, desc, ctx):
        self.desc = desc
        self._context = ctx
        self._reported_stats = None

    def update_metrics(self):
        """Updates the metrics with values from this process."""
        new_stats = self._context.get_resource_usage()
        if self._reported_stats is None:
            diff = new_stats
        else:
            diff = new_stats - self._reported_stats
Example #40
    def collect(self):
        temp_gauge = GaugeMetricFamily('temperature',
                                       'Current temperature in celsius',
                                       labels=['host', 'sensor_type'])
        temp_gauge.add_metric([self.hostname, 'BME280'],
                              round(self.sensor.temperature, 2))
        yield temp_gauge
        humidity_gauge = GaugeMetricFamily('humidity',
                                           'Current relative humidity',
                                           labels=['host', 'sensor_type'])
        humidity_gauge.add_metric([self.hostname, 'BME280'],
                                  round(self.sensor.humidity, 2))
        yield humidity_gauge
        pressure_gauge = GaugeMetricFamily(
            'pressure',
            'Current atmospheric pressure in hPa',
            labels=['host', 'sensor_type'])
        pressure_gauge.add_metric([self.hostname, 'BME280'],
                                  round(self.sensor.pressure, 2))
        yield pressure_gauge


if __name__ == "__main__":
    REGISTRY.register(BMP280Exporter())
    start_http_server(8000)
    running = True
    while running:
        try:
            time.sleep(10)
        except KeyboardInterrupt:
            running = False
Example #41
        "Syncing": 3,
        "Farming": 4
    }

    def __init__(self, target):
        self.target = target

    def get(self):
        response = requests.get(self.target)
        return response.json()

    def collect(self):
        data = self.get()
        for name, value in data.items():
            if name == "status":
                value = self.farm_status[value]
            yield GaugeMetricFamily(f"chia_farm_summary_{name}",
                                    f"chia_farm_summary_{name}",
                                    value=value)
            #stdout = subprocess.check_output(["plotman", "status"]).decode("utf-8")
            #yield GaugeMetricFamily("chia_farm_summary_plots_in_progress", "plotman_jobs_count", value=stdout.count("/mnt/plotter"))


if __name__ == "__main__":
    port = 8000
    start_http_server(port)
    REGISTRY.register(ChiaCollector(CHIA_HTTP_SERVER))
    print(f"Listening on port {port}")
    while True:
        time.sleep(1)
Example #42
File: app.py  Project: Mint3kool/pelorus
            return GitLabCommitCollector("", "", "", "")
        if git_provider == "github":
            return GitHubCommitCollector(username, token, namespaces, apps,
                                         git_api)
        if git_provider == "bitbucket":
            return BitbucketCommitCollector("", "", "", "")


if __name__ == "__main__":
    pelorus.check_legacy_vars()
    pelorus.check_required_config(REQUIRED_CONFIG)

    username = os.environ.get('GIT_USER')
    token = os.environ.get('GIT_TOKEN')
    git_api = os.environ.get('GIT_API')
    git_provider = os.environ.get('GIT_PROVIDER', pelorus.DEFAULT_GIT)
    namespaces = None
    if os.environ.get('NAMESPACES') is not None:
        namespaces = [
            proj.strip() for proj in os.environ.get('NAMESPACES').split(",")
        ]
    apps = None
    start_http_server(8080)

    collector = GitFactory.getCollector(username, token, namespaces, apps,
                                        git_api, git_provider)
    REGISTRY.register(collector)

    while True:
        time.sleep(1)
Example #43
import os
import logging
from flask import Flask, redirect, Response
from collector import MarathonAppCollector
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

MARATHON_URL = os.environ.get(
        'MARATHON_URL',
        'http://leader.mesos:8080/')
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.register(MarathonAppCollector(MARATHON_URL))
app = Flask(__name__)


@app.route('/')
def home():
    return redirect('/metrics')


@app.route('/metrics')
def metrics():
    prom_metrics = generate_latest(REGISTRY)
    return Response(prom_metrics, content_type='text/plain')


if __name__ == '__main__':
    log_format = u'[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d}' \
                 u' %(levelname)s - %(message)s'
    logging.basicConfig(
Example #44
class GaugeCollector(object):
    def collect(self):
        for jail in self.get_jails(self.extract_data()):
            g = GaugeMetricFamily("fail2ban_{}".format(self.snake_case(jail)),
                                  "",
                                  labels=['type'])
            for label, value in self.extract_data(jail):
                g.add_metric([self.snake_case(label)], float(value))
            yield g

    def get_jails(self, jails):
        return jails[1][1].split(",")

    def extract_data(self, jail=""):
        r = run(cmd.format(path=path, service=jail),
                stdout=PIPE,
                check=True,
                shell=True)
        return findall(comp, ''.join(bytes(r.stdout).decode('utf-8')).lower())

    def snake_case(self, string):
        return string.strip().replace("-", "_").replace(" ", "_")


# Code execution starts from here
start_http_server(port, addr)
REGISTRY.register(GaugeCollector())
while True:
    sleep(10)
Example #45
            return

        with open("/proc/self/stat") as s:
            line = s.read()
            raw_stats = line.split(") ", 1)[1].split(" ")

            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())

#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
             5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
)
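The code that actually feeds these two metrics lies beyond the cut; a minimal sketch of one way the `gc_unreachable` gauge and `gc_time` histogram could be populated (an illustration of the metric API, not necessarily how synapse wires it):

import gc
import time

def observe_gc(gen):
    # Illustrative helper: time a manual collection of one generation and
    # record how many unreachable objects it reported.
    start = time.time()
    unreachable = gc.collect(gen)
    gc_time.labels(str(gen)).observe(time.time() - start)
    gc_unreachable.labels(str(gen)).set(unreachable)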

Example #46
def cli(**options):
    """Export Elasticsearch query results to Prometheus."""
    if options['basic_user'] and options['basic_password'] is None:
        raise click.BadOptionUsage('basic_user',
                                   'Username provided with no password.')
    elif options['basic_user'] is None and options['basic_password']:
        raise click.BadOptionUsage('basic_password',
                                   'Password provided with no username.')
    elif options['basic_user']:
        http_auth = (options['basic_user'], options['basic_password'])
    else:
        http_auth = None

    if not options['ca_certs'] and options['client_cert']:
        raise click.BadOptionUsage(
            'client_cert',
            '--client-cert can only be used when --ca-certs is provided.')
    elif not options['ca_certs'] and options['client_key']:
        raise click.BadOptionUsage(
            'client_key',
            '--client-key can only be used when --ca-certs is provided.')
    elif options['client_cert'] and not options['client_key']:
        raise click.BadOptionUsage(
            'client_cert',
            '--client-key must be provided when --client-cert is used.')
    elif not options['client_cert'] and options['client_key']:
        raise click.BadOptionUsage(
            'client_key',
            '--client-cert must be provided when --client-key is used.')

    if options['indices_stats_indices'] and options[
            'indices_stats_mode'] != 'indices':
        raise click.BadOptionUsage(
            'indices_stats_indices',
            '--indices-stats-mode must be "indices" for '
            '--indices-stats-indices to be used.')

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(
        log_format) if options['json_logging'] else logging.Formatter(
            log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, options['log_level'])
    logging.basicConfig(
        handlers=[log_handler],
        level=logging.DEBUG if options['verbose'] else log_level)
    logging.captureWarnings(True)

    port = options['port']
    es_cluster = options['es_cluster'].split(',')

    if options['ca_certs']:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=True,
                                  ca_certs=options['ca_certs'],
                                  client_cert=options['client_cert'],
                                  client_key=options['client_key'],
                                  headers=options['header'],
                                  http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster,
                                  verify_certs=False,
                                  headers=options['header'],
                                  http_auth=http_auth)

    scheduler = None

    if not options['query_disable']:
        config = configparser.ConfigParser(converters=CONFIGPARSER_CONVERTERS)
        config.read(options['config_file'])

        config_dir_file_pattern = os.path.join(options['config_dir'], '*.cfg')
        config_dir_sorted_files = sorted(glob.glob(config_dir_file_pattern))
        config.read(config_dir_sorted_files)

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                interval = config.getfloat(section,
                                           'QueryIntervalSecs',
                                           fallback=15)
                timeout = config.getfloat(section,
                                          'QueryTimeoutSecs',
                                          fallback=10)
                indices = config.get(section, 'QueryIndices', fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))
                on_error = config.getenum(section,
                                          'QueryOnError',
                                          fallback='drop')
                on_missing = config.getenum(section,
                                            'QueryOnMissing',
                                            fallback='drop')

                queries[query_name] = (interval, timeout, indices, query,
                                       on_error, on_missing)

        scheduler = sched.scheduler()

        if queries:
            for query_name, (interval, timeout, indices, query, on_error,
                             on_missing) in queries.items():
                schedule_job(scheduler, interval, run_query, es_client,
                             query_name, indices, query, timeout, on_error,
                             on_missing)
        else:
            log.error('No queries found in config file(s)')
            return

    if not options['cluster_health_disable']:
        REGISTRY.register(
            ClusterHealthCollector(es_client,
                                   options['cluster_health_timeout'],
                                   options['cluster_health_level']))

    if not options['nodes_stats_disable']:
        REGISTRY.register(
            NodesStatsCollector(es_client,
                                options['nodes_stats_timeout'],
                                metrics=options['nodes_stats_metrics']))

    if not options['indices_aliases_disable']:
        REGISTRY.register(
            IndicesAliasesCollector(es_client,
                                    options['indices_aliases_timeout']))

    if not options['indices_mappings_disable']:
        REGISTRY.register(
            IndicesMappingsCollector(es_client,
                                     options['indices_mappings_timeout']))

    if not options['indices_stats_disable']:
        parse_indices = options['indices_stats_mode'] == 'indices'
        REGISTRY.register(
            IndicesStatsCollector(es_client,
                                  options['indices_stats_timeout'],
                                  parse_indices=parse_indices,
                                  indices=options['indices_stats_indices'],
                                  metrics=options['indices_stats_metrics'],
                                  fields=options['indices_stats_fields']))

    if scheduler:
        REGISTRY.register(QueryMetricCollector())

    log.info('Starting server...')
    start_http_server(port)
    log.info('Server started on port %(port)s', {'port': port})

    if scheduler:
        scheduler.run()
    else:
        while True:
            time.sleep(5)
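For reference, a query section that the configparser loop above would pick up might look like the following; the key names and the "drop" fallback come from the code, while the section name, index pattern, interval, and JSON body are illustrative:

[query_orders_total]
QueryIntervalSecs = 15
QueryTimeoutSecs = 10
QueryIndices = orders-*
QueryJson = {"size": 0, "query": {"match_all": {}}}
QueryOnError = drop
QueryOnMissing = drop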
Example #47


class Expositor(object):
    """ Responsible for exposing metrics to prometheus """

    def collect(self):
        logging.info("Serving prometheus data")
        for key in sorted(metrics):
            yield metrics[key]


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for collector in list(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
    REGISTRY.register(Expositor())

    start_time = time.time()

    # Populate data before exposing over http
    scrape()
    start_http_server(8000)

    ready_time = time.time()
    print("Ready time: ", ready_time-start_time)

    while True:
        time.sleep(int(os.environ.get('KOJI_POLL_INTERVAL', '3')))
        scrape()
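scrape() and the module-level `metrics` dict are defined above the cut; a tiny sketch of the contract Expositor.collect() relies on (metric name and value are placeholders):

from prometheus_client.core import GaugeMetricFamily

metrics = {}

def scrape():
    # Rebuild the module-level `metrics` dict that Expositor.collect() iterates.
    g = GaugeMetricFamily('koji_example_metric', 'Illustrative placeholder metric')
    g.add_metric([], 1.0)
    metrics['koji_example_metric'] = g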
Example #48
                    finally:
                        response_json.append(data)

            # Generate metric output
            for repo in response_json:
                docker_namespace = repo.get('namespace').translate(table)
                docker_name = repo.get('name').translate(table)
                g = GaugeMetricFamily(
                    'dockerpulls_' + docker_namespace + '_' + docker_name +
                    '_total', 'Total Pulls for: ' +
                    repo.get('namespace') + '/' + repo.get('name'))
                # The family was created without labels, so add_metric takes
                # an empty label list rather than a bare string.
                g.add_metric([], repo.get('pull_count'))
                yield g


# Register the collector for metrics display
REGISTRY.register(CustomCollector())


# /metrics serves the exporter output;
# any other path gets a welcome message.
def docker_pull(environ, start_fn):
    if environ['PATH_INFO'] == '/metrics':
        return metrics_app(environ, start_fn)
    start_fn('200 OK', [])
    return [b'Hi there \\o\n\nMaybe you wanna go to /metrics!? :)']
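`metrics_app` and `table` are defined above the cut; a sketch of that glue plus one way to serve the `docker_pull` WSGI callable (the translation table and port are illustrative):

from wsgiref.simple_server import make_server
from prometheus_client import make_wsgi_app

metrics_app = make_wsgi_app()                           # standard exposition app for REGISTRY
table = str.maketrans({'-': '_', '.': '_', '/': '_'})   # sanitise names used inside the metric name
httpd = make_server('', 9417, docker_pull)              # illustrative port
httpd.serve_forever()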
Example #49
File: __init__.py  Project: ulope/synapse
            return

        with open("/proc/self/stat") as s:
            line = s.read()
            raw_stats = line.split(") ", 1)[1].split(" ")

            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user

            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys


REGISTRY.register(CPUMetrics())

#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects",
                       ["gen"])
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[
        0.0025,
        0.005,
        0.01,
Example #50
File: __init__.py  Project: ulope/synapse
def collect():
    for metric in REGISTRY.collect():
        if not metric.name.startswith("__"):
            yield metric
Example #51
    parser.add_argument('--bamboo_url', type=str, required=True, env_var='bamboo_url')
    parser.add_argument('--bamboo_user', type=str, required=True, env_var='bamboo_user')
    parser.add_argument('--bamboo_password', type=str, required=True, env_var='bamboo_password')
    parser.add_argument('--bamboo_test_jobs', type=str, env_var='bamboo_test_jobs') # CSV of PRJ-XX-JOB, eg COS-COS1-DTOL
#    parser.add_argument('--dashing_event_url', type=str, required=True, env_var='dashing_event_url')
#    parser.add_argument('--redis_host', type=str, required=True, env_var='redis_host')
#    parser.add_argument('--redis_port', type=int, env_var='redis_port', default=6379)
    parser.add_argument('--run_once', action='store_true')
    parser.add_argument('--sonar_url', type=str, required=True, env_var='sonar_url')
    parser.add_argument('--sonar_user', type=str, required=True, env_var='sonar_user')
    parser.add_argument('--sonar_password', type=str, required=True, env_var='sonar_password')


    args = parser.parse_args()

    REGISTRY.register(BambooCollector(args.bamboo_url, args.bamboo_user, args.bamboo_password, args.bamboo_test_jobs))
#    REGISTRY.register(EventStreamCollector(args.dashing_event_url))  # http://192.168.99.100:3030/events
#    REGISTRY.register(RedisCollector(args.redis_host, args.redis_port))
    REGISTRY.register(SonarCollector(args.sonar_url, args.sonar_user, args.sonar_password, []))

    if args.run_once:
        # time.sleep(5) # wait for async
        pp = pprint.PrettyPrinter(indent=4)
        for collector in REGISTRY._collectors:
            # collector = BambooCollector(args.bamboo_url, args.bamboo_user, args.bamboo_password)
            print collector
            for x in collector.collect():
                pp.pprint(x.samples)
        sys.exit("runonce")

    start_http_server(9118)
Example #52
import configparser

from prometheus_client import make_wsgi_app
from prometheus_client.core import REGISTRY
from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from sonar.sonar import SonarCollector

# Import configuration file
config = configparser.ConfigParser()
config.read("config.ini")
sonar_collector = SonarCollector(
    server=config['DEFAULT']['SONAR_SERVER'],
    user=config['DEFAULT']['SONAR_USERNAME'],
    passwd=config['DEFAULT']['SONAR_PASSWORD']
)
REGISTRY.register(sonar_collector)

# Create Flask app
app = Flask(__name__)
@app.route('/ready')
def ready():
    return 'To infinity and beyond!'

# Add prometheus wsgi middleware to route /metrics requests
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
    '/metrics': make_wsgi_app()
})
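One way to run this locally (host and port are illustrative); Flask dispatches through `app.wsgi_app`, so /metrics is answered by the Prometheus WSGI app and every other path by Flask:

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)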
Example #53
                  'Jenkins build timestamp in unixtime for {0}'.format(s), labels=["jobname"]),
          }

    # Request exactly the information we need from Jenkins
    result = json.loads(urllib2.urlopen(
        "{0}/api/json?tree=jobs[name,{1}]".format(
              self._target, ",".join([s + "[number,timestamp,duration]" for s in statuses])))
        .read().decode("utf-8"))

    for job in result['jobs']:
      name = job['name']
      for s in statuses:
        # If there's a null result, we want to export zeros.
        status = job[s] or {}
        metrics[s]['number'].add_metric([name], status.get('number', 0))
        metrics[s]['duration'].add_metric([name], status.get('duration', 0) / 1000.0)
        metrics[s]['timestamp'].add_metric([name], status.get('timestamp', 0) / 1000.0)

    for s in statuses:
      for m in metrics[s].values():
        yield m


if __name__ == "__main__":
  if len(sys.argv) < 2:
    sys.stderr.write("Usage: jenkins_exporter.py http://jenkins:8080\n")
    sys.exit(1)
  REGISTRY.register(JenkinsCollector(sys.argv[1]))
  start_http_server(9118)
  while True: time.sleep(1)
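The `statuses` list and the head of the `metrics` dict sit above the cut; a compact sketch of the shape the loop relies on (the status names and metric names are assumptions, only the timestamp help string appears in the snippet):

from prometheus_client.core import GaugeMetricFamily

statuses = ["lastBuild", "lastCompletedBuild", "lastFailedBuild"]  # assumed subset
metrics = {
    s: {
        'number': GaugeMetricFamily(
            'jenkins_job_{0}_number'.format(s.lower()),
            'Jenkins build number for {0}'.format(s), labels=["jobname"]),
        'duration': GaugeMetricFamily(
            'jenkins_job_{0}_duration_seconds'.format(s.lower()),
            'Jenkins build duration in seconds for {0}'.format(s), labels=["jobname"]),
        'timestamp': GaugeMetricFamily(
            'jenkins_job_{0}_timestamp'.format(s.lower()),
            'Jenkins build timestamp in unixtime for {0}'.format(s), labels=["jobname"]),
    }
    for s in statuses
}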