Esempio n. 1
0
def start_exporter(config, port, interval):
    """Run the exporter, refreshing metrics every ``interval`` seconds.

    Registers a NovaCollector on the global REGISTRY, starts the HTTP
    endpoint on ``port``, then loops forever regenerating the exposition.
    """
    REGISTRY.register(NovaCollector(config))
    start_http_server(port)
    while True:
        generate_latest(REGISTRY)
        # Bug fix: previously slept a hard-coded 30 seconds, ignoring the
        # ``interval`` argument the docstring promises to honour.
        time.sleep(interval)
Esempio n. 2
0
def collect_snmp(config, host, port=161):
  """Scrape a host and return prometheus text format for it"""

  # One untyped metric family per configured rule, keyed by name.
  families = {
      rule['name']: Metric(rule['name'], 'SNMP OID {0}'.format(rule['oid']), 'untyped')
      for rule in config['metrics']
  }

  # Walk the device and index every returned value by its OID tuple.
  walked = {tuple(oid): val for oid, val in walk_oids(host, port, config['walk'])}

  for oid, val in walked.items():
    for rule in config['metrics']:
      root = oid_to_tuple(rule['oid'])
      if oid[:len(root)] != root:
        continue
      suffix = oid[len(root):]
      sample_labels = parse_indexes(suffix, rule.get('indexes', {}), walked)
      families[rule['name']].add_sample(rule['name'], value=float(val), labels=sample_labels)

  # Ad-hoc collector that hands back the prepared families.
  class Collector(object):
    def collect(self):
      return families.values()

  registry = CollectorRegistry()
  registry.register(Collector())
  return generate_latest(registry)
 def _amqp_loop(self):
     """Consume scrape requests from the AMQP queue forever, replying to each
     with the current exposition of the default registry.

     Each incoming message's ``reply_to``/``correlation_id`` properties are
     used to route the response (classic RPC-over-AMQP pattern).
     """
     for method, props, unused_body in self._channel.consume(
             self._routing_key, exclusive=True, no_ack=True):
         self._channel.publish("",
                         props.reply_to,
                         prometheus_client.generate_latest(prometheus_client.REGISTRY),
                         pika.BasicProperties(correlation_id=props.correlation_id))
def metrics():
    """Expose the default registry; answer 502 if scraping raises.

    Returns a (body, status, headers) tuple on success; on failure logs the
    traceback and aborts with a 502 response carrying the error text.
    """
    try:
        content = generate_latest(REGISTRY)
        return content, 200, {'Content-Type': CONTENT_TYPE_LATEST}
    except Exception as error:
        # Fixed wording/typo of the log message (was "Any exception occured").
        logging.exception("An exception occurred during scraping")
        abort(Response("Scrape failed: {}".format(error), status=502))
Esempio n. 5
0
    def test_histogram(self):
        """A single observation fills every bucket at or above its value,
        and _count/_sum reflect exactly that one observation."""
        s = Histogram("hh", "A histogram", registry=self.registry)
        s.observe(0.05)
        self.assertEqual(
            b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="0.005"} 0.0
hh_bucket{le="0.01"} 0.0
hh_bucket{le="0.025"} 0.0
hh_bucket{le="0.05"} 1.0
hh_bucket{le="0.075"} 1.0
hh_bucket{le="0.1"} 1.0
hh_bucket{le="0.25"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="0.75"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="2.5"} 1.0
hh_bucket{le="5.0"} 1.0
hh_bucket{le="7.5"} 1.0
hh_bucket{le="10.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_sum 0.05
""",
            generate_latest(self.registry),
        )
Esempio n. 6
0
 def test_summary(self):
     """A labelled summary renders _count and _sum samples with the label set."""
     s = Summary("ss", "A summary", ["a", "b"], registry=self.registry)
     s.labels("c", "d").observe(17)
     self.assertEqual(
         b'# HELP ss A summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n',
         generate_latest(self.registry),
     )
Esempio n. 7
0
def collect_snmp(config, host, port=161):
  """Scrape a host and return prometheus text format for it"""

  start = time.time()
  # One untyped metric family per configured rule, keyed by name.
  metrics = {}
  for metric in config['metrics']:
    metrics[metric['name']] = Metric(metric['name'], 'SNMP OID {0}'.format(metric['oid']), 'untyped')

  # Walk the device and index every returned value by its OID tuple.
  values = walk_oids(host, port, config['walk'])
  oids = {}
  for oid, value in values:
    oids[tuple(oid)] = value

  # Match every walked OID against every rule by OID prefix; the suffix
  # past the prefix becomes the sample's index labels.
  for oid, value in oids.items():
    for metric in config['metrics']:
      prefix = oid_to_tuple(metric['oid'])
      if oid[:len(prefix)] == prefix:
        value = float(value)
        indexes = oid[len(prefix):]
        labels = parse_indexes(indexes, metric.get('indexes', {}), metric.get('lookups', {}), oids)
        metrics[metric['name']].add_sample(metric['name'], value=value, labels=labels)

  # Ad-hoc collector handing back the prepared families.
  class Collector():
    def collect(self):
      return metrics.values()
  registry = CollectorRegistry()
  registry.register(Collector())
  # Self-observability: scrape duration and walked-OID count ride along.
  duration = Gauge('snmp_scrape_duration_seconds', 'Time this SNMP scrape took, in seconds', registry=registry)
  duration.set(time.time() - start)
  walked = Gauge('snmp_oids_walked', 'Number of oids walked in this scrape', registry=registry)
  walked.set(len(oids))
  return generate_latest(registry)
Esempio n. 8
0
 def _get_prometheus_snapshot(self) -> str:
     """Render the global registry as exposition text, appending the
     process collector's own snapshot when one is configured."""
     # Actually, this will produce all registered metrics, from all Metrics instances,
     # due to the ``core.REGISTRY`` nature.
     # Will fix it sometimes later.
     snapshot = generate_latest(core.REGISTRY).decode()
     if self._process_collector is not None:
         snapshot += self._process_collector.text_snapshot()
     return snapshot
Esempio n. 9
0
def collect_metrics():
    """Build a fresh registry with multiprocess, platform and external
    collectors, and return its exposition as a Flask Response."""
    registry = CollectorRegistry()
    # Registering a collector attaches it to the registry as a side effect.
    MultiProcessCollector(registry)
    PlatformCollector(registry)
    ExternalMetrics(registry)

    data = generate_latest(registry)

    return Response(data, mimetype=CONTENT_TYPE_LATEST)
Esempio n. 10
0
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.
    """
    # No registry argument: generate_latest() renders the default REGISTRY.
    metrics_page = prometheus_client.generate_latest()
    return HttpResponse(
        metrics_page,
        content_type=prometheus_client.CONTENT_TYPE_LATEST)
 def get_stats(self):
     """Expose this region's cache-refresh duration as Prometheus text."""
     reg = CollectorRegistry()
     refresh_gauge = Gauge('openstack_exporter_cache_refresh_duration_seconds',
                           'Cache refresh duration in seconds.',
                           ['region'], registry=reg)
     refresh_gauge.labels(self.region).set(self.duration)
     return generate_latest(reg)
Esempio n. 12
0
def collect_snmp(config, host, port=161):
  """Scrape a host and return prometheus text format for it"""

  start = time.time()
  # One untyped metric family per configured rule, keyed by name.
  metrics = {}
  for metric in config['metrics']:
    metrics[metric['name']] = Metric(metric['name'], 'SNMP OID {0}'.format(metric['oid']), 'untyped')

  # Bulk GET is the default; a rule set may disable it via config['bulkget'].
  do_bulkget = 'bulkget' not in config or config['bulkget']
  values = walk_oids(host, port, config['walk'], config.get('community', 'public'), do_bulkget)

  oids = {}
  for oid, value in values:
    oids[oid_to_tuple(oid)] = value

  # Netsnmp doesn't tell us if an error has occured, so
  # try to spot it by no results.
  if not oids:
    raise Exception("No OIDs returned, device not responding?")

  # Build a tree from the rules based on oid for faster lookup.
  metric_tree = {}
  for metric in config['metrics']:
    prefix = oid_to_tuple(metric['oid'])
    head = metric_tree
    for i in prefix:
      head.setdefault('children', {})
      head['children'].setdefault(i, {})
      head = head['children'][i]
    head['entry'] = metric

  # Descend the tree one OID component at a time for each walked value.
  for oid, value in oids.items():
    head = metric_tree
    for i in oid:
      head = head.get('children', {}).get(i)
      if not head:
        # No rule covers this OID.
        break
      if 'entry' in head:
        metric = head['entry']

        prefix = oid_to_tuple(metric['oid'])
        value = float(value)
        indexes = oid[len(prefix):]
        labels = parse_indexes(indexes, metric.get('indexes', {}), metric.get('lookups', {}), oids)
        metrics[metric['name']].add_sample(metric['name'], value=value, labels=labels)
        # First matching rule wins: at most one sample per walked OID.
        break

  # Ad-hoc collector handing back the prepared families.
  class Collector():
    def collect(self):
      return metrics.values()
  registry = CollectorRegistry()
  registry.register(Collector())
  # Self-observability: scrape duration and walked-OID count ride along.
  duration = Gauge('snmp_scrape_duration_seconds', 'Time this SNMP scrape took, in seconds', registry=registry)
  duration.set(time.time() - start)
  walked = Gauge('snmp_oids_walked', 'Number of oids walked in this scrape', registry=registry)
  walked.set(len(oids))
  return generate_latest(registry)
Esempio n. 13
0
 def prometheus_app(environ, start_response):
     """WSGI application serving the exposition of the enclosing ``registry``,
     honouring the standard ``name[]`` query-string metric filter."""
     query_str = environ.get('QUERY_STRING', '')
     params = parse_qs(query_str)
     reg = registry
     # Restrict output to the requested metric names, if any were given.
     if 'name[]' in params:
         reg = reg.restricted_registry(params['name[]'])
     output = generate_latest(reg)
     status = str('200 OK')
     headers = [(str('Content-type'), CONTENT_TYPE_LATEST)]
     start_response(status, headers)
     # WSGI bodies are iterables of bytes; generate_latest() returns bytes.
     return [output]
Esempio n. 14
0
 def do_GET(self):
     """Serve the default registry's exposition; on collection failure,
     log the traceback and answer 500 with an empty body."""
     try:
         response = generate_latest(REGISTRY)
         status = 200
     except Exception:
         logger.exception('Fetch failed')
         # Bug fix: wfile is a binary stream, so the error body must be
         # bytes — '' (str) raised TypeError on Python 3 and masked the
         # original collection error with a broken 500 response.
         response = b''
         status = 500
     self.send_response(status)
     self.send_header('Content-Type', CONTENT_TYPE_LATEST)
     self.end_headers()
     self.wfile.write(response)
Esempio n. 15
0
 def do_GET(self):
     """Route GET requests: landing page at '/', exposition at '/metrics',
     anything else served as a static file."""
     if self.path == '/':
         self.send_response(200)
         self.send_header('Content-type', 'text/html')
         self.end_headers()
         # Bug fix: wfile is a binary stream on Python 3 — write bytes,
         # not str (the markup itself is unchanged).
         self.wfile.write(b'<html><body><p><a href="/metrics">Metrics</a></p></body></html>')
     elif self.path == '/metrics':
         self.send_response(200)
         self.send_header('Content-Type', 'text/plain; version=0.0.4; charset=utf-8')
         self.end_headers()
         # generate_latest() already returns bytes.
         self.wfile.write(prometheus_client.generate_latest())
     else:
         self.serve_file(self.path, True)
Esempio n. 16
0
def ExportToDjangoView(request):
    """Exports /metrics as a Django view.

    You can use django_prometheus.urls to map /metrics to this view.
    """
    # Under multiprocess deployments (env var set for prometheus_client),
    # aggregate the per-process metric files into a fresh registry;
    # otherwise serve the default in-process REGISTRY.
    if 'prometheus_multiproc_dir' in os.environ:
        registry = prometheus_client.CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = prometheus_client.REGISTRY
    metrics_page = prometheus_client.generate_latest(registry)
    return HttpResponse(
        metrics_page,
        content_type=prometheus_client.CONTENT_TYPE_LATEST)
Esempio n. 17
0
def collect_metrics():
    """Render the current metrics, honouring multiprocess mode when the
    ``prometheus_multiproc_dir`` environment variable is set."""
    # Lazy import keeps prometheus_client optional for the rest of the module.
    from prometheus_client import (
        CollectorRegistry,
        core,
        generate_latest,
        multiprocess,
    )
    if 'prometheus_multiproc_dir' in os.environ:
        registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = core.REGISTRY
    # NOTE(review): presumably the lock guards the shared multiprocess
    # files during serialisation — confirm against try_prometheus_lock.
    with try_prometheus_lock():
        return generate_latest(registry)
Esempio n. 18
0
def get_prometheus_inventory_metrics():
    """Export app and OS-version inventory counts as labelled gauges."""
    registry = CollectorRegistry()
    g = Gauge('zentral_inventory_osx_apps', 'Zentral inventory OSX apps',
              ['name', 'version_str', 'source'],
              registry=registry)
    for r in osx_app_count():
        # Each row carries label values plus a 'count' to plot; pop the
        # count so the remaining keys map 1:1 onto the label names.
        count = r.pop('count')
        g.labels(**r).set(count)
    # 'g' is deliberately reused for the second gauge family.
    g = Gauge('zentral_inventory_os_versions', 'Zentral inventory OS Versions',
              ['name', 'major', 'minor', 'patch', 'build', 'source'],
              registry=registry)
    for r in os_version_count():
        count = r.pop('count')
        g.labels(**r).set(count)
    return generate_latest(registry)
 def get_stats(self):
     """Export cached hypervisor statistics as labelled gauges, one gauge
     family per distinct (sanitized) stat name."""
     registry = CollectorRegistry()
     labels = ['region', 'host', 'aggregate', 'aggregate_id']
     hypervisor_stats_cache = self.get_cache_data()
     for hypervisor_stat in hypervisor_stats_cache:
         stat_gauge = Gauge(
             self.gauge_name_sanitize(
                 hypervisor_stat['stat_name']),
             'Openstack Hypervisor statistic',
             labels,
             registry=registry)
         # Missing label fields default to empty strings.
         label_values = [self.osclient.region,
                         hypervisor_stat.get('host', ''),
                         hypervisor_stat.get('aggregate', ''),
                         hypervisor_stat.get('aggregate_id', '')]
         stat_gauge.labels(*label_values).set(hypervisor_stat['stat_value'])
     return generate_latest(registry)
    def test_collect(self):
        """The collector's full exposition text matches the golden output
        below (gauges, summaries with quantiles, windowed rates, counters)."""
        # used generate_latest method for easy assert
        prom_metrics_text = generate_latest(self.marathon_collector)
        expected_text = '''# HELP marathon_org_eclipse_jetty_servlet_servletcontexthandler_percent_4xx_15m from org.eclipse.jetty.servlet.ServletContextHandler.percent-4xx-15m
# TYPE marathon_org_eclipse_jetty_servlet_servletcontexthandler_percent_4xx_15m gauge
marathon_org_eclipse_jetty_servlet_servletcontexthandler_percent_4xx_15m 0.01139209265037441
# HELP marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size from service.mesosphere.marathon.state.MarathonStore.AppDefinition.read-data-size
# TYPE marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size summary
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size_count 1870979.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size_sum 2244691348.289432
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.98"} 1558.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.99"} 1558.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.75"} 1516.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.95"} 1557.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.5"} 1231.0
marathon_service_mesosphere_marathon_state_marathonstore_appdefinition_read_data_size{quantile="0.999"} 1732.0
# HELP marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate from mesosphere.marathon.state.AppRepository.read-request-errors
# TYPE marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate gauge
marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate{window="1m"} 7.0
marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate{window="5m"} 3.0
marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate{window="15m"} 15.0
marathon_mesosphere_marathon_state_apprepository_read_request_errors_rate{window="mean"} 7.0
# HELP marathon_mesosphere_marathon_state_apprepository_read_request_errors_count from mesosphere.marathon.state.AppRepository.read-request-errors_count
# TYPE marathon_mesosphere_marathon_state_apprepository_read_request_errors_count counter
marathon_mesosphere_marathon_state_apprepository_read_request_errors_count 100.0
# HELP marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate from mesosphere.marathon.api.v2.AppTasksResource$$EnhancerByGuice$$bd9fb6d1.indexJson
# TYPE marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate gauge
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate{window="1m"} 7.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate{window="5m"} 3.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate{window="15m"} 15.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_rate{window="mean"} 7.0
# HELP marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson from mesosphere.marathon.api.v2.AppTasksResource$$EnhancerByGuice$$bd9fb6d1.indexJson
# TYPE marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson summary
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_count 1870979.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson_sum 2244691348.289432
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.98"} 1558.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.75"} 1516.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.99"} 1558.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.95"} 1557.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.999"} 1732.0
marathon_mesosphere_marathon_api_v2_apptasksresource__enhancerbyguice__bd9fb6d1_indexjson{quantile="0.5"} 1231.0
# HELP marathon_org_eclipse_jetty_servlet_servletcontexthandler_active_dispatches from org.eclipse.jetty.servlet.ServletContextHandler.active-dispatches
# TYPE marathon_org_eclipse_jetty_servlet_servletcontexthandler_active_dispatches counter
marathon_org_eclipse_jetty_servlet_servletcontexthandler_active_dispatches 1.0
'''
        self.assertEqual(expected_text, prom_metrics_text)
 def get_stats(self):
     """Export cached Nova service statistics as labelled gauges, one gauge
     family per distinct (sanitized) stat name."""
     registry = CollectorRegistry()
     labels = ['region', 'host', 'service', 'state']
     services_stats_cache = self.get_cache_data()
     for services_stat in services_stats_cache:
         stat_gauge = Gauge(
             self.gauge_name_sanitize(
                 services_stat['stat_name']),
             'Openstack Nova Service statistic',
             labels,
             registry=registry)
         # Missing label fields default to empty strings.
         label_values = [self.osclient.region,
                         services_stat.get('host', ''),
                         services_stat.get('service', ''),
                         services_stat.get('state', '')]
         stat_gauge.labels(*label_values).set(services_stat['stat_value'])
     return generate_latest(registry)
 def get_stats(self):
     """Report per-service API check results (fail = 0, ok = 1, unknown = 2)
     as one gauge per service, rendered in exposition format."""
     reg = CollectorRegistry()
     label_names = ['region', 'url', 'service']
     for entry in self.get_cache_data():
         gauge_name = self.gauge_name_sanitize(
             "check_{}_api".format(entry['service']))
         check_gauge = Gauge(
             gauge_name,
             'Openstack API check. fail = 0, ok = 1 and unknown = 2',
             label_names,
             registry=reg)
         check_gauge.labels(
             entry['region'], entry['url'], entry['service']).set(entry['status'])
     return generate_latest(reg)
Esempio n. 23
0
def collect_snmp(config, host, port=161):
  """Scrape a host and return prometheus text format for it"""

  start = time.time()
  # One metric family per rule; type and help text are configurable.
  metrics = {}
  for metric in config['metrics']:
    prom_type = metric.get('metric_type', 'gauge')
    prom_help = metric.get('metric_help',
                           'SNMP OID {0}'.format(metric.get('oid', "NaN")))
    metrics[metric['name']] = Metric(metric['name'], prom_help, prom_type)
  values = walk_oids(host, port, config['walk'], config.get('community', 'public'), config.get('timeout', 5), config.get('retries', 3))
  oids = {}
  for oid, value in values:
    # Hoisted: oid_to_tuple(oid) was recomputed up to four times per value.
    key = oid_to_tuple(oid)
    # Keep the first value seen; a falsy placeholder may be replaced by a
    # later truthy one (the old `== None` test was redundant with `not`).
    if key not in oids or (not oids[key] and value):
      oids[key] = value

  # Match every walked OID against every rule by OID prefix.
  for oid, value in oids.items():
    for metric in config['metrics']:
      prefix = oid_to_tuple(metric['oid'])
      if oid[:len(prefix)] == prefix:
        try:
          value = float(value)
        except ValueError as e:
          # Non-numeric values are reported and exported as 0.0.
          print(e)
          value = 0.0

        indexes = oid[len(prefix):]
        labels = parse_indexes(indexes, metric.get('indexes', {}), metric.get('lookups', {}), oids)
        metrics[metric['name']].add_sample(metric['name'], value=value, labels=labels)

  # Ad-hoc collector handing back the prepared families.
  class Collector():
    def collect(self):
      return metrics.values()
  registry = CollectorRegistry()
  registry.register(Collector())
  # Self-observability: scrape duration and walked-OID count ride along.
  duration = Gauge('snmp_scrape_duration_seconds', 'Time this SNMP scrape took, in seconds', registry=registry)
  duration.set(time.time() - start)
  walked = Gauge('snmp_oids_walked', 'Number of oids walked in this scrape', registry=registry)
  walked.set(len(oids))
  return generate_latest(registry)
Esempio n. 24
0
    def test_reports_metrics(self):
        """
        ``MetricsResource`` serves the metrics from the provided registry.
        """
        c = Counter('cc', 'A counter', registry=self.registry)
        c.inc()

        # Serve the registry on an ephemeral port (0) via twisted.web.
        root = Resource()
        root.putChild(b'metrics', MetricsResource(registry=self.registry))
        server = reactor.listenTCP(0, Site(root))
        self.addCleanup(server.stopListening)

        agent = Agent(reactor)
        port = server.getHost().port
        url = "http://localhost:{port}/metrics".format(port=port)
        d = agent.request(b"GET", url.encode("ascii"))

        # The HTTP body must equal a direct generate_latest() rendering.
        d.addCallback(readBody)
        d.addCallback(self.assertEqual, generate_latest(self.registry))

        return d
Esempio n. 25
0
def metrics():
    """Flask endpoint returning the default registry in exposition format."""
    payload = generate_latest()
    response = make_response(payload)
    response.mimetype = CONTENT_TYPE_LATEST
    return response
Esempio n. 26
0
 def requests_count():
     """Render every per-endpoint registry in ``graphs`` and return the
     concatenated expositions as a plain-text response."""
     # Comprehension replaces the manual append loop; iterate values()
     # directly since the keys were never used.
     payload = [prometheus_client.generate_latest(v) for v in graphs.values()]
     return Response(payload, mimetype="text/plain")
Esempio n. 27
0
    def metrics_view_func(self):
        """Return the default-registry exposition; the import is deferred so
        prometheus_client stays an optional dependency."""
        # noinspection PyProtectedMember
        from prometheus_client import generate_latest

        return generate_latest()
Esempio n. 28
0
 def push_metrics(data: FieldData):
     """Apply ``data`` to the registry via ``update_fun``, then POST the
     resulting exposition to the configured ``service`` URL."""
     update_fun(data)
     # 'data' is rebound: it now holds the serialized exposition bytes.
     data = generate_latest(registry)
     resp = requests.post(service, data=data)
     # Surface HTTP errors from the receiving service to the caller.
     resp.raise_for_status()
 def list(self):
     """Return the default-registry exposition as a plain-text response."""
     return Response(generate_latest(), mimetype='text')
Esempio n. 30
0
 def test_counter(self):
     """A single increment renders as 1.0 with HELP/TYPE header lines."""
     c = Counter('cc', 'A counter', registry=self.registry)
     c.inc()
     self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc 1.0\n',
                      generate_latest(self.registry))
Esempio n. 31
0
 def index(self):
     """Serve the default-registry exposition as plain text."""
     return Response(generate_latest(), mimetype='text/plain')
Esempio n. 32
0
def metrics():
	"""Expose the default Prometheus registry in text exposition format."""
	return prometheus_client.generate_latest()
Esempio n. 33
0
def generate_latest_metrics():
    """Return the default-registry exposition as bytes."""
    return generate_latest()
Esempio n. 34
0
def root():
    """Serve the registry's exposition as a UTF-8 plain-text response."""
    body = generate_latest(registry).decode("utf-8")
    resp = make_response(body, 200)
    resp.mimetype = "text/plain"
    return resp
def get_data():
    """Flask endpoint exposing the default registry with the standard
    exposition content type."""
    return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)
Esempio n. 36
0
 def prom_metrics(environ, start_response):
     """Minimal WSGI application exposing the global REGISTRY."""
     status = "200 OK"
     headers = [("Content-type", CONTENT_TYPE_LATEST)]
     start_response(status, headers)
     # WSGI bodies are iterables of bytes; generate_latest() returns bytes.
     return [generate_latest(REGISTRY)]
Esempio n. 37
0
def expose_metrics():
    """Serve the default registry using the module's shared ``_HEADERS``."""
    return Response(generate_latest(), headers=_HEADERS)
Esempio n. 38
0
def expose_metrics_multiprocess():
    """Aggregate per-process metric files into a fresh registry and serve
    the combined exposition."""
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)
    return Response(generate_latest(registry), headers=_HEADERS)
Esempio n. 39
0
async def metrics(request):
    """Async handler returning the default-registry exposition as raw bytes."""
    return response.raw(
        prometheus_client.generate_latest(),
        content_type="text/plain; version=0.0.4; charset=utf-8")
Esempio n. 40
0
 async def metrics(_: Request) -> StreamResponse:
     """aiohttp handler returning the default-registry exposition."""
     resp = web.Response(body=prometheus_client.generate_latest())
     resp.content_type = prometheus_client.CONTENT_TYPE_LATEST
     return resp
Esempio n. 41
0
 def test_gauge(self):
     """A set() value is rendered verbatim with HELP/TYPE header lines."""
     g = Gauge('gg', 'A gauge', registry=self.registry)
     g.set(17)
     self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n',
                      generate_latest(self.registry))
Esempio n. 42
0
    def get(self):
        """Tornado handler returning the default-registry exposition."""
        # Deferred import keeps prometheus_client an optional dependency.
        import prometheus_client

        self.write(prometheus_client.generate_latest())
        # Tornado buffers the body, so setting the header after write()
        # still takes effect before the response is flushed.
        self.set_header("Content-Type", "text/plain; version=0.0.4")
Esempio n. 43
0
 def get_metrics(self, path, params):
     """Return metric list in printable form"""
     # decode(): generate_latest() yields bytes, callers expect str.
     return generate_latest(self._reg).decode('utf-8')
Esempio n. 44
0
def exporter():
    """Collect CITA node status over JSON-RPC and return it as a Prometheus
    text-format HTTP response.

    Reads module-level configuration (NODE, NODE_FILE_PATH, NODE_ID, ADDRESS,
    ...). If the 'cita-chain' process is not running, only the service-status
    gauge is set and the response is returned immediately.
    """
    # definition tag
    registry = CollectorRegistry(auto_describe=False)
    service_status = Gauge("Node_Get_ServiceStatus",
                           SERVICE_STATUS_TITLE, ["NodeIP", "NodePort"],
                           registry=registry)
    first_block_details = Gauge("Node_Get_FirstBlockNumberDetails",
                                FIRST_BLOCK_DETAILS_TITLE,
                                ["NodeIP", "NodePort", "FirstBlockNumberHash"],
                                registry=registry)
    chain_info = Gauge("Node_Get_ChainInfo",
                       CHAIN_INFO_TITLE, [
                           "NodeIP", "NodePort", "ChainName", "Operator",
                           "TokenName", "TokenSymbol", "Version"
                       ],
                       registry=registry)
    node_peers = Gauge("Node_Get_NodePeers",
                       NODE_PEERS_TITLE, ["NodeIP", "NodePort"],
                       registry=registry)
    chain_nodes = Gauge("Node_Get_ChainNodes",
                        CHAIN_NODES_TITLE, ["NodeIP", "NodePort"],
                        registry=registry)
    last_block_number = Gauge("Node_Get_LastBlockNumber",
                              LAST_BLOCK_NUMBER_TITLE, [
                                  "NodeIP", "NodePort", "FirstBlockNumberHash",
                                  "NodeID", "NodeAddress"
                              ],
                              registry=registry)
    check_proposer = Gauge("Node_CheckProposer",
                           CHECK_PROPOSER_TITLE, ["NodeIP", "NodePort"],
                           registry=registry)
    last_block_details = Gauge(
        "Node_Get_LastBlockNumberDetails",
        LAST_BLOCK_DETAILS_TITLE, [
            "NodeIP", "NodePort", "LastBlocknumber", "LastBlockProposer",
            "LastBlockHash", "NodeID", "HostPlatform", "HostName",
            "ConsensusStatus", "SoftVersion"
        ],
        registry=registry)
    block_height_difference = Gauge(
        "Node_Get_BlockDifference",
        BLOCK_HEIGHT_DIFFERENCE_TITLE,
        ["NodeIP", "NodePort", "CurrentHeight", "PreviousHeight"],
        registry=registry)
    dir_total_size = Gauge("Node_Get_DirInfo_TotalFileSize",
                           DIR_TOTAL_SIZE_TITLE,
                           ["NodeIP", "NodePort", "NodeDir", "NodeDisk"],
                           registry=registry)
    block_interval = Gauge("Node_Get_BlockTimeDifference",
                           BLOCK_INTERVAL_TITLE, ["NodeIP", "NodePort"],
                           registry=registry)
    last_block_transactions = Gauge("Node_Get_LastBlockNumberTransactions",
                                    LAST_BLOCK_TRANSACTIONS_TITLE,
                                    ["NodeIP", "NodePort"],
                                    registry=registry)
    last_block_quota_used = Gauge("Node_Get_LastBlockNumberQuotaUsed",
                                  LAST_BLOCK_QUOTA_USED_TITLE,
                                  ["NodeIP", "NodePort"],
                                  registry=registry)
    chain_quota_price = Gauge("Node_Get_QuotaPrice",
                              CHAIN_QUOTA_PRICE_TITLE, ["NodeIP", "NodePort"],
                              registry=registry)
    block_quota_limit = Gauge("Node_Get_BlockQuotaLimit",
                              BLOCK_QUOTA_LIMIT_TITLE, ["NodeIP", "NodePort"],
                              registry=registry)

    # run exporter
    # NODE is an "ip:port" string.
    node_ip = str(NODE.split(':')[0])
    node_port = str(NODE.split(':')[1])
    # Liveness probe: count running 'cita-chain' processes via ps.
    check_process = os.popen("ps alx |grep 'cita-chain' |grep -c -v grep")
    if check_process.read() == '0\n':
        service_status.labels(NodeIP=node_ip, NodePort=node_port).set(0)
        return Response(prometheus_client.generate_latest(registry),
                        mimetype="text/plain")

    service_status.labels(NodeIP=node_ip, NodePort=node_port).set(1)
    class_result = ExporterFunctions(node_ip, node_port)
    # NODE_FILE_PATH may be a comma-separated list of data directories.
    if ',' in NODE_FILE_PATH:
        path_list = NODE_FILE_PATH.split(',')
        for path in path_list:
            dir_analysis(path)
            dir_total_size.labels(NodeIP=node_ip,
                                  NodePort=node_port,
                                  NodeDir=path,
                                  NodeDisk=DISK_TOTAL).set(FILE_TOTAL_SIZE)
    else:
        path = NODE_FILE_PATH
        dir_analysis(path)
        dir_total_size.labels(NodeIP=node_ip,
                              NodePort=node_port,
                              NodeDir=path,
                              NodeDisk=DISK_TOTAL).set(FILE_TOTAL_SIZE)
    first_block_info = class_result.block_number_detail('0x0')
    if 'result' in first_block_info:
        first_block_hash = first_block_info['result']['hash']
        first_block_time = first_block_info['result']['header']['timestamp']
        first_block_details.labels(
            NodeIP=node_ip,
            NodePort=node_port,
            FirstBlockNumberHash=first_block_hash).set(first_block_time)
    else:
        print(first_block_info)
    metadata_info = class_result.metadata()
    if 'result' in metadata_info:
        chain_name = metadata_info['result']['chainName']
        operator = metadata_info['result']['operator']
        token_name = metadata_info['result']['tokenName']
        token_symbol = metadata_info['result']['tokenSymbol']
        economical_model = metadata_info['result']['economicalModel']
        chain_version = metadata_info['result']['version']
        chain_info.labels(NodeIP=node_ip,
                          NodePort=node_port,
                          ChainName=chain_name,
                          Operator=operator,
                          TokenName=token_name,
                          TokenSymbol=token_symbol,
                          Version=chain_version).set(economical_model)
        consensus_node_list = metadata_info['result']['validators']
        consensus_node_count = len(consensus_node_list)
        chain_nodes.labels(NodeIP=node_ip,
                           NodePort=node_port).set(consensus_node_count)
    else:
        print(metadata_info)
    block_number_info = class_result.block_number()
    if 'result' in block_number_info:
        hex_number = block_number_info['result']
        previous_hex_number = hex(int(hex_number, 16) - 1)
        # NOTE(review): first_block_hash is unbound here if the first-block
        # RPC above returned no 'result' — confirm the failure path.
        last_block_number.labels(NodeIP=node_ip,
                                 NodePort=node_port,
                                 FirstBlockNumberHash=first_block_hash,
                                 NodeID=NODE_ID,
                                 NodeAddress=ADDRESS).set(int(hex_number, 16))
    else:
        print(block_number_info)
    # NOTE(review): hex_number/previous_hex_number are unbound here if the
    # block-number RPC failed above — confirm the failure path.
    block_info = class_result.block_number_detail(hex_number)
    previous_block_info = class_result.block_number_detail(previous_hex_number)
    if 'result' in block_info and 'result' in previous_block_info:
        block_head_info = block_info['result']['header']
        if block_head_info.get('quotaUsed'):
            block_quota_used = int(block_head_info['quotaUsed'], 16)
        else:
            #Get the previous version of CITA v0.19.1 gasUsed
            # NOTE(review): this .get() result is discarded; the next line
            # re-reads the key and raises KeyError if it is absent.
            block_head_info.get('gasUsed')
            block_quota_used = int(block_head_info['gasUsed'], 16)
        block_hash = block_info['result']['hash']
        block_time = int(block_head_info['timestamp'])
        block_transactions = int(
            len(block_info['result']['body']['transactions']))
        block_proposer = block_head_info['proposer']
        previous_block_time = int(
            previous_block_info['result']['header']['timestamp'])
        interval = abs(block_time - previous_block_time)
        # NOTE(review): consensus_node_list is unbound if the metadata RPC
        # failed above — confirm the failure path.
        if ADDRESS in consensus_node_list:
            consensus = 1
        else:
            consensus = 0
        last_block_details.labels(NodeIP=node_ip,
                                  NodePort=node_port,
                                  LastBlocknumber=int(hex_number, 16),
                                  LastBlockProposer=block_proposer,
                                  LastBlockHash=block_hash,
                                  NodeID=NODE_ID,
                                  HostPlatform=EXPORTER_PLATFORM,
                                  HostName=AGENT_NAME,
                                  ConsensusStatus=consensus,
                                  SoftVersion=SOFT_VERSION).set(block_time)
        block_height_difference.labels(NodeIP=node_ip,
                                       NodePort=node_port,
                                       CurrentHeight=int(hex_number, 16),
                                       PreviousHeight=int(
                                           previous_hex_number,
                                           16)).set(interval)
        block_interval.labels(NodeIP=node_ip, NodePort=node_port).set(interval)
        last_block_transactions.labels(
            NodeIP=node_ip, NodePort=node_port).set(block_transactions)
        last_block_quota_used.labels(NodeIP=node_ip,
                                     NodePort=node_port).set(block_quota_used)
        if ADDRESS == block_proposer:
            proposer = 1
        else:
            proposer = 0
        check_proposer.labels(NodeIP=node_ip, NodePort=node_port).set(proposer)
    else:
        print(block_info)
        print(previous_block_info)
    peer_info = class_result.peer_count()
    if 'result' in peer_info:
        peers = peer_info['result']
        node_peers.labels(NodeIP=node_ip,
                          NodePort=node_port).set(int(peers, 16))
    else:
        print(peer_info)
    quota_price = class_result.quota_price()
    if 'result' in quota_price:
        price = quota_price['result']
        chain_quota_price.labels(NodeIP=node_ip,
                                 NodePort=node_port).set(int(price, 16))
    else:
        print(quota_price)
    block_limit = class_result.block_limit()
    if 'result' in block_limit:
        limit = block_limit['result']
        block_quota_limit.labels(NodeIP=node_ip,
                                 NodePort=node_port).set(int(limit, 16))
    else:
        print(block_limit)

    return Response(prometheus_client.generate_latest(registry),
                    mimetype="text/plain")
Esempio n. 45
0
 def list(self):
     """Return the default-registry exposition as a plain-text response."""
     return Response(generate_latest(), mimetype='text')
Esempio n. 46
0
def metrics():
    """Populate all AWX Prometheus gauges/info metrics and return the
    rendered exposition-format payload (``generate_latest()``).

    Reads license info, object counts, instance capacity data and
    per-node job counts, and pushes them into module-level collectors.
    """
    license_info = get_license(show_key=False)
    SYSTEM_INFO.info({
        'install_uuid':
        settings.INSTALL_UUID,
        'insights_analytics':
        str(settings.INSIGHTS_TRACKING_STATE),
        'tower_url_base':
        settings.TOWER_URL_BASE,
        'tower_version':
        get_awx_version(),
        'ansible_version':
        get_ansible_version(),
        'license_type':
        license_info.get('license_type', 'UNLICENSED'),
        'license_expiry':
        str(license_info.get('time_remaining', 0)),
        'pendo_tracking':
        settings.PENDO_TRACKING_STATE,
        'external_logger_enabled':
        str(settings.LOG_AGGREGATOR_ENABLED),
        'external_logger_type':
        getattr(settings, 'LOG_AGGREGATOR_TYPE', 'None')
    })

    LICENSE_INSTANCE_TOTAL.set(str(license_info.get('available_instances', 0)))
    LICENSE_INSTANCE_FREE.set(str(license_info.get('free_instances', 0)))

    current_counts = counts(None)

    ORG_COUNT.set(current_counts['organization'])
    USER_COUNT.set(current_counts['user'])
    TEAM_COUNT.set(current_counts['team'])
    INV_COUNT.set(current_counts['inventory'])
    PROJ_COUNT.set(current_counts['project'])
    JT_COUNT.set(current_counts['job_template'])
    WFJT_COUNT.set(current_counts['workflow_job_template'])

    HOST_COUNT.labels(type='all').set(current_counts['host'])
    HOST_COUNT.labels(type='active').set(current_counts['active_host_count'])

    SCHEDULE_COUNT.set(current_counts['schedule'])
    INV_SCRIPT_COUNT.set(current_counts['custom_inventory_script'])
    CUSTOM_VENVS.set(current_counts['custom_virtualenvs'])

    USER_SESSIONS.labels(type='all').set(current_counts['active_sessions'])
    USER_SESSIONS.labels(type='user').set(
        current_counts['active_user_sessions'])
    USER_SESSIONS.labels(type='anonymous').set(
        current_counts['active_anonymous_sessions'])

    RUNNING_JOBS.set(current_counts['running_jobs'])

    instance_data = instance_info(None)
    for uuid in instance_data:
        INSTANCE_CAPACITY.labels(instance_uuid=uuid).set(
            instance_data[uuid]['capacity'])
        INSTANCE_CPU.labels(instance_uuid=uuid).set(instance_data[uuid]['cpu'])
        INSTANCE_MEMORY.labels(instance_uuid=uuid).set(
            instance_data[uuid]['memory'])
        INSTANCE_INFO.labels(instance_uuid=uuid).info({
            'enabled':
            str(instance_data[uuid]['enabled']),
            # Fix: instance_data[uuid] is a dict (see subscript accesses
            # above), so getattr() always returned the default 'None';
            # use dict.get() to actually read the value when present.
            'last_isolated_check':
            instance_data[uuid].get('last_isolated_check', 'None'),
            'managed_by_policy':
            str(instance_data[uuid]['managed_by_policy']),
            'version':
            instance_data[uuid]['version']
        })

    instance_data = job_instance_counts(None)
    for node in instance_data:
        # skipping internal execution node (for system jobs)
        if node == '':
            continue
        types = instance_data[node].get('launch_type', {})
        for launch_type, value in types.items():
            INSTANCE_LAUNCH_TYPE.labels(node=node,
                                        launch_type=launch_type).set(value)
        statuses = instance_data[node].get('status', {})
        for status, value in statuses.items():
            INSTANCE_STATUS.labels(node=node, status=status).set(value)

    return generate_latest()
Esempio n. 47
0
def metrics():
    """Serve metrics aggregated across all worker processes.

    Builds a fresh registry, attaches the multiprocess collector (which
    reads every worker's sample files), and renders the result.
    """
    collector_registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(collector_registry)
    payload = generate_latest(collector_registry)
    return Response(payload, mimetype=CONTENT_TYPE_LATEST)
Esempio n. 48
0
def handler():
    """Refresh health-derived metrics and serve them as plain text.

    Fetches the health payload from ``url``, feeds it to
    ``generate_metrics`` (which updates collectors in ``reg``), then
    renders the registry in Prometheus exposition format.
    """
    health = get_health(url)
    generate_metrics(health)
    body = prometheus_client.generate_latest(reg)
    return Response(body, mimetype="text/plain")
 def index(self):
     """Serve the default registry's metrics as a plain-text response."""
     body = generate_latest()
     return Response(body, mimetype='text/plain')
Esempio n. 50
0
 def test_escaping(self):
     """Backslashes, newlines and quotes in help text and label values
     must be escaped in the exposition output."""
     counter = Counter('cc', 'A\ncount\\er', ['a'], registry=self.registry)
     counter.labels('\\x\n"').inc(1)
     expected = b'# HELP cc A\\ncount\\\\er\n# TYPE cc counter\ncc{a="\\\\x\\n\\""} 1.0\n'
     self.assertEqual(expected, generate_latest(self.registry))
Esempio n. 51
0
async def metrics(request):
    """aiohttp handler exposing the default registry.

    Renders the exposition payload first, then tags the response with
    the Prometheus content type before returning it.
    """
    payload = prometheus_client.generate_latest()
    response = web.Response(body=payload)
    response.content_type = prometheus_client.CONTENT_TYPE_LATEST
    return response
Esempio n. 52
0
 def test_unicode(self):
     """Non-ASCII help text and label values are emitted UTF-8 encoded."""
     counter = Counter('cc', '\u4500', ['l'], registry=self.registry)
     counter.labels('\u4500').inc()
     expected = b'# HELP cc \xe4\x94\x80\n# TYPE cc counter\ncc{l="\xe4\x94\x80"} 1.0\n'
     self.assertEqual(expected, generate_latest(self.registry))
Esempio n. 53
0
def metrics():
    """Bottle route: expose the global default registry."""
    # Tag the response with the Prometheus content type, then render.
    bottle.response.content_type = prom.CONTENT_TYPE_LATEST
    payload = prom.generate_latest(prom.REGISTRY)
    return payload
Esempio n. 54
0
 def test_summary(self):
     """A labelled summary exposes matching _count and _sum samples."""
     summary = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry)
     summary.labels('c', 'd').observe(17)
     expected = b'# HELP ss A summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n'
     self.assertEqual(expected, generate_latest(self.registry))
def metrics():
    """Serve the default Prometheus registry over this bottle route."""
    output = prom.generate_latest(prom.REGISTRY)
    bottle.response.content_type = prom.CONTENT_TYPE_LATEST
    return output
Esempio n. 56
0
async def requests_count(request):
    """Sanic handler: serve the redirect counter in exposition format.

    Returns a JSON 500 response if rendering the metric fails.
    """
    try:
        payload = generate_latest(redirect_counter).decode()
    except Exception as error:
        # Fix: Exception objects are not JSON-serializable; previously
        # json({'message': error}) would itself raise while building
        # the error response. Stringify the exception instead.
        return json({'message': str(error)}, status=500)
    return text(payload)
Esempio n. 57
0
 def get(self):
     """Tornado handler: write default-registry metrics with the
     Prometheus content type."""
     self.set_header('Content-Type', prometheus_client.CONTENT_TYPE_LATEST)
     payload = prometheus_client.generate_latest(prometheus_client.REGISTRY)
     self.write(payload)
Esempio n. 58
0
 def get(self):
     """Tornado handler serving default-registry metrics.

     The Content-Type header is set before writing the body so it
     cannot be lost if the written chunk is flushed first (and for
     consistency with the sibling handler that does the same).
     """
     self.set_header("Content-Type", prometheus_client.CONTENT_TYPE_LATEST)
     self.write(prometheus_client.generate_latest())
Esempio n. 59
0
 def generate_latest(self):
     """Render this instance's registry in Prometheus exposition format."""
     return generate_latest(registry=self.registry)
Esempio n. 60
0
def Status():
    """Run the STATUS filter pass, then expose the global registry.

    NOTE(review): name is not snake_case but is kept — it is the
    route's public entry point.
    """
    STATUS().filter()
    payload = prometheus_client.generate_latest(registry=REGISTRY)
    return Response(payload, mimetype="text/plain")