def metrics():
    """Return Prometheus metrics as a plain-text HTTP response.

    Fix: the original body returned ``generate_latest()`` on its first
    statement, leaving the Response construction below it unreachable;
    the dead early return is removed so the intended headered response
    is actually built and returned.
    """
    response = Response()
    # Prometheus text exposition format, version 0.0.4.
    response.set_header('Content-Type', 'text/plain; version=0.0.4; charset=utf-8')
    response.body = generate_latest()
    return response
async def test_role_metrics():
    """Load a fixture role, apply its metrics, and render its registry.

    Fix: the fixture file was opened via ``open(...).read()`` and never
    closed; it is now read inside a ``with`` block, and parsed with
    ``json.load`` directly from the handle.
    """
    role_file = os.path.join(os.path.dirname(tests.__file__), 'files/role.json')
    with open(role_file, 'r') as fh:
        jdata = json.load(fh)
    role = Role(TEST_ROLE)
    role.data = jdata['data']['repository']
    role = set_role_metrics(role)
    # Render only this role's registry, not the global default one.
    return generate_latest(registry=role.registry)
async def test_collection_metrics():
    """Load a fixture collection, apply its metrics, and render its registry.

    Fix: the fixture file was opened via ``open(...).read()`` and never
    closed; it is now read inside a ``with`` block, and parsed with
    ``json.load`` directly from the handle.
    """
    collection_file = os.path.join(os.path.dirname(tests.__file__),
                                   'files/collection.json')
    collection = Collection(TEST_COLLECTION)
    with open(collection_file, 'r') as fh:
        collection.data = json.load(fh)
    collection = set_collection_metrics(collection)
    # Render only this collection's registry, not the global default one.
    return generate_latest(registry=collection.registry)
def show_metrics(request):
    """Display metrics for Prometheus.

    Renders the default registry into the text exposition format and
    wraps it in a raw 200 response with the exposition content type.
    """
    payload = exposition.generate_latest(core.REGISTRY)
    reply = response.raw(payload, 200)
    reply.headers["Content-type"] = exposition.CONTENT_TYPE_LATEST
    return reply
def process(raw_data, zone):
    """Convert raw per-PoP DNS query data into Prometheus text format.

    Builds one GaugeMetricFamily of per-record query counts labelled by
    zone/record/response/colo, and renders it through a minimal registry
    shim so ``generate_latest`` can consume it.
    """

    class RegistryMock(object):
        """Duck-typed registry exposing collect() over a fixed metric list."""

        def __init__(self, metrics):
            self.metrics = metrics

        def collect(self):
            yield from self.metrics

    families = {
        'record_queried': GaugeMetricFamily(
            'cloudflare_dns_record_queries',
            'DNS queries per record at PoP location.',
            labels=[
                'zone', 'record_name', 'record_type',
                'query_response', 'colo_id',
            ],
        )
    }

    def generate_metrics(pop_data, families):
        # dimensions carry [record_name, record_type, query_response, colo_id];
        # metrics[0] is the query count for that combination.
        dims = pop_data['dimensions']
        count = pop_data['metrics'][0]
        families['record_queried'].add_metric(
            [zone, dims[0], dims[1], dims[2], dims[3]], count)

    for pop_data in raw_data:
        generate_metrics(pop_data, families)
    return generate_latest(RegistryMock(families.values()))
def process(raw_data):
    """Convert load-balancer origin data into Prometheus text format.

    Fixes: iterates the origin list directly instead of the
    ``range(0, len(data), 1)`` anti-idiom, and drops a commented-out
    Python 2 ``print`` statement. One constant-value (1) sample is
    emitted per configured origin; the data lives in the labels.
    """

    class RegistryMock(object):
        """Duck-typed registry exposing collect() over a fixed metric list."""

        def __init__(self, metrics):
            self.metrics = metrics

        def collect(self):
            for metric in self.metrics:
                yield metric

    def generate_metrics(pop_data, families):
        for origin in pop_data['origins']:
            families['loadbalancer_origins'].add_metric(
                [origin['name'], str(origin['address']),
                 str(origin['enabled']), str(origin['weight'])], 1)

    families = {
        'loadbalancer_origins': GaugeMetricFamily(
            'loadbalancer_pool_origin',
            'Created origins',
            labels=['name', 'address', 'enabled', 'weight'],
        )
    }
    for pop_data in raw_data:
        generate_metrics(pop_data, families)
    return generate_latest(RegistryMock(families.values()))
def metrics():
    """Expose aggregated app metrics for a Prometheus scrape, then reset.

    Reads process stats via psutil, derives the average response time and
    request rate since the previous scrape, pushes everything into the
    ``monitoring`` helper, renders the custom Collector registry, and
    finally resets the windowed counters.

    Fixes: the redundant double-initialisation of ``avg_response_time``
    is collapsed into one if/else; the request-rate division now guards
    against a zero-length elapsed window (previously a potential
    ZeroDivisionError); locals no longer shadow ``metrics``/``process``.
    """
    global total_request, start_time, list_request_response, served_request

    proc = psutil.Process(os.getpid())
    memory_usage = proc.memory_info().rss  # resident set size, bytes
    processor_usage = psutil.cpu_percent()

    # Average response time in milliseconds over the window; 0 when idle.
    if list_request_response:
        avg_response_time = sum(list_request_response) * 1000 / len(
            list_request_response)
    else:
        avg_response_time = 0

    elapsed = time.time() - start_time
    # Guard against a scrape arriving within the same clock tick.
    request_rate = int(total_request / elapsed) if elapsed > 0 else 0

    monitoring.setMetric("response_time", avg_response_time)
    monitoring.setMetric("memory", memory_usage)
    monitoring.setMetric("cpu_usage", processor_usage)
    monitoring.setMetric("request_rate", request_rate)
    monitoring.setMetric("served_request", served_request)
    monitoring.setMetric("lost_request", abs(total_request - served_request))
    del list_request_response[:]

    collected = monitoring.getMetrics()
    app_name, replicas = monitoring.getIdentity()
    registry = Collector([app_name, replicas], metrics=collected)
    collected_metric = generate_latest(registry)

    # Start a fresh measurement window for the next scrape.
    start_time = time.time()
    total_request = 0
    served_request = 0
    return Response(collected_metric, status=200, mimetype=CONTENT_TYPE_LATEST)
def prometheus_metrics():
    """Serve grant statistics as Prometheus gauges, behind basic auth."""
    # Reject the scrape unless it carries the expected basic-auth pair.
    auth = request.authorization
    if (not auth
            or auth.username != PROMETHEUS_AUTH_USERNAME
            or auth.password != PROMETHEUS_AUTH_PASSWORD):
        return (
            "Unauthorized",
            401,
            {"WWW-Authenticate": 'Basic realm="Login Required"'},
        )

    # Refresh every gauge from the latest grants dataset.
    data = get_data()
    grants = data["grants"]
    PROMETHEUS_GRANTS_COUNT_GAUGE.set(len(grants))
    PROMETHEUS_RECIPIENTS_COUNT_GAUGE.set(
        len(grants["recipientOrganization.0.id"].unique()))
    PROMETHEUS_FUNDERS_COUNT_GAUGE.set(
        len(grants["fundingOrganization.0.id"].unique()))
    PROMETHEUS_AMOUNT_AWARDED_GBP_GAUGE.set(
        grants.loc[grants["currency"] == "GBP", "amountAwarded"].sum())

    # Render the default registry in the text exposition format.
    output = make_response(generate_latest())
    output.headers["Content-type"] = "text/plain"
    return output
def server():
    """Collect power/voltage channel samples and disk usage, return the
    default registry rendered in Prometheus text exposition format."""
    # NOTE(review): hard-coded device IP and no request timeout — a hung
    # device will block this handler indefinitely; confirm and consider
    # passing timeout= to get().
    sample = get('http://192.168.1.109/current-sample').json()
    for channel in sample["channels"]:
        # One power and one voltage gauge per channel, labelled by
        # channel type, optional label (empty string fallback), and id.
        power.labels(
            type=channel["type"],
            ch_label=(channel.get('label') or ''),
            ch=channel["ch"],
        ).set(channel["p_W"])
        voltage.labels(
            type=channel["type"],
            ch_label=(channel.get('label') or ''),
            ch=channel["ch"],
        ).set(channel["v_V"])
    # Parse `df -B1` output into JSON rows of
    # [device, size, used, avail, mount] via awk + jq (both must be
    # installed on the host; getoutput gives no error on failure).
    disk_info = json.loads(
        subprocess.getoutput(
            "df -B1 | tail -n +2 | awk '{print \"[\\\"\"$1\"\\\",\"$2\",\"$3\",\"$4\",\\\"\"$6\"\\\"]\"}' | jq -s ."
        ))
    for line in disk_info:
        fs_size = float(line[1])
        fs_used = float(line[2])
        # Percentage used; assumes df never reports a zero-size
        # filesystem here — TODO confirm (would divide by zero).
        fs_utilized_pct = fs_used * 100.0 / fs_size
        fs.labels(id=line[0], mount=line[-1], stat="size").set(line[1])
        fs.labels(id=line[0], mount=line[-1], stat="used").set(line[2])
        fs.labels(id=line[0], mount=line[-1], stat="avail").set(line[3])
        fs.labels(id=line[0], mount=line[-1], stat="util").set(fs_utilized_pct)
    response_text = exposition.generate_latest()
    # Debug aid: dump the first 1 kB of the rendered exposition.
    print(response_text[:1000])
    return response_text
def get(self):
    """GET endpoint: render the default Prometheus registry as text."""
    body = generate_latest()
    return Response(body, headers={'Content-Type': 'text/plain'})
def generate_metrics(self):
    """Render this collector through a fresh registry.

    Returns a ``(payload, content_type)`` tuple where *payload* is the
    decoded text exposition output.
    """
    registry = CollectorRegistry()
    registry.register(self)
    payload = exposition.generate_latest(registry).decode("utf-8")
    return (payload, exposition.CONTENT_TYPE_LATEST)
def main():
    """Collect SGE stats and emit them to a .prom file or stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--outfile", metavar="FILE.prom",
                        help="Output file (stdout)")
    parser.add_argument(
        "-d", "--debug",
        dest="log_level",
        action="store_const",
        const=logging.DEBUG,
        default=logging.WARNING,
        help="Enable debug logging (false)",
    )
    args = parser.parse_args()
    logging.basicConfig(level=args.log_level)

    # Node-exporter's textfile collector only picks up *.prom files.
    if args.outfile and not args.outfile.endswith(".prom"):
        parser.error("Output file does not end with .prom")

    registry = CollectorRegistry()
    collect_sge_stats(registry)

    if args.outfile:
        write_to_textfile(args.outfile, registry)
    else:
        sys.stdout.write(generate_latest(registry).decode("utf-8"))
def main():
    """Collect Puppet agent stats and emit them to a .prom file or stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--outfile', metavar='FILE.prom',
                        help='Output file (stdout)')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='Enable debug logging (%(default)s)')
    parser.add_argument('--puppet-state-dir', default='/var/lib/puppet/state',
                        dest='puppet_state_dir',
                        help='Puppet state directory (%(default)s)')
    args = parser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(level=log_level)

    # Node-exporter's textfile collector only picks up *.prom files.
    if args.outfile and not args.outfile.endswith('.prom'):
        parser.error('Output file does not end with .prom')

    registry = CollectorRegistry()
    collect_puppet_stats(args.puppet_state_dir, registry)

    if args.outfile:
        write_to_textfile(args.outfile, registry)
    else:
        sys.stdout.write(generate_latest(registry).decode('utf-8'))
def main():
    """Collect rocm-smi stats and emit them to a .prom file or stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--outfile', metavar='FILE.prom',
                        help='Output file (stdout)')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Enable debug logging (false)')
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.WARNING)

    # Node-exporter's textfile collector only picks up *.prom files.
    if args.outfile and not args.outfile.endswith('.prom'):
        parser.error('Output file does not end with .prom')

    registry = CollectorRegistry()
    collect_stats_from_romc_smi(registry)

    if args.outfile:
        write_to_textfile(args.outfile, registry)
    else:
        sys.stdout.write(generate_latest(registry).decode('utf-8'))
def test_exposed_to_prometheus(self):
    """
    Forward extremity counts are exposed via Prometheus.
    """
    room_creator = self.hs.get_room_creation_handler()
    user = UserID("alice", "test")
    requester = Requester(user, None, False, None, None)
    # Real events, forward extremities — each tuple is
    # (chain length, number of extra extremity events) for one room.
    events = [(3, 2), (6, 2), (4, 6)]
    for event_count, extrems in events:
        info = self.get_success(room_creator.create_room(requester, {}))
        room_id = info["room_id"]
        last_event = None
        # Make a real event chain
        for i in range(event_count):
            ev = self.create_and_send_event(room_id, user, False, last_event)
            last_event = [ev]
        # Sprinkle in some extremities — all sent after the same
        # last_event, so each becomes its own forward extremity.
        for i in range(extrems):
            ev = self.create_and_send_event(room_id, user, False, last_event)
    # Let it run for a while, then pull out the statistics from the
    # Prometheus client registry
    self.reactor.advance(60 * 60 * 1000)
    self.pump(1)
    # Keep only the forward-extremities series from the rendered
    # exposition output (bytes, one sample per line).
    items = set(
        filter(
            lambda x: b"synapse_forward_extremities_" in x,
            generate_latest(REGISTRY).split(b"\n"),
        )
    )
    # Expected histogram: 3 rooms with extremity counts 2, 2 and 6
    # (sum 10), distributed across the bucket boundaries below.
    expected = set(
        [
            b'synapse_forward_extremities_bucket{le="1.0"} 0.0',
            b'synapse_forward_extremities_bucket{le="2.0"} 2.0',
            b'synapse_forward_extremities_bucket{le="3.0"} 2.0',
            b'synapse_forward_extremities_bucket{le="5.0"} 2.0',
            b'synapse_forward_extremities_bucket{le="7.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="10.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="15.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="20.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="50.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="100.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="200.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="500.0"} 3.0',
            b'synapse_forward_extremities_bucket{le="+Inf"} 3.0',
            b"synapse_forward_extremities_count 3.0",
            b"synapse_forward_extremities_sum 10.0",
        ]
    )
    self.assertEqual(items, expected)
def metrics(self):
    """Provides the registered metrics.

    Refreshes the quota gauges first so the scrape sees fresh values,
    then renders this exporter's own registry.
    """
    self._record_quota_free_count_metric()
    self._record_ports_quota_per_subnet_metric()
    payload = generate_latest(self.registry)
    return flask.Response(payload, mimetype='text/plain')
def on_get(self, req, resp):
    """Scrape handler: collect Arista metrics for a ``host:port`` target.

    Fixes two crashes in the original: the ``target`` parameter was
    split on ``:`` *before* the missing-parameter check ran, so a
    missing target raised AttributeError on ``None`` instead of the
    intended HTTPMissingParam; and a target with no explicit ``:port``
    raised IndexError. The parameter is now validated first and the
    port defaults to an empty string when absent.
    """
    resp.set_header('Content-Type', CONTENT_TYPE_LATEST)

    target_param = req.get_param("target")
    if not target_param:
        msg = "No target parameter provided!"
        logging.error(msg)
        raise falcon.HTTPMissingParam('target')

    # Accept either "host" or "host:port".
    self._target, _, self._port = target_param.partition(':')

    try:
        socket.gethostbyname(self._target)
    except socket.gaierror as excptn:
        msg = "Target does not exist in DNS: {0}".format(excptn)
        logging.error(msg)
        resp.status = falcon.HTTP_400
        resp.body = msg
    else:
        registry = AristaMetricsCollector(
            self._config,
            exclude=self._exclude,
            target=self._target,
            port=self._port
        )
        collected_metric = generate_latest(registry)
        resp.body = collected_metric
def on_get(self, req, resp):
    """Scrape handler: validate parameters then collect Arista metrics.

    Fix: when no target was supplied, the original set a 400 status but
    did not return, then called ``socket.gethostbyname(None)`` and
    crashed with TypeError. An explicit ``return`` now ends the handler
    after the 400 is set, matching the modules-validation branch.
    """
    self._target = req.get_param('target')
    modules = req.get_param('modules')
    if modules:
        # Only a comma-separated list of alphabetic module names is valid.
        if re.match(r"^([a-zA-Z]+)(,[a-zA-Z]+)*$", modules):
            self._config['module_names'] = modules
        else:
            msg = 'Invalid modules specified'
            logging.error(msg)
            resp.status = falcon.HTTP_400
            resp.body = msg
            return
    resp.set_header('Content-Type', CONTENT_TYPE_LATEST)
    if not self._target:
        msg = 'No target parameter provided!'
        logging.error(msg)
        resp.status = falcon.HTTP_400
        resp.body = msg
        return  # bugfix: previously fell through and resolved None
    try:
        socket.gethostbyname(self._target)
    except socket.gaierror as e:
        msg = f'Target does not exist in DNS: {e}'
        logging.error(msg)
        resp.status = falcon.HTTP_400
        resp.body = msg
    else:
        registry = AristaMetricsCollector(self._config, target=self._target)
        collected_metric = generate_latest(registry)
        resp.body = collected_metric
def _get_metrics_data(self):
    """Render the appropriate registry to exposition-format bytes.

    In multiprocess mode, per-process metric files are aggregated
    through a MultiProcessCollector into a throwaway registry;
    otherwise the global default registry is rendered directly.
    """
    if self._multiprocess_on:
        registry = CollectorRegistry()
        multiprocess.MultiProcessCollector(registry)
    else:
        registry = core.REGISTRY
    return generate_latest(registry)
def prometheus_metrics(request):
    """Django view exposing the SWH web metrics registry."""
    # Refresh save-request counters before rendering the registry.
    compute_save_requests_metrics()
    payload = generate_latest(registry=SWH_WEB_METRICS_REGISTRY)
    return HttpResponse(content=payload, content_type=CONTENT_TYPE_LATEST)
async def process_metrics() -> str:
    """Fetch this exporter's own Prometheus metrics.

    Returns:
        str in Prometheus' exporter format of this exporters metrics
    """
    # Refresh the exporter's self-describing gauges before rendering.
    update_base_metrics()
    return generate_latest()
def server_stats(request):
    """Return a web response with the plain text version of the metrics.

    :rtype: :class:`aiohttp.web.Response`
    """
    payload = generate_latest(core.REGISTRY)
    rsp = web.Response(body=payload)
    rsp.content_type = CONTENT_TYPE_LATEST
    return rsp
def show_metrics():
    """Display metrics for Prometheus."""
    # Refresh the status-derived gauges before rendering the registry.
    set_status_metrics(checker.global_status(), [])
    output = exposition.generate_latest(core.REGISTRY)
    resp = flask.make_response(output, 200)
    resp.headers["Content-type"] = exposition.CONTENT_TYPE_LATEST
    return resp
def metrics():
    """Proxy a scrape: probe the target's spec endpoint and render metrics."""
    # Honour Prometheus' scrape-timeout header, falling back to the NRPE
    # default when the scraper does not send one.
    timeout = float(request.headers.get(
        'X-Prometheus-Scrape-Timeout-Seconds',
        default=CheckerBase.nrpe_timeout))
    spec_segment = request.params.get('spec_segment', '/?spec')
    url = get_url(request.params.target,
                  request.params.get('path', ''),
                  spec_segment)
    return generate_latest(get_metrics(url, spec_segment, timeout))
def _metrics(self):
    """Display metrics for Prometheus."""
    payload = exposition.generate_latest(core.REGISTRY)
    self.send_response(200)
    self.send_header('Content-Type', exposition.CONTENT_TYPE_LATEST)
    self.end_headers()
    self.wfile.write(payload)
def on_get(self, req, resp):
    """Falcon scrape handler: render the configured Collector."""
    resp.set_header('Content-Type', CONTENT_TYPE_LATEST)
    collector = Collector(self._url, self._service, exclude=self._exclude)
    resp.body = generate_latest(collector)
def onSuccess(metric_list):
    """Twisted callback: render the fetched metrics and finish the request."""
    registry = CollectorRegistry()
    registry.register(ListCollector(metric_list))
    body = generate_latest(registry)
    request.setHeader("Content-Type", "text/plain; charset=UTF-8")
    request.setResponseCode(200)
    request.write(body)
    request.finish()
def main():
    """Collect SMART metrics from all physical disks and emit them to a
    .prom file or stdout.

    Fixes: the local name ``handler`` was reused for both the syslog
    logging handler and the per-driver disk-listing callable — the
    latter is renamed ``list_pd`` to stop the shadowing; the argparse
    description was missing a space between "disks" and "and"; per-item
    ``append`` loops are replaced with ``list.extend``.
    """
    parser = argparse.ArgumentParser(
        description='Collect SMART information from all physical disks '
                    'and report as Prometheus metrics')
    parser.add_argument('--outfile', metavar='FILE.prom',
                        help='Output file (stdout)')
    parser.add_argument('--syslog', action='store_true', default=False,
                        help='Log to syslog (%(default)s)')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='Enable debug logging (%(default)s)')
    args = parser.parse_args()
    script_name = parser.prog

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    if args.syslog:
        # Route all root-logger output to the local syslog socket.
        handler = SysLogHandler(address='/dev/log')
        handler.setFormatter(logging.Formatter(script_name + ': %(message)s'))
        root_logger = logging.getLogger()
        root_logger.handlers = []
        root_logger.addHandler(handler)

    # Node-exporter's textfile collector only picks up *.prom files.
    if args.outfile and not args.outfile.endswith('.prom'):
        parser.error('Output file does not end with .prom')

    physical_disks = []
    raid_drivers = get_raid_drivers()
    if raid_drivers is None:
        log.error('Invalid value for "raid" fact: %r', raid_drivers)
        return 1
    for driver in raid_drivers:
        # Each supported raid driver has a callable listing its disks.
        list_pd = DRIVER_HANDLERS.get(driver)
        if list_pd is None:
            continue
        physical_disks.extend(list_pd())

    # TODO(filippo): handle machines with disks attached to raid controllers _and_ regular sata
    if not raid_drivers or raid_drivers == ['md']:
        physical_disks.extend(noraid_list_pd())

    log.debug('Gathering SMART data from physical disks: %r',
              [x.name for x in physical_disks])

    registry = CollectorRegistry()
    metrics = get_metrics_cache(registry, 'device_smart')
    collect_smart_metrics(physical_disks, metrics)

    if args.outfile:
        write_to_textfile(args.outfile, registry)
    else:
        sys.stdout.write(generate_latest(registry).decode('utf-8'))
def metrics():
    """Flask endpoint: process one target, then render the default registry."""
    platform = request.args.get("platform")
    name = request.args.get("target")
    # Both query parameters are mandatory.
    if platform is None or name is None:
        abort(400)
    apex_process_request(platform, name)
    return Response(generate_latest(),
                    mimetype="text/plain; version=0.0.4; charset=utf-8")
async def server_stats(request):
    """Return a web response with the plain text version of the metrics.

    :rtype: :class:`aiohttp.web.Response`
    """
    payload = generate_latest(core.REGISTRY)
    rsp = web.Response(body=payload)
    # Assigned after construction: passing CONTENT_TYPE_LATEST through
    # the constructor makes aiohttp treat the `;` as an embedded charset
    # and complain. cf. https://github.com/aio-libs/aiohttp/issues/2197
    rsp.content_type = CONTENT_TYPE_LATEST
    return rsp
def metrics():
    """Render the default registry as a plain-text response."""
    payload = generate_latest(REGISTRY)
    return Response(payload, content_type='text/plain')