class TestSummary(unittest.TestCase):
    """Exercise the Summary metric: direct observation plus both flavours
    of the ``time()`` helper (function decorator and context manager)."""

    def setUp(self):
        # A fresh registry per test so every sample starts from zero.
        self.registry = CollectorRegistry()
        self.summary = Summary('s', 'help', registry=self.registry)

    def test_summary(self):
        """observe() increments the count by one and the sum by the value."""
        self.assertEqual(0, self.registry.get_sample_value('s_count'))
        self.assertEqual(0, self.registry.get_sample_value('s_sum'))
        self.summary.observe(10)
        self.assertEqual(1, self.registry.get_sample_value('s_count'))
        self.assertEqual(10, self.registry.get_sample_value('s_sum'))

    def test_function_decorator(self):
        """time() used as a decorator records one observation per call."""
        self.assertEqual(0, self.registry.get_sample_value('s_count'))

        @self.summary.time()
        def f():
            pass

        f()
        self.assertEqual(1, self.registry.get_sample_value('s_count'))

    def test_block_decorator(self):
        """time() used as a context manager records one observation."""
        self.assertEqual(0, self.registry.get_sample_value('s_count'))

        with self.summary.time():
            pass

        self.assertEqual(1, self.registry.get_sample_value('s_count'))
class RQCollector(object):
    """RQ stats collector.

    Args:
        connection (redis.Redis): Redis connection instance.
        worker_class (type): RQ Worker class
        queue_class (type): RQ Queue class
    """

    def __init__(self, connection=None, worker_class=None, queue_class=None):
        self.connection = connection
        self.worker_class = worker_class
        self.queue_class = queue_class

        # RQ data collection count and time in seconds
        self.summary = Summary('rq_request_processing_seconds',
                               'Time spent collecting RQ data')

    def collect(self):
        """Collect RQ Metrics.

        Note:
            This method will be called on registration and every time the
            metrics are requested.

        Yields:
            RQ metrics for workers and jobs.

        Raises:
            redis.exceptions.RedisError: On Redis connection errors
        """
        logger.debug('Collecting the RQ metrics...')

        # Time the whole collection pass with the processing summary.
        with self.summary.time():
            with Connection(self.connection):
                # One gauge sample per worker, value fixed at 1; the
                # interesting data lives in the labels.
                rq_workers = GaugeMetricFamily(
                    'rq_workers', 'RQ workers',
                    labels=['name', 'state', 'queues'])
                for worker in get_workers_stats(self.worker_class):
                    labels = [worker['name'], worker['state'],
                              ','.join(worker['queues'])]
                    rq_workers.add_metric(labels, 1)
                yield rq_workers

                # Job counts broken down by queue and job status.
                rq_jobs = GaugeMetricFamily('rq_jobs', 'RQ jobs by state',
                                            labels=['queue', 'status'])
                for queue_name, jobs in get_jobs_by_queue(self.queue_class).items():
                    for status, count in jobs.items():
                        rq_jobs.add_metric([queue_name, status], count)
                yield rq_jobs

        logger.debug('RQ metrics collection finished')
class DummyExecutor(Executor):
    """Minimal Executor that times invocations of the ``/foo`` endpoint
    with a Summary registered on the runtime's metrics registry."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Attach the Summary to the registry provided via runtime_args.
        self.summary = Summary('a', 'A',
                               registry=self.runtime_args.metrics_registry)

    @requests(on='/foo')
    def foo(self, docs, **kwargs):
        # Record one timed observation per request; the body is a no-op.
        with self.summary.time():
            ...
class RequestProxy:
    """Thin wrapper around ``urllib`` that instruments every outgoing
    request with Prometheus metrics (total count and latency).

    Args:
        base_url: Prefix prepended to every relative URL passed to the
            public helpers.
    """

    def __init__(self, base_url: str):
        self.base_url = base_url
        # NOTE(review): metric renamed from the misspelled 'total_reqeusts';
        # dashboards/alerts referencing the old name must be updated.
        self.request_counter = Counter(
            "total_requests",
            "the total amount of requests sent since process start",
        )
        self.request_time = Summary("request_latency", "time of request send")
        # Browser-like User-Agent so scraped sites serve their normal pages.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"
        }
        self.logger = logging.getLogger("scraper.proxy")

    def get_bs_from_url(self, url: str) -> BeautifulSoup:
        """Fetch *url* (relative to base_url) and parse the body as HTML."""
        response_object = self.__send_request(url)
        content = self.__get_response_content_as_str(response_object)
        return BeautifulSoup(content, "html.parser")

    def get_picture_from_url(self, url: str) -> bytes:
        """Fetch *url* and return the raw response body (e.g. image bytes)."""
        response_object = self.__send_request(url)
        return response_object.read()

    def __send_request(self, url: str) -> HTTPResponse:
        """Send a GET request to base_url + url, updating the metrics.

        Returns:
            The open HTTP response (caller is responsible for reading it).

        Raises:
            HTTPException: If the server answers with a non-200 status.
        """
        request_obj = request.Request(self.base_url + url, headers=self.headers)
        self.logger.debug(f"Sending request to {url}")
        # Only the network round-trip is timed; bookkeeping happens after.
        with self.request_time.time():
            response: HTTPResponse = request.urlopen(request_obj)
        self.request_counter.inc()
        if response.status == 200:
            return response
        self.logger.error(
            f"Send request to {url} and got response code {response.status}"
        )
        # Close the failed response before raising so the socket is released
        # (previously it leaked), and give the exception a useful message.
        status = response.status
        response.close()
        raise HTTPException(f"unexpected response code {status} for {url}")

    def __get_response_content_as_str(self, response: HTTPResponse) -> str:
        """Decode the response body using its charset, or ISO-8859-1."""
        encoding = response.info().get_content_charset("iso-8859-1")
        return response.read().decode(encoding)
def prometheus_solid(context):
    """Record two sample latency summaries on the pipeline's Prometheus
    registry and verify both round-trip through that registry."""
    registry = context.resources.prometheus.registry

    # Summary fed by a direct observation.
    s = Summary(
        'request_latency_seconds',
        'Description of summary',
        registry=registry,
    )
    s.observe(4.7)

    # Summary fed via the time() context manager around a 1s sleep.
    request_time = Summary(
        'response_latency_seconds',
        'Response latency (seconds)',
        registry=registry,
    )
    with request_time.time():
        time.sleep(1)

    # The direct observation must match exactly (within EPS) ...
    recorded = registry.get_sample_value('request_latency_seconds_sum')
    assert abs(4.7 - recorded) < EPS

    # ... while the timed block only needs to be in the right ballpark.
    recorded = registry.get_sample_value('response_latency_seconds_sum')
    assert abs(1.0 - recorded) < 1.0
class FritzBoxExporter(): # pylint: disable=too-few-public-methods '''FrizBox exporter implementation retrieving metrics from a FritzBox by TR-064. This is used by the prometheus client implementation to publish the metrics.''' config_desc = { 'FRITZ_HOST': { 'default': 'fritz.box', 'help': 'Hostname of the FritzBox to query.' }, 'FRITZ_USER': { 'required': True, 'help': 'Username to log in to the FritzBox to retrieve metrics' }, 'FRITZ_PASS': { 'required': True, 'help': 'Password to log in to the FritzBox to retrieve metrics' }, 'CACHE_TIME': { 'default': 30, 'help': ('Time in seconds to keep results in the internal cache before ' 'querying the FritzBox again') }, } def __init__(self, settings, metrics): self.request_tm = Summary( 'pfbex_tr64_requests', 'Time and count for each TR-64 request to the FritzBox') self.conn = fc.FritzConnection(address=settings.FRITZ_HOST, user=settings.FRITZ_USER, password=settings.FRITZ_PASS) self._settings = settings self._cfg = metrics for item in self._cfg.values(): item['fails'] = 0 self._serial = 'n/a' self._data = {} self._last_clear_time = datetime.now() def _reset_request_cache(self): '''Clear the request result cache.''' now = datetime.now() if (now - self._last_clear_time).seconds > MIN_UPDATE_TIME: logger.debug('Clearing request cache.') self._data.clear() self._last_clear_time = now def _call_action(self, service, action): '''Call an TR-64 service action and return the result. If the call fails, returns None. The result (both valid results and errors) are stored in the cache for the current scrape. 
''' key = f'{service}:{action}' # Return result from cache if available if key in self._data: return self._data[key] # Retrieve service information try: with self.request_tm.time(): res = self.conn.call_action(service, action) except Exception as ex: # pylint: disable=broad-except res = None logger.debug( f'Failed to call service action {service}:{action}: {ex}') self._data[key] = res return res def _collect_pfbex_info(self): # pylint: disable=no-self-use '''Provide pfbex version information to Prometheus.''' label_names = ['Version'] label_values = [APP_VERSION] met = GaugeMetricFamily('pfbex_info', 'pfbex information', labels=label_names) met.add_metric(label_values, 1.0) yield met def _collect_device_info(self): '''Provide a Prometheus metric with the device information (model name, software version and serial number). At the same time, this function stores the FritzBox serial number for later use in all other metrics. ''' res = self._call_action('DeviceInfo1', 'GetInfo') if not res: self._serial = 'n/a' return self._serial = res['NewSerialNumber'] label_names = ['ModelName', 'SoftwareVersion', 'Serial'] met = GaugeMetricFamily('fritzbox_info', 'FritzBox device information', labels=label_names) label_values = [ res['NewModelName'], res['NewSoftwareVersion'], res['NewSerialNumber'] ] met.add_metric(label_values, 1.0) yield met def _get_metric_label_names(self, metric): # pylint: disable=no-self-use '''Calculate label names for a Prometheus metric''' m_labels = set() for item in metric['items']: labels = item.get('labels', {}) m_labels.update(labels.keys()) label_names = ['serial'] label_names.extend(m_labels) return label_names def _collect_metric_item(self, item, label_names): '''Collect metric data for one Prometheus metric instance (i.e. 
one assignment of label values).''' labels = item.get('labels', {}) labels.update({'serial': self._serial}) label_values = [labels[name] for name in label_names] service = item['service'] action = item['action'] attr = item['attr'] service_data = self._call_action(service, action) if not service_data: logger.debug(f"No data available for '{service}:{action}'.") return None if not attr in service_data: # data has been retrieved, but attribute is missing logger.warning( f"Attribute '{attr}' not found in data of '{service}:{action}'." ) logger.debug(f'Available data: {service_data}') return None fct = item.get('fct', lambda x: x) value = service_data[attr] try: value = fct(value) value = float(value) except Exception as ex: # pylint: disable=broad-except logger.warning(f"Could not convert value '{attr}={value}' for " f"'{service}:{action}: {ex}") return (label_values, value) def _collect_metric(self, metric_name, metric): '''Collect data for one Prometheus metric.''' label_names = self._get_metric_label_names(metric) metric_type = metric.get('type', 'gauge') if metric_type == 'counter': met = CounterMetricFamily(metric_name, metric['doc'], labels=label_names) elif metric_type == 'gauge': met = GaugeMetricFamily(metric_name, metric['doc'], labels=label_names) else: logger.error( f"Invalid metric type definition '{metric_type}' for metric " f"'{metric_name}'. 
Using default type 'gauge'.") met = GaugeMetricFamily(metric_name, metric['doc'], labels=label_names) # Calculate the metric labels and values item_count = 0 for item in metric['items']: result = self._collect_metric_item(item, label_names) if result is None: continue item_count += 1 (label_values, value) = result met.add_metric(label_values, value) if item_count > 0: yield met else: logger.debug( f"Dropping metric '{metric_name}', because no data was added.") def _collect_metrics(self): '''Loop over all metrics configurations and collect the data.''' for name, metric in self._cfg.items(): yield from self._collect_metric(name, metric) def collect(self): '''Collect all metrics. This function is called by the Prometheus client implementation.. ''' # Clear the cache, so that no old data is reported. self._reset_request_cache() yield from self._collect_pfbex_info() # Fetch device info about the FritzBox. This function has to be called first to # set the FritzBox serial number for the following metrics. yield from self._collect_device_info() # Fetch data from FritzBox and generate metrics yield from self._collect_metrics()