def collect(self):
    """Run one speedtest against server 25305 and yield its results as gauges.

    Invokes the Ookla ``speedtest`` CLI with JSON output and emits one
    gauge per measurement: jitter, latency, download/upload bandwidth
    and packet loss.
    """
    completed = run(["speedtest", "-s", "25305", "-b", "-f", "json"],
                    capture_output=True)
    report = json.loads(completed.stdout)
    yield GaugeMetricFamily("speedtest_ping_jitter", "Ping jitter (ms)",
                            value=report["ping"]["jitter"])
    yield GaugeMetricFamily("speedtest_ping_latency", "Ping latency ms",
                            value=report["ping"]["latency"])
    yield GaugeMetricFamily("speedtest_download_bandwidth",
                            "Download bandwidth (Mibps)",
                            value=report["download"]["bandwidth"])
    yield GaugeMetricFamily("speedtest_upload_bandwidth",
                            "Upload bandwidth (Mibps)",
                            value=report["upload"]["bandwidth"])
    # packetLoss may be absent from the CLI output; .get keeps that behavior
    # (a missing value yields a family without a sample).
    yield GaugeMetricFamily("speedtest_packetloss", "Packet Loss (%)",
                            value=report.get("packetLoss"))
def collect(self):
    """Collect one gauge per DTS synchronization-job metric.

    For each job returned by ``get_dts_list``, queries its status and
    emits a gauge per entry in ``metrics``; when a metric value is
    missing from the status payload, an "up" gauge is yielded instead.

    NOTE(review): when ``value`` is None the (still empty) gauge is
    yielded after the up-gauge as well — presumably harmless for
    Prometheus, but confirm this is intended.
    """
    dts_list = self.get_dts_list()
    # label name -> jmespath expression into the job-status document
    kv = {
        "name": "SynchronizationJobName",
        "status": "Status",
        "jid": "SynchronizationJobId"
    }
    # metric-name suffix -> jmespath expression for its value
    metrics = {
        "delay": "Delay",
        "flow": "Performance.FLOW",
        "rps": "Performance.RPS",
        "initpercent": "DataInitializationStatus.Percent"
    }
    for dts in dts_list:
        data = self.get_dts_status(dts)
        for metric_key, metric_value in metrics.items():
            gauge = GaugeMetricFamily(self.format_metric_name() + metric_key, '', labels=list(kv.keys()))
            labels = [jmespath.search(v, data) for k, v in kv.items()]
            value = jmespath.search(metric_value, data)
            if value is not None:
                # Strip unit/suffix characters (e.g. "100%" -> "100").
                # NOTE(review): this also drops decimal points and signs —
                # confirm the upstream values are integral strings.
                value = "".join(filter(str.isdigit, value))
                gauge.add_metric(labels=labels, value=value)
            else:
                yield self.metric_up_gauge(self.format_metric_name() + metric_key)
            yield gauge
def collect(self):
    """Collect ip-fib heap memory metrics from the device.

    Runs ``show ip fib mem`` through vppctl, parses the tabular output,
    and returns total/used/free heap-byte gauges plus a per-VRF status
    gauge. When parsing yields no rows, the empty families are returned.
    """
    self._device.enable_test_commands()
    raw_output = self._device.exec('vppctl "show ip fib mem heap-verbosity 3"')
    parsed_rows = self._parser.ParseText(raw_output)

    heap_total = GaugeMetricFamily("epc_vppctl_ip_fib_heap_memory_total_bytes", "ip fib heap memory total bytes")
    heap_used = GaugeMetricFamily("epc_vppctl_ip_fib_heap_memory_used_bytes", "ip fib heap memory used bytes")
    heap_free = GaugeMetricFamily("epc_vppctl_ip_fib_heap_memory_free_bytes", "ip fib heap memory free bytes")
    vrf_status = GaugeMetricFamily("epc_vppctl_ip_fib_vrf_status", "ip fib vrf status", labels=["vrf"])
    families = [heap_total, heap_used, heap_free, vrf_status]

    # Nothing parsed: expose the (sample-less) families rather than failing.
    if not parsed_rows:
        return families

    first_row = parsed_rows[0]
    add_gauge_metrics(heap_total, [], parse_size(first_row[FIELD_MEMORY_TOTAL]))
    add_gauge_metrics(heap_used, [], parse_size(first_row[FIELD_MEMORY_USED]))
    add_gauge_metrics(heap_free, [], parse_size(first_row[FIELD_MEMORY_FREE]))
    for vrf in first_row[FIELD_VRF]:
        add_gauge_metrics(vrf_status, [vrf], 1)
    return families
def oss_info(self) -> GaugeMetricFamily:
    """Build an ``aliyun_meta_oss_info`` gauge with one sample per bucket.

    Iterates every bucket visible to the configured credentials and
    attaches the bucket metadata as labels; each sample's value is 1.0.
    Label keys are derived lazily from the first bucket seen.

    Returns:
        The populated gauge, or ``None`` when the account has no buckets
        (label keys cannot be derived without at least one bucket).
    """
    auth = oss2.Auth(self.ak, self.secret)
    # Build the endpoint once from the configured region.
    endpoint = 'http://oss-{region_id}.aliyuncs.com'.format(
        region_id=self.region_id)
    service = oss2.Service(auth, endpoint)
    nested_handler = None
    gauge = None
    label_keys = None
    for instance in oss2.BucketIterator(service):
        # Bug fix: the bucket endpoint was hard-coded to
        # 'http://oss-cn-beijing.aliyuncs.com', which breaks for buckets
        # outside cn-beijing; reuse the region-configured endpoint instead.
        bucket = oss2.Bucket(auth, endpoint, instance.name)
        bucket_info = bucket.get_bucket_info()
        instance_dict = {
            'name': bucket_info.name,
            'storage_class': bucket_info.storage_class,
            'creation_date': bucket_info.creation_date,
            'intranet_endpoint': bucket_info.intranet_endpoint,
            'extranet_endpoint': bucket_info.extranet_endpoint,
            'owner': bucket_info.owner.id,
            'grant': bucket_info.acl.grant,
            'data_redundancy_type': bucket_info.data_redundancy_type,
        }
        if gauge is None:  # idiom fix: 'is None', not '== None'
            label_keys = self.label_keys(instance_dict, nested_handler)
            gauge = GaugeMetricFamily('aliyun_meta_oss_info', '', labels=label_keys)
        gauge.add_metric(labels=self.label_values(instance_dict, label_keys, nested_handler), value=1.0)
    return gauge
def es_info_template(
        self,
        req,
        name,
        desc='',
        page_size=100,
        page_num=1,
        nested_handler=None,
        to_list=(lambda data: data['Instances']['Instance'])
) -> GaugeMetricFamily:
    """
    为了适配新版本sdk (adapter for the newer SDK version).

    Pages through the request's results, derives label keys from the
    first instance seen, and adds one sample (value 1.0) per instance.

    :param req: SDK request object to page through.
    :param name: metric family name.
    :param desc: metric family help text.
    :param page_size: instances fetched per page.
    :param page_num: first page number.
    :param nested_handler: optional handler for nested label values.
    :param to_list: extracts the instance list from a raw response page.
    :return: the populated gauge, or None when no instances were returned.
    """
    gauge = None
    label_keys = None
    for item in self.es_pager_generator(req, page_size, page_num, to_list):
        if gauge is None:
            # Derive the label set lazily from the first instance.
            label_keys = self.label_keys(item, nested_handler)
            gauge = GaugeMetricFamily(name, desc, labels=label_keys)
        gauge.add_metric(labels=self.label_values(item, label_keys, nested_handler), value=1.0)
    return gauge
def metric_meetings_video_participants(self, meetings):
    """Gauge of video participants summed over all meetings."""
    video_total = sum(int(meeting['videoCount']) for meeting in meetings)
    metric = GaugeMetricFamily('bbb_meetings_video_participants', "Total number of video participants in all BigBlueButton meetings")
    metric.add_metric([], video_total)
    return metric
def describe(self):
    """Describe the metric families this collector emits, without probing."""
    family_descriptions = (
        ("speedtest_ping_jitter", "Ping jitter ms"),
        ("speedtest_ping_latency", "Ping latency ms"),
        ("speedtest_download_bandwidth", "Download bandwidth (Mibps)"),
        ("speedtest_upload_bandwidth", "Upload bandwidth (Mibps)"),
        ("speedtest_packetloss", "Packet Loss (%)"),
    )
    for family_name, help_text in family_descriptions:
        yield GaugeMetricFamily(family_name, help_text)
def metric_meetings_participant_clients(self, meetings):
    """Gauge of participants across all meetings, labelled by client type."""
    by_client = self._get_participant_count_by_client(meetings)
    metric = GaugeMetricFamily('bbb_meetings_participant_clients', "Total number of participants in all BigBlueButton meetings by client", labels=["type"])
    for client_type, count in by_client.items():
        metric.add_metric([client_type.lower()], count)
    return metric
def metric_meetings_listeners(self, meetings):
    """Gauge of listeners summed over all meetings."""
    listener_total = sum(int(meeting['listenerCount']) for meeting in meetings)
    metric = GaugeMetricFamily(
        'bbb_meetings_listeners',
        "Total number of listeners in all BigBlueButton meetings")
    metric.add_metric([], listener_total)
    return metric
def metric_meetings_participants_origin(self, meetings):
    """Gauge of participants per (server, origin) pair across all meetings."""
    by_origin = self._get_participants_count_by_origin(meetings)
    metric = GaugeMetricFamily(
        'bbb_meetings_participants_origin',
        "Total number of participants in all BigBlueButton meetings by origin servername",
        labels=["server", "name"])
    for (server, origin), count in by_origin.items():
        metric.add_metric([server.lower(), origin.lower()], count)
    return metric
def metric_recordings_unprocessed_from_disk(self):
    """Gauge of recordings queued for processing, counted from disk."""
    logging.debug("Querying disk for recordings unprocessed data")
    unprocessed_count = recordings_unprocessed_from_disk(
        self.recordings_metrics_base_dir)
    metric = GaugeMetricFamily(
        'bbb_recordings_unprocessed',
        "Total number of BigBlueButton recordings enqueued to "
        "be processed (scraped from disk)")
    metric.add_metric([], unprocessed_count)
    return metric
def metric_recordings_deleted_from_disk(self):
    """Gauge of deleted recordings, counted from disk."""
    logging.debug("Querying disk for recordings deleted data")
    deleted_count = recordings_deleted_from_disk(
        self.recordings_metrics_base_dir)
    metric = GaugeMetricFamily(
        'bbb_recordings_deleted',
        "Total number of BigBlueButton recordings deleted "
        "(scraped from disk)")
    metric.add_metric([], deleted_count)
    return metric
def metric_recordings_deleted(self, bbb_api_latency_metric):
    """Gauge of deleted recordings as reported by the BBB API.

    Side effect: folds the getRecordings call latency into the running
    histogram and exports its buckets via ``bbb_api_latency_metric``.
    """
    logging.debug("Requesting via API recordings deleted data")
    metric = GaugeMetricFamily('bbb_recordings_deleted', "Total number of BigBlueButton recordings deleted")
    deleted_recordings, call_latency = execution_duration(api.get_recordings)("deleted")
    metric.add_metric([], len(deleted_recordings))
    # Record this observation before exporting the histogram state.
    self.histogram_data_recording_deleted_latency.add(call_latency)
    bbb_api_latency_metric.add_metric(
        ["getRecordings", "state=deleted"],
        self.histogram_data_recording_deleted_latency.get_buckets(),
        self.histogram_data_recording_deleted_latency.sum)
    return metric
def collect(self):
    """Yield a gauge of device counts per (query, application version)."""
    version_gauge = GaugeMetricFamily(
        name="extra_metrics_application_version",
        documentation='a summary of how many devices are using a particular app & version',
        labels=['query_name', 'application_version', 'query_id'])
    for row in self.app_results:
        label_values = [row['query_name'], row['app_version'], row["id"]]
        version_gauge.add_metric(label_values, row["total"])
    yield version_gauge
def metric_recordings_processing(self, bbb_api_latency_metric):
    """Gauge of recordings currently processing, as reported by the BBB API.

    Side effect: folds the getRecordings call latency into the running
    histogram and exports its buckets via ``bbb_api_latency_metric``.
    (Local renamed from the misleading 'histogram' — it is a gauge.)
    """
    logging.debug("Requesting via API recordings processing data")
    metric = GaugeMetricFamily('bbb_recordings_processing', "Total number of BigBlueButton recordings processing")
    processing_recordings, call_latency = execution_duration(api.get_recordings)("processing")
    metric.add_metric([], len(processing_recordings))
    # Record this observation before exporting the histogram state.
    self.histogram_data_recording_processing_latency.add(call_latency)
    bbb_api_latency_metric.add_metric(
        ["getRecordings", "state=processing"],
        self.histogram_data_recording_processing_latency.get_buckets(),
        self.histogram_data_recording_processing_latency.sum)
    return metric
def get_total_squest_request_per_state():
    """Gauge of Squest requests grouped by state."""
    gauge = GaugeMetricFamily(
        "squest_request_per_state_total",
        'Total number of request per state in squest',
        labels=['state'])
    # Aggregate in the database: one row per distinct state with its count.
    per_state = Request.objects.values('state').order_by(
        'state').annotate(counter=Count('state'))
    for row in per_state:
        gauge.add_metric([row["state"]], row["counter"])
    return gauge
def __overall_grid(self, site, timestamp, prefix):
    """Build the grid power gauge (W); a missing/None P_Grid maps to 0.0."""
    grid_power = site.get('P_Grid') or 0.0
    gauge = GaugeMetricFamily(f'{prefix}fronius_power_W_average', '',
                              labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=['fronius_GetPowerFlowRealtimeData.power', 'power', 'grid'],
        value=grid_power,
        timestamp=timestamp)
    return gauge
def __E_Day(self, site, timestamp, prefix):
    """Build today's energy gauge in kWh.

    The raw E_Day value is divided by 1000 (presumably Wh -> kWh — TODO
    confirm); missing/None/zero values map to 0.0.
    """
    # TODO check the accumulator
    raw_today = site.get('E_Day', 0.0)
    kwh_today = raw_today / 1000 if raw_today else 0.0
    gauge = GaugeMetricFamily(f'{prefix}fronius_energy_today_kWh_average', '',
                              labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=['fronius_GetPowerFlowRealtimeData', 'energy', 'today'],
        value=kwh_today,
        timestamp=timestamp)
    return gauge
def get_total_support():
    """Gauge of Squest supports grouped by (service, state)."""
    gauge = GaugeMetricFamily("squest_support_total",
                              'Total number of support in squest',
                              labels=['service', 'state'])
    # Aggregate in the database: one row per (service name, state) pair.
    grouped = Support.objects.values(
        'instance__service__name', 'state').annotate(total_count=Count('id'))
    for row in grouped:
        gauge.add_metric([row["instance__service__name"], row["state"]],
                         row["total_count"])
    return gauge
def get_total_squest_instance_per_state():
    """Gauge of Squest instances grouped by state."""
    gauge = GaugeMetricFamily(
        "squest_instance_per_state_total",
        'Total number of instance per state in squest',
        labels=['state'])
    # Aggregate in the database: one row per distinct state with its count.
    per_state = Instance.objects.values('state').order_by(
        'state').annotate(counter=Count('state'))
    for row in per_state:
        gauge.add_metric([row["state"]], row["counter"])
    return gauge
def get_total_request():
    """Gauge of Squest requests grouped by (service, state)."""
    gauge = GaugeMetricFamily("squest_request_total",
                              'Total number of request in squest',
                              labels=['service', 'state'])
    # Aggregate in the database: one row per (service name, state) pair.
    grouped = Request.objects.values(
        'instance__service__name', 'state').annotate(total_count=Count('id'))
    for row in grouped:
        gauge.add_metric([row["instance__service__name"], row["state"]],
                         row["total_count"])
    return gauge
def __E_Year(self, site, timestamp, prefix):
    """Build this year's energy gauge in kWh.

    The raw E_Year value is divided by 1000 (presumably Wh -> kWh — TODO
    confirm); missing/None/zero values map to 0.0.

    NOTE(review): the metric name lacks the 'energy_' segment used by its
    E_Day counterpart (fronius_energy_today_kWh_average) — confirm whether
    'fronius_year_kWh_average' is intentional before renaming, since a
    rename would break existing dashboards.
    """
    # TODO check the accumulator
    raw_year = site.get('E_Year', 0.0)
    kwh_year = raw_year / 1000 if raw_year else 0.0
    gauge = GaugeMetricFamily(f'{prefix}fronius_year_kWh_average', '',
                              labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=['fronius_GetPowerFlowRealtimeData.energy.year', 'energy', 'year'],
        value=kwh_year,
        timestamp=timestamp)
    return gauge
def get_quota_limit():
    """Gauge of quota limits per (billing group, quota attribute).

    Example sample:
        squest_quota_limit{billing_group="5G", quota_attribute='cpu'} 34
    """
    gauge = GaugeMetricFamily(
        "squest_quota_limit",
        'Limit of quota per billing group and attribute',
        labels=['billing_group', 'quota_attribute'])
    for binding in QuotaBinding.objects.all():
        gauge.add_metric([binding.billing_group.name, binding.quota.name],
                         binding.limit)
    return gauge
def collect(self):
    """Probe the websocket once and yield up/latency/response gauges."""
    outcome = self._probe.probe()
    yield GaugeMetricFamily('websocket_probe_success',
                            '1 if websocket is up 0 otherwise',
                            value=outcome.up)
    yield GaugeMetricFamily('websocket_probe_latency',
                            'latency in connection',
                            value=outcome.latency,
                            unit='milliseconds')
    yield GaugeMetricFamily(
        'websocket_probe_received_expected_response',
        '1 if the expected message received after connection established 0 otherwise',
        value=outcome.received)
def __inverter_output(self, id_, inverter, timestamp, prefix):
    """Build the output power gauge (W) for one inverter; None maps to 0.0."""
    output_watts = inverter.get('P', 0.0) or 0.0
    gauge = GaugeMetricFamily(f'{prefix}fronius_inverter_output_W_average', '',
                              labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=[
            'fronius_GetPowerFlowRealtimeData.inverters.output',
            'inverters',
            f'inverter_{id_}'
        ],
        value=output_watts,
        timestamp=timestamp)
    return gauge
def __overall_pvs(self, site, timestamp, prefix):
    """Build the photovoltaics power gauge (W); None maps to 0.0."""
    pv_power = site.get('P_PV', 0.0) or 0.0
    gauge = GaugeMetricFamily(f'{prefix}fronius_power_W_average', '',
                              labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=[
            'fronius_GetPowerFlowRealtimeData.power',
            'power',
            'photovoltaics'
        ],
        value=pv_power,
        timestamp=timestamp)
    return gauge
def get_quota_consumed():
    """Gauge of quota consumption per (billing group, quota attribute).

    Example sample:
        squest_quota_consumed{billing_group="5G", quota_attribute='cpu'} 34
    """
    gauge = GaugeMetricFamily(
        "squest_quota_consumed",
        'Consumption of quota per billing group and attribute',
        labels=['billing_group', 'quota_attribute'])
    for binding in QuotaBinding.objects.all():
        gauge.add_metric([binding.billing_group.name, binding.quota.name],
                         binding.consumed)
    return gauge
def __watts_per_sqm(self, sensor_data, timestamp, prefix):
    """Build the W/m2 gauge from sensor channel '2'; None maps to 0.0."""
    irradiance = sensor_data.get('2', {}).get("Value", 0.0) or 0.0
    gauge = GaugeMetricFamily(
        f'{prefix}fronius_watts_per_sqm_output_W_m2_average', '',
        labels=['chart', 'family', 'dimension'])
    gauge.add_metric(
        labels=[
            'fronius_GetSensorRealtimeData.watts.per.sqm.output',
            'sensors',
            'wattsPerSqm'
        ],
        value=irradiance,
        timestamp=timestamp)
    return gauge
def collect(self):
    """Scrape osta.ee auctions for the configured user and yield gauges.

    Yields one ``osta_item_price`` and one ``osta_item_bids`` family,
    each with a sample per auctioned item labelled by user, item and
    slugified title.

    Bug fix: the original created (and yielded) a brand-new family per
    item, so a scrape with N items exposed N duplicate families per
    metric name — invalid Prometheus exposition. Each family is now
    built once and samples are added per item.
    """
    logger.info('Scraping osta.ee for new metrics...')
    price_gauge = GaugeMetricFamily("osta_item_price",
                                    'Price of an auctioned item',
                                    labels=['user_id', 'item_id', 'title'])
    bids_gauge = GaugeMetricFamily("osta_item_bids",
                                   'Number of bids for an auctioned item',
                                   labels=['user_id', 'item_id', 'title'])
    for user_item in self.osta.get_user_items(self.user_id):
        labels = [
            str(self.user_id),
            str(user_item.get('itemId')),
            slugify(user_item.get('title'))
        ]
        price_gauge.add_metric(labels, float(user_item.get('currentPriceEur')))
        bids_gauge.add_metric(labels, int(user_item.get('currentBids')))
    yield price_gauge
    yield bids_gauge
    logger.info('Scraping completed')
def collect(self):
    """Drain this host's message queue and yield one gauge per parameter.

    Finds the entry in the global ``map_of_queue`` whose key's second
    '_'-separated segment equals this collector's IP (last match wins,
    as in the original), aggregates all queued messages, and yields a
    per-host gauge for each aggregated parameter. Returns without
    yielding when no key matches.
    """
    global map_of_queue
    matching_key = ""
    for candidate in map_of_queue.keys():
        if candidate.split("_")[1] == self.ip:
            matching_key = candidate
    if matching_key == "":
        return None
    aggregator = SummMessages()
    pending = map_of_queue[matching_key]
    while not pending.empty():
        aggregator.add(pending.get())
    # Special-cased help strings; every other parameter documents itself.
    help_texts = {"linklatency": "avg_linklatency", "jitter": "avg_jitter"}
    for parameter, values in aggregator.get_result().items():
        family = GaugeMetricFamily(parameter,
                                   help_texts.get(parameter, parameter),
                                   labels=['host'])
        for entry in values:
            family.add_metric([entry['host']], entry['value'])
        yield family