def get_metric_vector(name):
    """Collect every sample of the metric *name* from the registry.

    Returns a list of ``(labels, value)`` tuples, one per sample whose
    sample name equals *name*.
    """
    matches = []
    for family in registry.collect():
        matches.extend(
            (labels, value)
            for sample_name, labels, value in family.samples
            if sample_name == name
        )
    return matches
def test_converge_complete(self):
    """
    At the end of a convergence iteration, ``_CONVERGE_COMPLETE`` is
    updated to the current time.
    """
    interval = 45
    clock = MemoryReactorClock()
    config = DeploymentConfiguration(
        domain=u"s4.example.com",
        kubernetes_namespace=u"testing",
        subscription_manager_endpoint=URL.from_text(u"http://localhost:8000"),
        s3_access_key_id=u"access key id",
        s3_secret_key=u"secret key",
        introducer_image=u"introducer:abcdefgh",
        storageserver_image=u"storageserver:abcdefgh",
    )
    state_dir = FilePath(self.mktemp().decode("ascii"))
    state_dir.makedirs()
    subscriptions = memory_client(state_dir, config.domain)
    kube = KubeClient(k8s=memory_kubernetes().client())
    region = FakeAWSServiceRegion(
        access_key=config.s3_access_key_id,
        secret_key=config.s3_secret_key,
    )
    self.successResultOf(
        region.get_route53_client().create_hosted_zone(u"foo", config.domain),
    )
    service = _convergence_service(
        clock, interval, config, subscriptions, kube, region,
    )
    service.startService()
    clock.advance(interval)
    # The last value of the gauge's most recent sample is the
    # completion timestamp recorded by the convergence loop.
    last_completed = next(iter(
        metric.samples[-1][-1]
        for metric in REGISTRY.collect()
        if metric.name == u"s4_last_convergence_succeeded"
    ))
    self.assertThat(clock.seconds(), Equals(last_completed))
def test_converge_complete(self):
    """
    At the end of a convergence iteration, ``_CONVERGE_COMPLETE`` is
    updated to the current time.
    """
    interval = 45
    reactor = MemoryReactorClock()
    deploy_config = DeploymentConfiguration(
        domain=u"s4.example.com",
        kubernetes_namespace=u"testing",
        subscription_manager_endpoint=URL.from_text(
            u"http://localhost:8000",
        ),
        s3_access_key_id=u"access key id",
        s3_secret_key=u"secret key",
        introducer_image=u"introducer:abcdefgh",
        storageserver_image=u"storageserver:abcdefgh",
    )
    state_path = FilePath(self.mktemp().decode("ascii"))
    state_path.makedirs()
    subscription_client = memory_client(
        state_path,
        deploy_config.domain,
    )
    k8s_client = KubeClient(k8s=memory_kubernetes().client())
    aws_region = FakeAWSServiceRegion(
        access_key=deploy_config.s3_access_key_id,
        secret_key=deploy_config.s3_secret_key,
    )
    zone_created = aws_region.get_route53_client().create_hosted_zone(
        u"foo",
        deploy_config.domain,
    )
    self.successResultOf(zone_created)
    service = _convergence_service(
        reactor,
        interval,
        deploy_config,
        subscription_client,
        k8s_client,
        aws_region,
    )
    service.startService()
    reactor.advance(interval)
    # Pull the newest sample value of the completion-time gauge.
    completion_samples = [
        metric.samples[-1][-1]
        for metric in REGISTRY.collect()
        if metric.name == u"s4_last_convergence_succeeded"
    ]
    last_completed = completion_samples[0]
    self.assertThat(reactor.seconds(), Equals(last_completed))
def getMetricVector(self, metric_name):
    """Returns the values for all labels of a given metric.

    The result is returned as a list of (labels, value) tuples, where
    `labels` is a dict.

    This is quite a hack since it relies on the internal representation
    of the prometheus_client, and it should probably be provided as a
    function there instead.
    """
    # NOTE: deliberately reads the private ``_samples`` attribute, as
    # acknowledged in the docstring above.
    return [
        (labels, value)
        for family in REGISTRY.collect()
        for name, labels, value in family._samples
        if name == metric_name
    ]
def getMetricVector(self, metric_name):
    """Returns the values for all labels of a given metric.

    The result is returned as a list of (labels, value) tuples, where
    `labels` is a dict.

    This is quite a hack since it relies on the internal representation
    of the prometheus_client, and it should probably be provided as a
    function there instead.
    """
    output = []
    for family in REGISTRY.collect():
        # ``_samples`` is private prometheus_client state (see docstring).
        output.extend(
            (labels, value)
            for name, labels, value in family._samples
            if name == metric_name
        )
    return output
def test_timer(log_content):
    """The session-duration histogram's count/sum match the fixture log."""
    expected = {
        "haproxy_log_session_duration_milliseconds_count": {
            "cache.api.finn.no-backend": 11.0,
            "statistics": 2.0,
        },
        "haproxy_log_session_duration_milliseconds_sum": {
            "cache.api.finn.no-backend": 19.0,
            "statistics": 0.0,
        },
    }
    observe = timer(
        "session_duration_milliseconds",
        ["frontend_name", "backend_name"],
        DEFAULT_TIMER_BUCKETS,
    )
    for raw_line in log_content.splitlines():
        # Strip the journald prefix before parsing the log line.
        cleaned = JOURNAL_REGEX.sub('', raw_line.strip())
        parsed = Line(cleaned.strip())
        if parsed.valid:
            observe(parsed)
    for metric in REGISTRY.collect():
        for name, labels, value in metric.samples:
            if name in expected:
                assert value == expected[name][labels["backend_name"]]
def get_metric(name, **labels):
    """Return the value of the sample whose name and label dict match.

    Returns ``None`` when no matching sample exists in the registry.
    """
    for family in registry.collect():
        for sample in family.samples:
            # sample is (name, labels, value, timestamp, exemplar)
            if sample[0] == name and sample[1] == labels:
                return sample[2]
    return None
nr_of_slots = sorted(slots)[-1] for slot in range(1, nr_of_slots + 1): # print(slot) filter_tasks = list( filter(lambda task: task['Slot'] == slot, tasks)) sorted_tasks = time_helpers.sort_tasks_by_timestamp( filter_tasks) try: if sorted_tasks[-1]['Status']['State'] == 'running': live_replicas += 1 except IndexError: if len(sorted_tasks) == 1 and sorted_tasks[0]['Status'][ 'State'] == 'running': live_replicas += 1 elif 'Global' in service.attrs['Spec']['Mode'].keys(): sorted_tasks = time_helpers.sort_tasks_by_timestamp(tasks) if sorted_tasks[-1]['Status']['State'] == 'running': live_replicas += 1 return live_replicas if __name__ == '__main__': # Start up the server to expose the metrics. start_http_server(8000) REGISTRY.register(CustomCollector()) # Generate some requests. while True: REGISTRY.collect() time.sleep(2)
def _get_counter_value(name):
    """Return the value of the first sample of the metric family *name*.

    Raises ``IndexError`` when the metric is absent from the registry,
    matching the original behaviour.
    """
    matching = [
        family for family in REGISTRY.collect() if family.name == name
    ]
    return matching[0].samples[0].value