# Example 1
def emit_histogram_metric(registry: CollectorRegistry, metric_name: str,
                          metric_description: str, seconds: float):
    """
    Emits a metric of type Histogram, that takes into account the number of times a function is called in a period of time.
    """
    try:
        # Decorate the caller-supplied name/description before registering.
        metric_name = metric_name + "Histogram"
        metric_description = "Histogram metric description: " + metric_description

        histogram = Histogram(metric_name, metric_description,
                              registry=registry)
        histogram.observe(seconds)

        @histogram.time()
        def dummy_function_with_sleep(seconds):
            """A dummy function"""
            time.sleep(seconds)

        # Feed a handful of timed calls into the histogram buckets.
        for pause in (0.1, 0.2, 0.3, 0.2, 0.1):
            dummy_function_with_sleep(pause)

        ok_message("Histogram metric '{}' was created".format(metric_name))
    except Exception as error:
        error_message(
            "Error while emitting Histogram metric: {}".format(error))
# Example 2
 def test_histogram(self):
     """Observe a single value and compare the JSON-exported samples."""
     s = Histogram('hh', 'A histogram', registry=self.registry)
     s.observe(0.05)
     # Buckets are cumulative, so every bound >= 0.05 counts 1.0 and the
     # smaller bounds stay 0.0.  The hh_created value of 123.456 is
     # presumably a timestamp pinned by the test harness -- confirm in setUp.
     self.assertEqual(
         json.loads(
             """{"hh": {"samples": [{"sample_name": "hh_bucket", "labels": {"le": "0.005"}, 
     "value": "0.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.01"}, 
     "value": "0.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.025"}, 
     "value": "0.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.05"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.075"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.1"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.25"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.5"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "0.75"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "1.0"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "2.5"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "5.0"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "7.5"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "10.0"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_bucket", "labels": {"le": "+Inf"}, 
     "value": "1.0", "timestamp": null, "exemplar": {}}, {"sample_name": "hh_count", "labels": {}, "value": "1.0", 
     "timestamp": null, "exemplar": {}}, {"sample_name": "hh_sum", "labels": {}, "value": "0.05", "timestamp": 
     null, "exemplar": {}}, {"sample_name": "hh_created", "labels": {}, "value": "123.456", "timestamp": null, 
     "exemplar": {}}], "help": "A histogram", "type": "histogram"}}"""),
         json.loads(self.json_exporter.generate_latest_json()))
# Example 3
 def time(self, key, time):
     """ Timer metric

     Records one observation of `time` (seconds) under the metric `key`.

     NOTE(review): a fresh Histogram is constructed on every call; with a
     plain prometheus_client Histogram a second call with the same key would
     raise a duplicated-timeseries error -- presumably `Histogram` here is a
     pre-configured wrapper/partial (see the no-value-for-parameter
     suppression). Confirm before reuse.
     NOTE(review): the parameter name `time` shadows any `time` module
     imported in this file, inside this method only.
     """
     prometheus_histogram = Histogram(  # pylint: disable=no-value-for-parameter
         key
     )
     prometheus_histogram.observe(time)
    def test_histogram(self):
        """One observation: check the full OpenMetrics text exposition."""
        s = Histogram('hh', 'A histogram', registry=self.registry)
        s.observe(0.05)
        # Buckets are cumulative: every upper bound >= 0.05 reports 1.0.
        # hh_created 123.456 is presumably a timestamp pinned by the test
        # harness -- confirm in setUp.
        self.assertEqual(
            b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="0.005"} 0.0
hh_bucket{le="0.01"} 0.0
hh_bucket{le="0.025"} 0.0
hh_bucket{le="0.05"} 1.0
hh_bucket{le="0.075"} 1.0
hh_bucket{le="0.1"} 1.0
hh_bucket{le="0.25"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="0.75"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="2.5"} 1.0
hh_bucket{le="5.0"} 1.0
hh_bucket{le="7.5"} 1.0
hh_bucket{le="10.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_sum 0.05
hh_created 123.456
# EOF
""", generate_latest(self.registry))
# Example 5
    def test_histogram(self):
        """Same single-observation check against the plain-text exposition
        format (no hh_created sample and no OpenMetrics # EOF marker)."""
        s = Histogram("hh", "A histogram", registry=self.registry)
        s.observe(0.05)
        # Buckets are cumulative: every upper bound >= 0.05 reports 1.0.
        self.assertEqual(
            b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="0.005"} 0.0
hh_bucket{le="0.01"} 0.0
hh_bucket{le="0.025"} 0.0
hh_bucket{le="0.05"} 1.0
hh_bucket{le="0.075"} 1.0
hh_bucket{le="0.1"} 1.0
hh_bucket{le="0.25"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="0.75"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="2.5"} 1.0
hh_bucket{le="5.0"} 1.0
hh_bucket{le="7.5"} 1.0
hh_bucket{le="10.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_sum 0.05
""",
            generate_latest(self.registry),
        )
# Example 6
def histogram_example(gateway: str = '192.168.10.3:9091'):
    """Create a histogram in a fresh registry, observe one value, push it.

    Parameters
    ----------
    gateway: str, (default='192.168.10.3:9091')
        host:port of the Prometheus Pushgateway; the default preserves the
        previously hard-coded address, so existing callers are unaffected.
    """
    registry = CollectorRegistry()
    # NOTE(review): the metric is named 'test_summary' although it is a
    # histogram -- kept as-is for backward compatibility with any existing
    # dashboards/queries.
    h = Histogram('test_summary',
                  'Description of histogram',
                  registry=registry)
    h.observe(2)
    # Job name resolves to 'histogram' (the class name, lower-cased).
    push_to_gateway(gateway,
                    job=Histogram.__name__.lower(),
                    registry=registry)
# Example 7
 def prometheus_solid(context):
     """Record a fixed pipeline runtime and verify it via the registry sum."""
     prom_registry = context.resources.prometheus.registry
     runtime_hist = Histogram(
         'pipeline_runtime_seconds',
         'Description of histogram',
         registry=prom_registry,
     )
     runtime_hist.observe(4.7)
     recorded = prom_registry.get_sample_value('pipeline_runtime_seconds_sum')
     assert abs(4.7 - recorded) < EPS
# Example 8
class OperationMetricSet:
    """Collection of Prometheus metrics representing a logical operation"""

    requests: Counter
    requests_duration: Histogram
    exceptions: Counter
    requests_in_progress: Gauge

    def __init__(self, operation_name: str, labels: List[str]):
        """Create the counter/histogram/gauge set for ``operation_name``.

        ``labels`` are the label names shared by all four metrics; the
        failure counter additionally carries an ``exception_type`` label.
        """
        self.requests = Counter(
            f"pyncette_{operation_name}_total",
            f"Total count of {operation_name} operations",
            labels,
        )
        self.requests_duration = Histogram(
            f"pyncette_{operation_name}_duration_seconds",
            f"Histogram of {operation_name} processing time",
            labels,
        )
        self.exceptions = Counter(
            f"pyncette_{operation_name}_failures_total",
            f"Total count of failed {operation_name} failures",
            [*labels, "exception_type"],
        )
        self.requests_in_progress = Gauge(
            f"pyncette_{operation_name}_in_progress",
            f"Gauge of {operation_name} operations currently being processed",
            labels,
        )

    @contextlib.asynccontextmanager
    async def measure(self, **labels: str) -> AsyncIterator[None]:
        """An async context manager that measures the execution of the wrapped code"""
        if labels:
            self.requests_in_progress.labels(**labels).inc()
            self.requests.labels(**labels).inc()
        else:
            self.requests_in_progress.inc()
            self.requests.inc()

        before_time = time.perf_counter()
        try:
            yield
        except Exception as e:
            self.exceptions.labels(**labels, exception_type=type(e).__name__).inc()
            # Bare `raise` re-raises the in-flight exception unchanged; the
            # previous `raise e from None` suppressed exception context and
            # added a spurious traceback frame.
            raise
        finally:
            # Duration/in-progress bookkeeping runs on both success and error.
            if labels:
                self.requests_duration.labels(**labels).observe(
                    time.perf_counter() - before_time
                )
                self.requests_in_progress.labels(**labels).dec()
            else:
                self.requests_duration.observe(time.perf_counter() - before_time)
                self.requests_in_progress.dec()
# Example 9
class TelemetryClient(object):
    """Registers the invertpdf Prometheus metrics and pushes them to a
    Pushgateway whose endpoint comes from the environment."""

    @Inject
    def __init__(self, environment: SystemEnvironmentProperties):
        self.endpoint = environment.get("PROMETHEUS_GATEWAY_ENDPOINT")
        self.registry = CollectorRegistry()

        # Request-duration buckets span 0 ms up to one hour.
        duration_buckets = [
            0, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 30000,
            60000, 1800000, 3600000,
        ]

        self.get_request_counter = Counter(
            "invertpdf_get_request_count",
            "Number of successful GET requests",
            registry=self.registry,
        )
        self.post_request_counter = Counter(
            "invertpdf_post_request_count",
            "Number of successful POST requests",
            registry=self.registry,
        )
        self.duration_histogram = Histogram(
            "invertpdf_request_duration_ms",
            "Request duration",
            registry=self.registry,
            buckets=duration_buckets,
        )
        self.failure_counter = Counter(
            "invertpdf_failed_requests",
            "Number of failed requests",
            registry=self.registry,
        )
        self.requests_in_progress = Gauge(
            "invertpdf_requests_in_progress",
            "Number of pending requests",
            registry=self.registry,
        )
        self.free_disk = Gauge(
            "invertpdf_free_disk_space",
            "Free disk space on tmpfs",
            registry=self.registry,
        )
        self.logger = logging.getLogger(self.__class__.__name__)

    def track_request(self, method: str, duration: int):
        """Record a successful request's duration and bump its method counter."""
        self.logger.info(f"Request took {duration}ms.")
        self.duration_histogram.observe(duration)
        if method == "GET":
            self.get_request_counter.inc()
        elif method == "POST":
            self.post_request_counter.inc()

    def track_failure(self, method: str, duration: int):
        """Record a failed request; the duration still feeds the histogram."""
        self.failure_counter.inc()
        self.duration_histogram.observe(duration)

    def track_start(self):
        """Mark one more request as in flight."""
        self.requests_in_progress.inc()

    def track_end(self):
        """Mark one in-flight request as finished."""
        self.requests_in_progress.dec()

    def submit(self):
        """Push the entire registry to the configured gateway."""
        push_to_gateway(self.endpoint, "invertpdf", self.registry)
    def test_customize_reducer(self):
        """Rolled metrics honour both built-in and callable reducers."""
        h = Histogram('test_value', 'Testing roller', registry=self.registry)

        def make_roller(reducer):
            return HistogramRoller(h, registry=self.registry,
                                   options={'reducer': reducer})

        roller_max = make_roller('max')
        roller_min = make_roller('sum')

        def always_one(*args, **kwargs):
            return 1

        roller_one = make_roller(always_one)

        for observation in (2.6, 4.7, 3.8, 2.8):
            h.observe(observation)
            roller_max.collect()
            roller_min.collect()
            roller_one.collect()

        # Per-collection deltas for the le="5.0" bucket are 1, 1, 1.
        def check_rolled(suffix, expected):
            hits = 0
            for metric in self.registry.collect():
                if metric.name.endswith(suffix):
                    for _name, labels, value in metric.samples:
                        if labels['le'] == '5.0':
                            self.assertEqual(value, expected)
                            hits += 1
            self.assertTrue(hits > 0)

        check_rolled('max_rolled', 1.0)      # max of deltas
        check_rolled('sum_rolled', 3.0)      # sum of deltas
        check_rolled('always_one_rolled', 1.0)  # custom callable
# Example 11
    def test_histogram_negative_buckets(self):
        """Negative bucket bounds are accepted and rendered.

        NOTE(review): the expected output contains no hh_sum sample --
        presumably the encoder omits the sum when buckets include negative
        bounds; confirm against the client library's OpenMetrics handling.
        """
        s = Histogram('hh', 'A histogram', buckets=[-1, -0.5, 0, 0.5, 1], registry=self.registry)
        s.observe(-0.5)
        self.assertEqual(b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="-1.0"} 0.0
hh_bucket{le="-0.5"} 1.0
hh_bucket{le="0.0"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_created 123.456
# EOF
""", generate_latest(self.registry))
    def test_collect(self):
        """Rolled gauges mirror histogram bucket values after collect()."""
        h = Histogram('test_value', 'Testing roller', registry=self.registry)
        roller = HistogramRoller(h, registry=self.registry)

        # Initial collection creates one rolled gauge per histogram bucket.
        roller.collect()

        bucket_count = sum(1 for _ in self.get_hist_samples())
        rolled_count = sum(1 for _ in self.get_rolled_samples())

        self.assertTrue(bucket_count > 0)
        self.assertTrue(rolled_count > 0)
        self.assertEqual(bucket_count, rolled_count)

        # Rolled values start at 0.0 right after the first collection.
        for _name, _labels, value in self.get_rolled_samples():
            self.assertEqual(value, 0.0)

        # Feed the histogram a geometric spread of observations.
        for i in range(100):
            h.observe(pow(2, i/10 - 2))

        # Snapshot histogram bucket values keyed by upper bound.
        hist_values = {}
        for _name, labels, value in self.get_hist_samples():
            hist_values[labels['le']] = value

        # Rolled gauges stay untouched until the next collect().
        for _name, _labels, value in self.get_rolled_samples():
            self.assertEqual(value, 0.0)

        roller.collect()

        # After collecting, each rolled gauge matches its bucket's value.
        for _name, labels, value in self.get_rolled_samples():
            self.assertEqual(value, hist_values[labels['le']])
class FinalityInfoUpdater(ExporterPeriodicTask):
    """Periodic task (0.2s interval) exporting finality metrics for a node."""

    def __init__(self, rpc, cache):
        super(FinalityInfoUpdater, self).__init__(rpc, 0.2)
        self._cache = cache
        self._gauge_final_block = Gauge('polkadot_final_block',
                                        'Number of last finalized block')
        self._gauge_finality_delay_blocks = Gauge(
            'polkadot_finality_delay_blocks',
            'Difference in blocks between head and finalized blocks')
        self._histogram_finality_delay_blocks = Histogram(
            'polkadot_finality_delay_blocks_histogram',
            'Histogram of the difference in blocks between head and '
            'finalized blocks')

    def _perform_internal(self):
        """Fetch finalized/head blocks and update the delay metrics."""
        finalized_hash = self._rpc.request(
            'chain_getFinalizedHead')['result']
        head_hash = self._rpc.request('chain_getBlockHash')['result']
        if finalized_hash is None:
            return

        finalized_block = self._cache.get(finalized_hash)
        check(finalized_block is not None,
              'finalized block {} must not be none'.format(finalized_hash))
        finalized_num = get_block_num(finalized_block)
        self._gauge_final_block.set(finalized_num)

        # A finalized block implies a head block must exist.
        check(head_hash is not None,
              'head block is absent but finalized block isn\'t')
        head_block = self._cache.get(head_hash)
        check(head_block is not None,
              'head block {} must not be none'.format(head_hash))
        head_num = get_block_num(head_block)

        delay = head_num - finalized_num
        self._gauge_finality_delay_blocks.set(delay)
        self._histogram_finality_delay_blocks.observe(delay)
# Example 14
class Metrics:
    """Prometheus exporter for client-side streaming statistics."""

    def __init__(self, port=8000):
        # Port the HTTP metrics endpoint will listen on once start() runs.
        self.port = port

        self.fps = Gauge('fps', 'Frames per second observed by client')
        self.fps_hist = Histogram(
            'fps_hist',
            'Histogram of FPS observed by client',
            buckets=FPS_HIST_BUCKETS,
        )
        self.gpu_utilization = Gauge(
            'gpu_utilization', 'Utilization percentage reported by GPU')
        self.latency = Gauge('latency', 'Latency observed by client')

    def set_fps(self, fps):
        """Record an FPS reading in both the gauge and the histogram."""
        self.fps.set(fps)
        self.fps_hist.observe(fps)

    def set_gpu_utilization(self, utilization):
        """Record the latest GPU utilization percentage."""
        self.gpu_utilization.set(utilization)

    def set_latency(self, latency_ms):
        """Record the latest client latency, in milliseconds."""
        self.latency.set(latency_ms)

    def start(self):
        """Expose the metrics over HTTP on the configured port."""
        start_http_server(self.port)
# Example 15
    def test_histogram_exemplar(self):
        s = Histogram('hh', 'A histogram', buckets=[1, 2, 3, 4], registry=self.registry)
        s.observe(0.5, {'a': 'b'})
        s.observe(1.5, {'le': '7'})
        s.observe(2.5, {'a': 'b'})
        s.observe(3.5, {'a': '\n"\\'})
        print(generate_latest(self.registry))
        self.assertEqual(b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="1.0"} 1.0 # {a="b"} 0.5 123.456
hh_bucket{le="2.0"} 2.0 # {le="7"} 1.5 123.456
hh_bucket{le="3.0"} 3.0 # {a="b"} 2.5 123.456
hh_bucket{le="4.0"} 4.0 # {a="\\n\\"\\\\"} 3.5 123.456
hh_bucket{le="+Inf"} 4.0
hh_count 4.0
hh_sum 8.0
hh_created 123.456
# EOF
""", generate_latest(self.registry))
# Example 16
# Demo metrics: one of each basic prometheus_client type.
counter = Counter('sobi3ch_counter', 'Description of a counter')
gauge = Gauge('sobi3ch_gauge', 'Description of gauge')
gauge.set(50)
SUMMARY = Summary('sobi3ch_summary_request_processing_seconds',
                  'Time spent processing request')
histogram = Histogram('sobi3ch_histogram_request_latency_seconds',
                      'Description of histogram')


@SUMMARY.time()
def process_request(t):
    """A dummy function that takes some time."""
    time.sleep(t)


if __name__ == '__main__':
    # Serve /metrics, then keep generating random traffic forever.
    start_http_server(8000)
    while True:
        r = random.random()
        process_request(r)
        if r > 0.8:
            counter.inc()
        # Nudge the gauge up for small draws, down otherwise.
        (gauge.inc if r < 0.5 else gauge.dec)()
        histogram.observe(4.7)  # Observe 4.7 (seconds in this case)
from prometheus_client import Histogram
from prometheus_client import start_http_server, Summary
import random
import time

# Histogram demo: direct observation plus decorator / context-manager timing.
h = Histogram('request_latency_seconds', 'Description of histogram')
h.observe(4.7)  # Observe 4.7 (seconds in this case)


@h.time()
def f():
    """Timed no-op: each call adds one (near-zero) observation."""
    pass


# Timing also works as a context manager.
with h.time():
    pass

if __name__ == '__main__':
    # Serve /metrics and stream random latencies into the histogram.
    start_http_server(8000)
    while True:
        h.observe(random.uniform(0, 5))
'''
http://localhost:8000/

# HELP python_gc_collected_objects Objects collected during gc
# TYPE python_gc_collected_objects histogram
# HELP python_gc_uncollectable_objects Uncollectable object found during GC
# TYPE python_gc_uncollectable_objects histogram
# Example 18
class Source:
    """Wires a Singer-style tap to one or more targets, exporting
    Prometheus metrics (row count, data size) while streaming.

    The tap may be a shell command string or an object exposing ``emit()``.
    """

    @check_type
    def __init__(
        self,
        tap,
        tap_schema: dict = None,
        tap_name: str = None,
        tap_key: str = None,
        port: int = 8000,
    ):
        """
        Parameters
        ----------
        tap: str / object
            tap source.
        tap_schema: Dict, (default=None)
            data schema if tap an object. If `tap_schema` is None, it will auto generate schema.
        tap_name: str, (default=None)
            name for tap, necessary if tap is an object. it will throw an error if not a string if tap is an object.
        tap_key: str, (default=None)
            important non-duplicate key from `tap.emit()`, usually a timestamp.
        port: int, (default=8000)
            prometheus exporter port.
        """
        if not isinstance(tap, str) and not hasattr(tap, 'emit'):
            raise ValueError(
                'tap must a string or an object with method `emit`')

        # Object taps are wrapped in helper.Tap; string taps are treated as
        # shell commands later in start().
        if hasattr(tap, '__dict__'):
            self.tap = helper.Tap(
                tap,
                tap_schema=tap_schema,
                tap_name=tap_name,
                tap_key=tap_key,
            )
            f = tap_name
            self.tap_schema = tap_schema
        else:
            self.tap = tap
            self.tap_schema = None
            f = tap
        self._targets = []
        # Expose the Prometheus metrics endpoint immediately on construction.
        start_http_server(port)
        # Sanitize the tap name into a valid metric-name component.
        f = function.parse_name(f)

        # Per-tap metrics: total rows, plus data size as summary + histogram.
        self._tap_count = Counter(f'total_{f}', f'total rows {f}')
        self._tap_data = Summary(f'data_size_{f}',
                                 f'summary of data size {f} (KB)')
        self._tap_data_histogram = Histogram(
            f'data_size_histogram_{f}', f'histogram of data size {f} (KB)')

    def add(self, target):
        """
        Parameters
        ----------
        target: str / object
            target source.
        """
        if not isinstance(target, str) and not hasattr(target, 'parse'):
            raise ValueError(
                'target must a string or an object with method `parse`')

        # Python-script targets are rewritten into a runnable shell command.
        if isinstance(target, str):
            if '.py' in target:
                target = f'python3 {target}'
        self._targets.append(target)

    def get_targets(self):
        """
        Returns
        ----------
        result: list of targets
        """
        return self._targets

    @check_type
    def delete_target(self, index: int):
        """
        Parameters
        ----------
        index: int
            target index from `get_targets()`.
        """
        self._targets.pop(index)

    @check_type
    def start(
        self,
        transformation: Callable = None,
        asynchronous: bool = False,
        debug: bool = True,
        ignore_null: bool = True,
        graceful_shutdown: int = 30,
    ):
        """
        Parameters
        ----------
        transformation: Callable, (default=None)
            a callable variable to transform tap data, this will auto generate new data schema.
        asynchronous: bool, (default=False)
            If True, emit to targets in async manner, else, loop from first target until last target.
        debug: bool, (default=True)
            If True, will print every rows emitted and parsed.
        ignore_null: bool, (default=True)
            If False, if one of schema value is Null, it will throw an exception.
        graceful_shutdown: int, (default=30)
            If bigger than 0, any error happened, will automatically shutdown after sleep.
        """
        if graceful_shutdown < 0:
            raise ValueError('`graceful_shutdown` must bigger than -1')
        if not len(self._targets):
            raise Exception(
                'targets are empty, please add a target using `source.add()` first.'
            )
        # Spawn each string target as a subprocess; object targets are used
        # directly. Check_Error watches a subprocess for failures.
        self._pipes = []
        for target in self._targets:
            if isinstance(target, str):
                p = Popen(target.split(), stdout=PIPE, stdin=PIPE, stderr=PIPE)
                t = helper.Check_Error(p, graceful_shutdown)
                t.start()
            else:
                p = target

            self._pipes.append(helper.Target(p, target))

        # String taps stream lines from a subprocess; object taps iterate
        # directly and carry their own emitted-row counter.
        if isinstance(self.tap, str):
            pse = Popen(self.tap.split(), stdout=PIPE, stdin=PIPE, stderr=PIPE)
            t = helper.Check_Error(pse, graceful_shutdown)
            t.start()

            pse = iter(pse.stdout.readline, b'')
        else:
            pse = self.tap
            self.tap.tap.count = 0

        # With a transformation, genson infers the new schema incrementally.
        if transformation:
            from genson import SchemaBuilder

            builder = SchemaBuilder()
            builder.add_schema({'type': 'object', 'properties': {}})
        else:
            builder = None

        try:
            for lines in pse:
                if lines is None:
                    break
                # Normalize a single raw bytes line into a one-element batch.
                if isinstance(lines, bytes):
                    lines = [lines]
                if transformation:
                    lines = helper.transformation(
                        lines,
                        builder,
                        transformation,
                        tap_schema=self.tap_schema,
                    )
                for line in lines:
                    line = line.decode().strip()
                    if len(line):
                        if debug:
                            logger.info(line)

                        # Optionally reject SCHEMA messages with null-typed
                        # properties (some sinks cannot accept them).
                        if '"type": "SCHEMA"' in line and not ignore_null:
                            l = json.loads(line)
                            for k, v in l['schema']['properties'].items():
                                if v['type'].lower() == 'null':
                                    raise ValueError(
                                        f'{k} is a NULL, some of database cannot accept NULL schema. To ignore this exception, simply set `ignore_null` = True.'
                                    )

                        # Update the Prometheus metrics for this row.
                        self._tap_count.inc()
                        self._tap_data.observe(sys.getsizeof(line) / 1000)
                        self._tap_data_histogram.observe(
                            sys.getsizeof(line) / 1000)

                        # Fan the row out to every target, concurrently or
                        # sequentially depending on `asynchronous`.
                        if asynchronous:

                            @gen.coroutine
                            def loop():
                                r = yield [
                                    _sinking(line, pipe)
                                    for pipe in self._pipes
                                ]

                            result = loop()
                            if debug:
                                logger.info(result.result())

                        else:
                            for pipe in self._pipes:
                                result = _sinking(line, pipe)
                                if debug:
                                    logger.info(result.result())

                        # Object taps track how many RECORD rows were emitted.
                        if '"type": "RECORD"' in line and not isinstance(
                                self.tap, str):
                            self.tap.tap.count += 1

            # Best-effort drain/close of subprocess targets when the tap ends.
            for pipe in self._pipes:
                if isinstance(pipe.target, Popen):
                    try:
                        pipe.target.communicate()
                    except:
                        pass

        except Exception as e:
            # With graceful_shutdown > 0, log and hard-exit after a delay so
            # supervisors can restart the process; otherwise propagate.
            if graceful_shutdown > 0:
                logger.error(e)
                time.sleep(graceful_shutdown)
                os._exit(1)
            else:
                raise Exception(e)
    result_ = result.read()
    # for line in result_.splitlines():  
    #    print(line)
    arg_2 += 1
    times = time.time()
    # print(time.time() - times)
    print("result is", result_.splitlines()[1][-126:-62])
    hash_ = result_.splitlines()[1][-126:-62]

    while(True):
        time_1 = time.time()
        sub = subprocess.Popen('./docker2.sh ' + str(hash_), shell=True, stdout=subprocess.PIPE)
        print(time.time() - time_1)
        # sub = subprocess.Popen('./docker2.sh ' + "0x4910470174017491004179012", shell=True, stdout=subprocess.PIPE)
        content = str(sub.stdout.read())
        # print(time.time() - time_1)
        if "txID" in content:
            print("false")
            pass
        else:
            latency = time.time() - times
            print(latency)
            ard_histogram.observe(latency)
            ard_gauge.set(latency)
            break
    time.sleep(20)
    



def process_request(t):
    """Simulate a request by sleeping for *t* seconds."""
    time.sleep(t)

if __name__ == '__main__':
    # Start the exporter, then exercise one metric of each type forever.
    start_http_server(8111)

    c = Counter('myfake_failures_total', 'Description of counter')
    g = Gauge('myfake_inprogress_requests', 'Description of gauge')
    s = Summary('myfake_summary_request_latency_seconds', 'Description of summary')
    h = Histogram('myfake_histogram_request_latency_seconds', 'Description of histogram')

    while True:
        c.inc()         # counter: increment by 1
        g.inc()         # gauge: increment by 1
        s.observe(1.1)  # summary: observe 1.1 (seconds in this case)
        # Generate some request-shaped work.
        process_request(random.random())
        h.observe(4.7)  # histogram: observe 4.7 (seconds in this case)
# Example 21
from flask import Response, Flask, request
import prometheus_client
from prometheus_client import Summary, Counter, Histogram, Gauge

# Flask application that will serve the example metrics.
app = Flask(__name__)

# Summary example: a single 5.5 observation.
example_summary = Summary('alicek106_summary', 'Summary example')
example_summary.observe(5.5)

# Histogram example with custom buckets, seeded with a few observations.
example_histogram = Histogram('alicek106_histogram', 'Histogram example', buckets=(1, 5, 10, 50, 100, 200, 500, 1000))
for sample in (1, 5, 10, 100):
    example_histogram.observe(sample)

example_counter = Counter('alicek106_counter', 'Counter example')

# Gauge example primed for rate() queries.
rate_example_gauge = Gauge('alicek106_gauge', 'Gauge example for rate()')
rate_example_gauge.set(5)
initial_value = 5
offset_value = 5

# group_left example. It should be 'counter' type if you want to use.
methods = ["get", "get", "put", "post", "post"]
queries = [500, 404, 501, 500, 404]
values = [24, 30, 3, 6, 21]
error_gauge = Gauge('alicek106_http_errors', 'Test', ['method', 'code'])
for i in range(0, len(methods)):
# Example 22
class TestHistogram(unittest.TestCase):
    """Unit tests for the prometheus_client Histogram collector."""

    def setUp(self):
        self.registry = CollectorRegistry()
        self.histogram = Histogram('h', 'help', registry=self.registry)
        self.labels = Histogram('hl', 'help', ['l'], registry=self.registry)

    def _assert_unlabelled_state(self, buckets, count, total):
        """Assert the 'h' histogram's bucket/count/sum samples.

        ``buckets`` maps an 'le' label value to the expected cumulative count.
        """
        for le, expected in buckets.items():
            self.assertEqual(
                expected,
                self.registry.get_sample_value('h_bucket', {'le': le}))
        self.assertEqual(count, self.registry.get_sample_value('h_count'))
        self.assertEqual(total, self.registry.get_sample_value('h_sum'))

    def test_histogram(self):
        self._assert_unlabelled_state(
            {'1.0': 0, '2.5': 0, '5.0': 0, '+Inf': 0}, 0, 0)

        self.histogram.observe(2)
        self._assert_unlabelled_state(
            {'1.0': 0, '2.5': 1, '5.0': 1, '+Inf': 1}, 1, 2)

        # Observations on a bucket boundary count toward that bucket.
        self.histogram.observe(2.5)
        self._assert_unlabelled_state(
            {'1.0': 0, '2.5': 2, '5.0': 2, '+Inf': 2}, 2, 4.5)

        # Infinity only lands in the +Inf bucket; the sum becomes infinite.
        self.histogram.observe(float("inf"))
        self._assert_unlabelled_state(
            {'1.0': 0, '2.5': 2, '5.0': 2, '+Inf': 3}, 3, float("inf"))

    def test_setting_buckets(self):
        # An implicit +Inf bucket is appended when missing.
        h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2])
        self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)

        h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2, float("inf")])
        self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)

        # Buckets must be non-empty (beyond +Inf) and sorted ascending.
        self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[])
        self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[float("inf")])
        self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[3, 1])

    def test_labels(self):
        self.labels.labels('a').observe(2)
        self.assertEqual(0, self.registry.get_sample_value('hl_bucket', {'le': '1.0', 'l': 'a'}))
        self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '2.5', 'l': 'a'}))
        self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '5.0', 'l': 'a'}))
        self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '+Inf', 'l': 'a'}))
        self.assertEqual(1, self.registry.get_sample_value('hl_count', {'l': 'a'}))
        self.assertEqual(2, self.registry.get_sample_value('hl_sum', {'l': 'a'}))

    def test_function_decorator(self):
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))

        @self.histogram.time()
        def f():
            pass

        f()
        self.assertEqual(1, self.registry.get_sample_value('h_count'))
        self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))

    def test_block_decorator(self):
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
        with self.histogram.time():
            pass
        self.assertEqual(1, self.registry.get_sample_value('h_count'))
        self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
# Example 23
class PrometheusMonitor(Monitor):
    """
    Prometheus Faust Sensor.

    This sensor records statistics using prometheus_client and exposes
    them via the aiohttp server under /metrics by default.

    Usage:
        import faust
        from faust.sensors.prometheus import PrometheusMonitor

        app = faust.App('example', broker='kafka://')
        app.monitor = PrometheusMonitor(app, pattern='/metrics')
    """

    # Label values used for the table-operation and assignment counters.
    ERROR = 'error'
    COMPLETED = 'completed'
    KEYS_RETRIEVED = 'keys_retrieved'
    KEYS_UPDATED = 'keys_updated'
    KEYS_DELETED = 'keys_deleted'

    def __init__(self,
                 app: AppT,
                 pattern: str = '/metrics',
                 **kwargs: Any) -> None:
        self.app = app
        self.pattern = pattern

        # Fail fast: this sensor is unusable without prometheus_client.
        if prometheus_client is None:
            raise ImproperlyConfigured(
                'prometheus_client requires `pip install prometheus_client`.')

        # Register all metrics (in prometheus_client's default REGISTRY,
        # which expose_metrics() serves) and mount the metrics page.
        self._initialize_metrics()
        self.expose_metrics()
        super().__init__(**kwargs)

    def _initialize_metrics(self) -> None:
        """
        Initialize Prometheus metrics.

        All collectors are created without an explicit registry, so they are
        attached to prometheus_client's default global REGISTRY (the one
        served by the /metrics handler in expose_metrics()).
        """
        # On message received
        self.messages_received = Counter('messages_received',
                                         'Total messages received')
        self.active_messages = Gauge('active_messages',
                                     'Total active messages')
        self.messages_received_per_topics = Counter(
            'messages_received_per_topic', 'Messages received per topic',
            ['topic'])
        # Gauge (not Counter): on_message_in sets it to the latest offset.
        self.messages_received_per_topics_partition = Gauge(
            'messages_received_per_topics_partition',
            'Messages received per topic/partition', ['topic', 'partition'])
        self.events_runtime_latency = Histogram('events_runtime_ms',
                                                'Events runtime in ms')

        # On Event Stream in
        self.total_events = Counter('total_events', 'Total events received')
        self.total_active_events = Gauge('total_active_events',
                                         'Total active events')
        self.total_events_per_stream = Counter('total_events_per_stream',
                                               'Events received per Stream',
                                               ['stream'])

        # On table changes get/set/del keys
        self.table_operations = Counter('table_operations',
                                        'Total table operations',
                                        ['table', 'operation'])

        # On message send
        self.topic_messages_sent = Counter('topic_messages_sent',
                                           'Total messages sent per topic',
                                           ['topic'])
        self.total_sent_messages = Counter('total_sent_messages',
                                           'Total messages sent')
        self.producer_send_latency = Histogram('producer_send_latency',
                                               'Producer send latency in ms')
        self.total_error_messages_sent = Counter('total_error_messages_sent',
                                                 'Total error messages sent')
        self.producer_error_send_latency = Histogram(
            'producer_error_send_latency', 'Producer error send latency in ms')

        # Assignment
        # NOTE: the 'assigment' typo below is part of the exported help text;
        # fixing it would change the scraped metric metadata.
        self.assignment_operations = Counter(
            'assignment_operations',
            'Total assigment operations (completed/error)', ['operation'])
        self.assign_latency = Histogram('assign_latency',
                                        'Assignment latency in ms')

        # Rebalances -- the original 'revalance' spelling is kept in the
        # attribute and metric names for backward compatibility.
        self.total_rebalances = Gauge('total_rebalances', 'Total rebalances')
        self.total_rebalances_recovering = Gauge(
            'total_rebalances_recovering', 'Total rebalances recovering')
        self.revalance_done_consumer_latency = Histogram(
            'revalance_done_consumer_latency',
            'Consumer replying that rebalance is done to broker in ms')
        self.revalance_done_latency = Histogram(
            'revalance_done_latency', 'Revalance finished latency in ms')

        # Count Metrics by name
        self.count_metrics_by_name = Gauge('metrics_by_name',
                                           'Total metrics by name', ['metric'])

        # Web
        self.http_status_codes = Counter('http_status_codes',
                                         'Total http_status code',
                                         ['status_code'])
        self.http_latency = Histogram('http_latency',
                                      'Http response latency in ms')

        # Topic/Partition Offsets
        self.topic_partition_end_offset = Gauge(
            'topic_partition_end_offset', 'Offset ends per topic/partition',
            ['topic', 'partition'])
        self.topic_partition_offset_commited = Gauge(
            'topic_partition_offset_commited',
            'Offset commited per topic/partition', ['topic', 'partition'])
        self.consumer_commit_latency = Histogram(
            'consumer_commit_latency', 'Consumer commit latency in ms')

    def on_message_in(self, tp: TP, offset: int, message: Message) -> None:
        """Call before message is delegated to streams."""
        super().on_message_in(tp, offset, message)

        self.messages_received.inc()
        self.active_messages.inc()
        self.messages_received_per_topics.labels(topic=tp.topic).inc()
        # Records the latest seen offset for the partition, not a count.
        self.messages_received_per_topics_partition.labels(
            topic=tp.topic, partition=tp.partition).set(offset)

    def on_stream_event_in(self, tp: TP, offset: int, stream: StreamT,
                           event: EventT) -> typing.Optional[typing.Dict]:
        """Call when stream starts processing an event."""
        # State returned by the base Monitor is passed through unchanged so
        # on_stream_event_out can use it later.
        state = super().on_stream_event_in(tp, offset, stream, event)
        self.total_events.inc()
        self.total_active_events.inc()
        self.total_events_per_stream.labels(
            stream=f'stream.{self._stream_label(stream)}.events').inc()

        return state

    def _stream_label(self, stream: StreamT) -> str:
        # Derive a stable, lowercase label from the stream's short label,
        # e.g. "Stream: foo" -> "foo".
        return self._normalize(
            stream.shortlabel.lstrip('Stream:'), ).strip('_').lower()

    def on_stream_event_out(self,
                            tp: TP,
                            offset: int,
                            stream: StreamT,
                            event: EventT,
                            state: typing.Optional[typing.Dict] = None) -> None:
        """Call when stream is done processing an event."""
        super().on_stream_event_out(tp, offset, stream, event, state)
        self.total_active_events.dec()
        # NOTE(review): assumes the base Monitor appended this event's runtime
        # as the last element of self.events_runtime -- confirm upstream.
        self.events_runtime_latency.observe(
            self.secs_to_ms(self.events_runtime[-1]))

    def on_message_out(self, tp: TP, offset: int, message: Message) -> None:
        """Call when message is fully acknowledged and can be committed."""
        super().on_message_out(tp, offset, message)
        self.active_messages.dec()

    def on_table_get(self, table: CollectionT, key: typing.Any) -> None:
        """Call when value in table is retrieved."""
        super().on_table_get(table, key)
        self.table_operations.labels(table=f'table.{table.name}',
                                     operation=self.KEYS_RETRIEVED).inc()

    def on_table_set(self, table: CollectionT, key: typing.Any,
                     value: typing.Any) -> None:
        """Call when new value for key in table is set."""
        super().on_table_set(table, key, value)
        self.table_operations.labels(table=f'table.{table.name}',
                                     operation=self.KEYS_UPDATED).inc()

    def on_table_del(self, table: CollectionT, key: typing.Any) -> None:
        """Call when key in a table is deleted."""
        super().on_table_del(table, key)
        self.table_operations.labels(table=f'table.{table.name}',
                                     operation=self.KEYS_DELETED).inc()

    def on_commit_completed(self, consumer: ConsumerT,
                            state: typing.Any) -> None:
        """Call when consumer commit offset operation completed."""
        super().on_commit_completed(consumer, state)
        # `state` is the timestamp captured when the commit was initiated.
        self.consumer_commit_latency.observe(
            self.ms_since(typing.cast(float, state)))

    def on_send_initiated(self, producer: ProducerT, topic: str,
                          message: PendingMessage, keysize: int,
                          valsize: int) -> typing.Any:
        """Call when message added to producer buffer."""
        self.topic_messages_sent.labels(topic=f'topic.{topic}').inc()

        return super().on_send_initiated(producer, topic, message, keysize,
                                         valsize)

    def on_send_completed(self, producer: ProducerT, state: typing.Any,
                          metadata: RecordMetadata) -> None:
        """Call when producer finished sending message."""
        super().on_send_completed(producer, state, metadata)
        self.total_sent_messages.inc()
        # `state` is the send-start timestamp from on_send_initiated.
        self.producer_send_latency.observe(
            self.ms_since(typing.cast(float, state)))

    def on_send_error(self, producer: ProducerT, exc: BaseException,
                      state: typing.Any) -> None:
        """Call when producer was unable to publish message."""
        super().on_send_error(producer, exc, state)
        self.total_error_messages_sent.inc()
        self.producer_error_send_latency.observe(
            self.ms_since(typing.cast(float, state)))

    def on_assignment_error(self, assignor: PartitionAssignorT,
                            state: typing.Dict, exc: BaseException) -> None:
        """Partition assignor did not complete assignor due to error."""
        super().on_assignment_error(assignor, state, exc)
        self.assignment_operations.labels(operation=self.ERROR).inc()
        self.assign_latency.observe(self.ms_since(state['time_start']))

    def on_assignment_completed(self, assignor: PartitionAssignorT,
                                state: typing.Dict) -> None:
        """Partition assignor completed assignment."""
        super().on_assignment_completed(assignor, state)
        self.assignment_operations.labels(operation=self.COMPLETED).inc()
        self.assign_latency.observe(self.ms_since(state['time_start']))

    def on_rebalance_start(self, app: AppT) -> typing.Dict:
        """Cluster rebalance in progress."""
        state = super().on_rebalance_start(app)
        self.total_rebalances.inc()

        return state

    def on_rebalance_return(self, app: AppT, state: typing.Dict) -> None:
        """Consumer replied assignment is done to broker."""
        super().on_rebalance_return(app, state)
        # The rebalance moves from "in progress" to "recovering".
        self.total_rebalances.dec()
        self.total_rebalances_recovering.inc()
        self.revalance_done_consumer_latency.observe(
            self.ms_since(state['time_return']))

    def on_rebalance_end(self, app: AppT, state: typing.Dict) -> None:
        """Cluster rebalance fully completed (including recovery)."""
        super().on_rebalance_end(app, state)
        self.total_rebalances_recovering.dec()
        self.revalance_done_latency.observe(self.ms_since(state['time_end']))

    def count(self, metric_name: str, count: int = 1) -> None:
        """Count metric by name."""
        super().count(metric_name, count=count)
        self.count_metrics_by_name.labels(metric=metric_name).inc(count)

    def on_tp_commit(self, tp_offsets: TPOffsetMapping) -> None:
        """Call when offset in topic partition is committed."""
        super().on_tp_commit(tp_offsets)
        for tp, offset in tp_offsets.items():
            self.topic_partition_offset_commited.labels(
                topic=tp.topic, partition=tp.partition).set(offset)

    def track_tp_end_offset(self, tp: TP, offset: int) -> None:
        """Track new topic partition end offset for monitoring lags."""
        super().track_tp_end_offset(tp, offset)
        self.topic_partition_end_offset.labels(
            topic=tp.topic, partition=tp.partition).set(offset)

    def on_web_request_end(self,
                           app: AppT,
                           request: web.Request,
                           response: typing.Optional[web.Response],
                           state: typing.Dict,
                           *,
                           view: web.View = None) -> None:
        """Web server finished working on request."""
        super().on_web_request_end(app, request, response, state, view=view)
        status_code = int(state['status_code'])
        self.http_status_codes.labels(status_code=status_code).inc()
        self.http_latency.observe(self.ms_since(state['time_end']))

    def expose_metrics(self) -> None:
        """Expose Prometheus metrics using the current aiohttp application."""
        @self.app.page(self.pattern)
        async def metrics_handler(self: _web.View,
                                  request: _web.Request) -> _web.Response:
            # Content type required by the Prometheus text exposition format.
            headers = {
                'Content-Type': 'text/plain; version=0.0.4; charset=utf-8',
            }

            # Serialize the default global registry (where all metrics above
            # were registered).
            return cast(
                _web.Response,
                Response(body=generate_latest(REGISTRY),
                         headers=headers,
                         status=200))
Esempio n. 24
0
#!/usr/bin/python

from prometheus_client import Counter, Gauge, Summary, Histogram, start_http_server

# need install prometheus_client

if __name__ == '__main__':
    # Hoisted: the original imported `time` mid-script, after the HTTP
    # server was already started; imports belong at the top of the block.
    import time

    # One metric of each basic prometheus_client type, registered in the
    # default global registry served by start_http_server().
    c = Counter('cc', 'A counter')
    c.inc()

    g = Gauge('gg', 'A gauge')
    g.set(17)

    s = Summary('ss', 'A summary', ['a', 'b'])
    s.labels('c', 'd').observe(17)

    h = Histogram('hh', 'A histogram')
    h.observe(.6)

    # Expose /metrics on port 8000 and keep the process alive forever.
    start_http_server(8000)
    while True:
        time.sleep(1)
Esempio n. 25
0
from prometheus_client import start_http_server, Summary, Histogram
import random
import time

# Earlier Summary-based instrumentation, kept commented for reference:
#REQUEST_TIME = Summary('request_processing_seconds','Time spent processing request')
#s = Summary('request_latency_seconds', 'Description of summary')
# Histogram registered in the default global registry; one synthetic sample
# is recorded up front so /metrics has data immediately after startup.
h = Histogram('request_latency_seconds','Description of histogram')
h.observe(4.7)
#s.observe(4.7)
def process_request(t):
    """Simulate handling a request by sleeping for *t* seconds."""
    time.sleep(t)

if __name__ == '__main__':
    # Serve /metrics on :8800, then keep generating fake request latencies.
    start_http_server(8800)
    while True:
        delay = random.random()
        process_request(delay)

Esempio n. 26
0
                        help='a URL to probe')
    return parser.parse_args()


def do_transact(url):
    """Fetch *url* once and return the elapsed time in seconds.

    Returns -1 on any request failure so the caller can detect (and skip
    recording) failed probes.
    """
    logging.info('url:{}'.format(url))
    # perf_counter is monotonic, so the measured duration cannot go negative
    # if the system clock is adjusted mid-request (time.time can).
    start = time.perf_counter()
    try:
        # Response body is intentionally ignored; only the latency matters.
        requests.get(url)
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are expected
        # here; anything else should surface as a real bug.
        logging.warning("url fetch error:{}".format(e))
        return -1
    return time.perf_counter() - start


args = get_cmd_opts()
# xact_time = Gauge('http_get_time', 'Time to look up URL {}'.format(args.url))
# BUG FIX: the Histogram and the Gauge previously shared the metric name
# 'http_get_time_seconds'; prometheus_client raises "Duplicated timeseries
# in CollectorRegistry" when two collectors register the same name, so the
# script crashed at import. The gauge now has its own metric name.
xact_time = Histogram('http_get_time_seconds', 'Time to lookup URL {}'.format(args.url))
xact_gauge = Gauge('http_get_last_time_seconds', 'Time to lookup URL {}'.format(args.url))

if __name__ == "__main__":
    port = int(os.getenv("PORT", default=8000))
    logging.info("Starting; listening on {} target is {}".format(port, args.url))
    start_http_server(port)
    while True:
        t_time = do_transact(args.url)
        # do_transact returns -1 on failure; recording that would corrupt
        # the histogram sum, so only successful probes are observed.
        if t_time >= 0:
            xact_time.observe(t_time)
            xact_gauge.set(t_time)
        time.sleep(30)
Esempio n. 27
0
class TestHistogram(unittest.TestCase):
    def setUp(self):
        """Create a fresh registry holding one labelled and one plain histogram."""
        self.registry = CollectorRegistry()
        self.labels = Histogram('hl', 'help', ['l'], registry=self.registry)
        self.histogram = Histogram('h', 'help', registry=self.registry)

    def test_histogram(self):
        """Observations fall into the right buckets and update count/sum."""

        def assert_state(b1, b25, b5, binf, count, total):
            # Check every cumulative bucket plus the count and sum series.
            self.assertEqual(
                b1, self.registry.get_sample_value('h_bucket', {'le': '1.0'}))
            self.assertEqual(
                b25, self.registry.get_sample_value('h_bucket', {'le': '2.5'}))
            self.assertEqual(
                b5, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
            self.assertEqual(
                binf, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
            self.assertEqual(count, self.registry.get_sample_value('h_count'))
            self.assertEqual(total, self.registry.get_sample_value('h_sum'))

        assert_state(0, 0, 0, 0, 0, 0)

        self.histogram.observe(2)
        assert_state(0, 1, 1, 1, 1, 2)

        self.histogram.observe(2.5)
        assert_state(0, 2, 2, 2, 2, 4.5)

        self.histogram.observe(float("inf"))
        assert_state(0, 2, 2, 3, 3, float("inf"))

    def test_setting_buckets(self):
        """Custom buckets are normalised; invalid bucket lists are rejected."""
        h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2])
        self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)

        h = Histogram('h', 'help', registry=None,
                      buckets=[0, 1, 2, float("inf")])
        self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)

        # Empty, +Inf-only and unsorted bucket lists must all be rejected.
        for bad_buckets in ([], [float("inf")], [3, 1]):
            self.assertRaises(ValueError, Histogram, 'h', 'help',
                              registry=None, buckets=bad_buckets)

    def test_labels(self):
        """An observation on one child only affects that child's series."""
        self.labels.labels('a').observe(2)
        for expected, le in ((0, '1.0'), (1, '2.5'), (1, '5.0'), (1, '+Inf')):
            self.assertEqual(
                expected,
                self.registry.get_sample_value('hl_bucket',
                                               {'le': le, 'l': 'a'}))
        self.assertEqual(
            1, self.registry.get_sample_value('hl_count', {'l': 'a'}))
        self.assertEqual(
            2, self.registry.get_sample_value('hl_sum', {'l': 'a'}))

    def test_function_decorator(self):
        """time() used as a decorator records one observation per call."""
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(
            0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))

        @self.histogram.time()
        def instrumented():
            pass

        instrumented()

        self.assertEqual(1, self.registry.get_sample_value('h_count'))
        self.assertEqual(
            1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))

    def test_block_decorator(self):
        """time() used as a context manager records one observation."""
        self.assertEqual(0, self.registry.get_sample_value('h_count'))
        self.assertEqual(
            0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))

        with self.histogram.time():
            pass

        self.assertEqual(1, self.registry.get_sample_value('h_count'))
        self.assertEqual(
            1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))