Code Example #1
    def test_fallback_to_state_machine_label_parsing(self):
        from unittest.mock import patch

        from prometheus_client.openmetrics.parser import _parse_sample

        parse_sample_function = "prometheus_client.openmetrics.parser._parse_sample"
        parse_labels_function = "prometheus_client.openmetrics.parser._parse_labels"
        parse_remaining_function = "prometheus_client.openmetrics.parser._parse_remaining_text"
        state_machine_function = "prometheus_client.openmetrics.parser._parse_labels_with_state_machine"

        parse_sample_return_value = Sample("a_total", {"foo": "foo # bar"}, 1)
        with patch(parse_sample_function, return_value=parse_sample_return_value) as mock:
            families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a_total{foo="foo # bar"} 1
# EOF
""")
            a = CounterMetricFamily("a", "help", labels=["foo"])
            a.add_metric(["foo # bar"], 1)
            self.assertEqual([a], list(families))
            mock.assert_called_once_with('a_total{foo="foo # bar"} 1')

        # First fallback case
        state_machine_return_values = [{"foo": "foo # bar"}, len('foo="foo # bar"}')]
        parse_remaining_values = [1, None, None]
        with patch(parse_labels_function) as mock1:
            with patch(state_machine_function, return_value=state_machine_return_values) as mock2:
                with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3:
                    sample = _parse_sample('a_total{foo="foo # bar"} 1')
                    s = Sample("a_total", {"foo": "foo # bar"}, 1)
                    self.assertEqual(s, sample)
                    mock1.assert_not_called()
                    mock2.assert_called_once_with('foo="foo # bar"} 1')
                    mock3.assert_called_once_with('1')

        # Second fallback case
        state_machine_return_values = [{"le": "1.0"}, len('le="1.0"}')]
        parse_remaining_values = [0.0, Timestamp(123, 0), Exemplar({"a": "b"}, 0.5)]
        with patch(parse_labels_function) as mock1:
            with patch(state_machine_function, return_value=state_machine_return_values) as mock2:
                with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3:
                    sample = _parse_sample('a_bucket{le="1.0"} 0 123 # {a="b"} 0.5')
                    s = Sample("a_bucket", {"le": "1.0"}, 0.0, Timestamp(123, 0), Exemplar({"a": "b"}, 0.5))
                    self.assertEqual(s, sample)
                    mock1.assert_not_called()
                    mock2.assert_called_once_with('le="1.0"} 0 123 # {a="b"} 0.5')
                    mock3.assert_called_once_with('0 123 # {a="b"} 0.5')

        # No need to fallback case
        parse_labels_return_values = {"foo": "foo#bar"}
        parse_remaining_values = [1, None, None]
        with patch(parse_labels_function, return_value=parse_labels_return_values) as mock1:
            with patch(state_machine_function) as mock2:
                with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3:
                    sample = _parse_sample('a_total{foo="foo#bar"} 1')
                    s = Sample("a_total", {"foo": "foo#bar"}, 1)
                    self.assertEqual(s, sample)
                    mock1.assert_called_once_with('foo="foo#bar"')
                    mock2.assert_not_called()
                    mock3.assert_called_once_with('1')
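The expected values in these examples are built with prometheus_client's Sample, a named tuple of (name, labels, value, timestamp, exemplar) where timestamp and exemplar default to None, so the assertEqual calls reduce to field-by-field tuple comparison. A minimal sketch, assuming the prometheus_client.samples import path and made-up metric names:

from prometheus_client.samples import Sample

# name, labels and value are required; timestamp and exemplar default to None
s = Sample('requests_total', {'path': '/'}, 3.0)
assert s.timestamp is None and s.exemplar is None

# equality is plain tuple equality, which is what the assertions above rely on
assert s == Sample('requests_total', {'path': '/'}, 3.0)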
Code Example #2
def test_collect_metric_using_service_method():
    mocks = create_session_mocks_using_service_method("describe_instances", [
        {
            "Reservations": [{"Instances": [{"InstanceId": "instance_id_1"}, {"InstanceId": "instance_id_2"}, ]}],
            "NextToken": "123abc"
        },
        {
            "Reservations": [{"Instances": [{"InstanceId": "instance_id_3"}]}],
            "NextToken": None
        },
    ])
    metrics = parse_aws_metrics(SINGLE_METRIC_YAML_WITH_PAGINATOR_WITH_SERVICE_METHOD)
    collector = AwsMetricsCollector(metrics, mocks.session)
    collector.update()
    gauge_family = list(collector.collect())[0]
    mocks.session.client.assert_called_once_with("ec2")
    mocks.service.describe_instances.assert_has_calls([
        call(Filters=[{
            "Name": "instance-state-name",
            "Values": ["Running"],
        }]),
        call(Filters=[{
            "Name": "instance-state-name",
            "Values": ["Running"],
        }], NextToken="123abc")
    ])
    assert gauge_family.samples == [
        Sample("ec2_instance_ids", {"id": "instance_id_1"}, 1),
        Sample("ec2_instance_ids", {"id": "instance_id_2"}, 1),
        Sample("ec2_instance_ids", {"id": "instance_id_3"}, 1)
    ]
Code Example #3
    def test_collector_to_metric_histogram(self):
        registry = mock.Mock()
        options = prometheus.Options("test1", 8001, "localhost", registry)
        collector = prometheus.Collector(options=options)
        collector.register_view(VIDEO_SIZE_VIEW)
        desc = collector.registered_views[list(REGISTERED_VIEW)[0]]
        distribution = copy.deepcopy(VIDEO_SIZE_DISTRIBUTION.aggregation_data)
        distribution.add_sample(280.0 * MiB, None, None)
        metric = collector.to_metric(
            desc=desc,
            tag_values=[tag_value_module.TagValue("ios")],
            agg_data=distribution)

        self.assertEqual(desc['name'], metric.name)
        self.assertEqual(desc['documentation'], metric.documentation)
        self.assertEqual('histogram', metric.type)
        expected_samples = [
            Sample(metric.name + '_bucket', {
                "myorg_keys_frontend": "ios",
                "le": str(16.0 * MiB)
            }, 0),
            Sample(metric.name + '_bucket', {
                "myorg_keys_frontend": "ios",
                "le": str(256.0 * MiB)
            }, 0),
            Sample(metric.name + '_bucket', {
                "myorg_keys_frontend": "ios",
                "le": "+Inf"
            }, 1),
            Sample(metric.name + '_count', {"myorg_keys_frontend": "ios"}, 1),
            Sample(metric.name + '_sum', {"myorg_keys_frontend": "ios"},
                   280.0 * MiB)
        ]
        self.assertEqual(expected_samples, metric.samples)
Code Example #4
    def test_target_info_restricted_registry(self):
        registry = CollectorRegistry(target_info={'foo': 'bar'})
        Summary('s', 'help', registry=registry).observe(7)

        m = Metric('s', 'help', 'summary')
        m.samples = [Sample('s_sum', {}, 7)]
        self.assertEqual([m], registry.restricted_registry(['s_sum']).collect())

        m = Metric('target', 'Target metadata', 'info')
        m.samples = [Sample('target_info', {'foo': 'bar'}, 1)]
        self.assertEqual([m], registry.restricted_registry(['target_info']).collect())
Code Example #5
    def test_restricted_registry_does_not_yield_while_locked(self):
        registry = CollectorRegistry(target_info={'foo': 'bar'})
        Summary('s', 'help', registry=registry).observe(7)

        m = Metric('s', 'help', 'summary')
        m.samples = [Sample('s_sum', {}, 7)]
        self.assertEqual([m], list(registry.restricted_registry(['s_sum']).collect()))

        m = Metric('target', 'Target metadata', 'info')
        m.samples = [Sample('target_info', {'foo': 'bar'}, 1)]
        for _ in registry.restricted_registry(['target_info', 's_sum']).collect():
            self.assertFalse(registry._lock.locked())
Code Example #6
    def test_untyped(self):
        # https://github.com/prometheus/client_python/issues/79
        families = text_string_to_metric_families("""# HELP redis_connected_clients Redis connected clients
# TYPE redis_connected_clients untyped
redis_connected_clients{instance="rough-snowflake-web",port="6380"} 10.0
redis_connected_clients{instance="rough-snowflake-web",port="6381"} 12.0
""")
        m = Metric("redis_connected_clients", "Redis connected clients", "untyped")
        m.samples = [
            Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6380"}, 10),
            Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6381"}, 12),
        ]
        self.assertEqualMetrics([m], list(families))
Code Example #7
    def test_restricted_registry(self):
        registry = CollectorRegistry()
        Counter('c_total', 'help', registry=registry)
        Summary('s', 'help', registry=registry).observe(7)

        m = Metric('s', 'help', 'summary')
        m.samples = [Sample('s_sum', {}, 7)]
        self.assertEqual([m], registry.restricted_registry(['s_sum']).collect())
Code Example #8
    def test_serves_metrics(self):
        labels = dict((i, i) for i in 'abcd')
        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)
        self.assertEqual(None,
                         self.registry.get_sample_value('c_total', labels))
        archive_metrics()
        self.assertEqual(self.collector.collect()[0].samples,
                         [Sample('c_total', labels, 1.0)])
Code Example #9
    def test_collect_doesnt_block_other_collects(self):
        values.ValueClass = MultiProcessValue(lambda: 0)
        labels = dict((i, i) for i in 'abcd')
        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        c.labels(**labels).inc(1)

        with advisory_lock(LOCK_SH):
            metrics = dict(
                (m.name, m) for m in self.collector.collect(blocking=False))
            self.assertEqual(metrics['c'].samples,
                             [Sample('c_total', labels, 1.0)])
Code Example #10
    def test_restricted_registry_does_not_call_extra(self):
        from unittest.mock import MagicMock
        registry = CollectorRegistry()
        mock_collector = MagicMock()
        mock_collector.describe.return_value = [Metric('foo', 'help', 'summary')]
        registry.register(mock_collector)
        Summary('s', 'help', registry=registry).observe(7)

        m = Metric('s', 'help', 'summary')
        m.samples = [Sample('s_sum', {}, 7)]
        self.assertEqual([m], list(registry.restricted_registry(['s_sum']).collect()))
        mock_collector.collect.assert_not_called()
Code Example #11
def test_collect_metric_with_extra_labels():
    mocks = create_session_mocks_using_paginator([
        {"id": "instance_id_1", "value": 1},
        {"id": "instance_id_2", "value": 1},
        {"id": "instance_id_3", "value": 1}
    ])
    metrics = parse_aws_metrics(SINGLE_METRIC_YAML_WITH_PAGINATOR)
    collector = AwsMetricsCollector(metrics, mocks.session, ["region_name", "env"], ["us-east-1", "dev"])
    collector.update()
    gauge_family = list(collector.collect())[0]
    mocks.session.client.assert_called_once_with("ec2")
    mocks.service.get_paginator.assert_called_once_with("describe_instances")
    mocks.paginator.paginate.assert_called_once_with(Filters=[{
        "Name": "instance-state-name",
        "Values": ["Running"]
    }])
    mocks.paginate_response_iterator.search.assert_called_once_with(metrics[0].search)
    assert gauge_family.samples == [
        Sample("ec2_instance_ids", {"region_name": "us-east-1", "env": "dev", "id": "instance_id_1"}, 1),
        Sample("ec2_instance_ids", {"region_name": "us-east-1", "env": "dev", "id": "instance_id_2"}, 1),
        Sample("ec2_instance_ids", {"region_name": "us-east-1", "env": "dev", "id": "instance_id_3"}, 1)
    ]
Code Example #12
    def test_collector_collect(self):
        agg = aggregation_module.LastValueAggregation(256)
        view = view_module.View("new_view", "processed video size over time",
                                [FRONTEND_KEY], VIDEO_SIZE_MEASURE, agg)
        registry = mock.Mock()
        options = prometheus.Options("test2", 8001, "localhost", registry)
        collector = prometheus.Collector(options=options)
        collector.register_view(view)
        desc = collector.registered_views['test2_new_view']
        metric = collector.to_metric(
            desc=desc,
            tag_values=[tag_value_module.TagValue("value")],
            agg_data=agg.aggregation_data)

        self.assertEqual(desc['name'], metric.name)
        self.assertEqual(desc['documentation'], metric.documentation)
        self.assertEqual('gauge', metric.type)
        expected_samples = [
            Sample(metric.name, {"myorg_keys_frontend": "value"}, 256)]
        self.assertEqual(expected_samples, metric.samples)
Code Example #13
def test_prometheus_logger_metrics(routemaster_serve_subprocess):
    with routemaster_serve_subprocess(
            wait_for_output=b'Booting worker', ) as (proc, port):
        # Populate metrics with a request
        requests.get(f'http://127.0.0.1:{port}/')

        metrics_response = requests.get(f'http://127.0.0.1:{port}/metrics')
        metric_families = list(
            text_string_to_metric_families(metrics_response.text))
        samples = [y for x in metric_families for y in x.samples]

        assert Sample(
            name='routemaster_api_request_duration_seconds_count',
            labels={
                'method': 'GET',
                'status': '200',
                'endpoint': '/'
            },
            value=1.0,
        ) in samples
Code Example #14
def test_prometheus_cleanup(registry):
    pid = 1

    def getpid():
        return pid

    # override use of os.getpid. _ValueClass is recreated after every test,
    # so we don't need to clean up
    from prometheus_client import core
    core._ValueClass = core._MultiProcessValue(getpid)

    histogram = metrics.Histogram(
        name='histogram',
        documentation='test histogram',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )
    counter = metrics.Counter(
        name='counter',
        documentation='test counter',
        labelnames=['foo', 'bar', 'baz'],
        statsd='{name}.{label}',
        registry=registry,
    )

    from prometheus_client.multiprocess import MultiProcessCollector
    collector = MultiProcessCollector(registry)
    labels = {'foo': 'foo', 'bar': 'bar', 'baz': 'baz'}

    def collect():
        return {m.name: m for m in collector.collect()}

    def files():
        return list(sorted(os.listdir(os.environ['prometheus_multiproc_dir'])))

    counter.inc(1, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    assert files() == [
        'counter_1.db',
        'histogram_1.db',
    ]

    before = collect()
    metrics.prometheus_cleanup_worker(pid)
    after = collect()
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    assert before == after

    # magic!
    pid += 1

    # new worker, create some new metrics, check they are all combined
    counter.inc(2, **labels)
    histogram.observe(0.5, **labels)
    histogram.observe(2.5, **labels)

    later = collect()
    assert files() == [
        'counter_2.db',
        'counter_archive.db',
        'histogram_2.db',
        'histogram_archive.db',
    ]

    # check counter is correct

    assert later['counter'].samples == [
        Sample(counter_name('counter_total'), labels, 3.0),
    ]

    expected_histogram = [
        Sample('histogram_bucket', dict(le='0.005', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.01', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.025', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.05', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.075', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.1', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.25', **labels), 0.0),
        Sample('histogram_bucket', dict(le='0.5', **labels), 2.0),
        Sample('histogram_bucket', dict(le='0.75', **labels), 2.0),
        Sample('histogram_bucket', dict(le='1.0', **labels), 2.0),
        Sample('histogram_bucket', dict(le='2.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='5.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='7.5', **labels), 4.0),
        Sample('histogram_bucket', dict(le='10.0', **labels), 4.0),
        Sample('histogram_bucket', dict(le='+Inf', **labels), 4.0),
        Sample('histogram_count', labels, 4.0),
        Sample('histogram_sum', labels, 6.0),
    ]

    # check histogram is correct
    later['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later['histogram'].samples == expected_histogram

    # check the final files produce the correct numbers
    metrics.prometheus_cleanup_worker(pid)
    final = collect()
    assert files() == [
        'counter_archive.db',
        'histogram_archive.db',
    ]
    final['histogram'].samples.sort(key=metrics.histogram_sorter)
    assert later == final
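Example #14 above, like examples #15 to #18 below, exercises prometheus_client's multiprocess mode: each process writes its values to .db files named after the metric type and pid (counter_1.db, histogram_1.db in the listing above), and MultiProcessCollector merges those files at scrape time. A minimal setup sketch, assuming PROMETHEUS_MULTIPROC_DIR (spelled prometheus_multiproc_dir in older releases, as in example #14) was exported to a writable directory before prometheus_client was imported:

from prometheus_client import CollectorRegistry, Counter
from prometheus_client.multiprocess import MultiProcessCollector

registry = CollectorRegistry()
MultiProcessCollector(registry)  # merges the per-process .db files on every collect()

c = Counter('c', 'help', registry=None)  # registry=None: exposure goes through the multiprocess collector
c.inc()

for metric in registry.collect():
    print(metric.name, metric.samples)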
Code Example #15
    def test_collect(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)
        labels = {i: i for i in 'abcd'}

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        g = Gauge('g', 'help', labelnames=labels.keys(), registry=None)
        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(1)

        pid = 1

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(5)

        metrics = {m.name: m for m in self.collector.collect()}

        self.assertEqual(metrics['c'].samples,
                         [Sample('c_total', labels, 2.0)])
        metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
        self.assertEqual(metrics['g'].samples, [
            Sample('g', add_label('pid', '0'), 1.0),
            Sample('g', add_label('pid', '1'), 1.0),
        ])

        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0))))
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 1.0),
            Sample('h_bucket', add_label('le', '5.0'), 2.0),
            Sample('h_bucket', add_label('le', '7.5'), 2.0),
            Sample('h_bucket', add_label('le', '10.0'), 2.0),
            Sample('h_bucket', add_label('le', '+Inf'), 2.0),
            Sample('h_count', labels, 2.0),
            Sample('h_sum', labels, 6.0),
        ]

        self.assertEqual(metrics['h'].samples, expected_histogram)
Code Example #16
    def test_merge_no_accumulate(self):
        self.pid = 0
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        h = Histogram('hna', 'help', labelnames=labels.keys(), registry=None)
        h.labels(**labels).observe(1)
        self.pid = 1
        h.labels(**labels).observe(5)

        self.collector.accumulate = False
        metrics = self.collector.collect()
        self.collector.accumulate = True

        metric = [x for x in metrics if x.name == 'hna'][0]
        metric.samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0)))
        )
        expected_histogram = [
            Sample('hna_bucket', add_label('le', '0.005'), 0.0),
            Sample('hna_bucket', add_label('le', '0.01'), 0.0),
            Sample('hna_bucket', add_label('le', '0.025'), 0.0),
            Sample('hna_bucket', add_label('le', '0.05'), 0.0),
            Sample('hna_bucket', add_label('le', '0.075'), 0.0),
            Sample('hna_bucket', add_label('le', '0.1'), 0.0),
            Sample('hna_bucket', add_label('le', '0.25'), 0.0),
            Sample('hna_bucket', add_label('le', '0.5'), 0.0),
            Sample('hna_bucket', add_label('le', '0.75'), 0.0),
            Sample('hna_bucket', add_label('le', '1.0'), 1.0),
            Sample('hna_bucket', add_label('le', '2.5'), 0.0),
            Sample('hna_bucket', add_label('le', '5.0'), 1.0),
            Sample('hna_bucket', add_label('le', '7.5'), 0.0),
            Sample('hna_bucket', add_label('le', '10.0'), 0.0),
            Sample('hna_bucket', add_label('le', '+Inf'), 0.0),
            Sample('hna_sum', labels, 6.0),
        ]

        self.assertEqual(metric.samples, expected_histogram)
Code Example #17
    def test_merge_no_accumulate(self):
        pid = 0
        values.ValueClass = MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)
        h.labels(**labels).observe(1)
        pid = 1
        h.labels(**labels).observe(5)

        path = os.path.join(os.environ['PROMETHEUS_MULTIPROC_DIR'], '*.db')
        files = glob.glob(path)
        metrics = dict(
            (m.name, m) for m in self.collector.merge(files, accumulate=False))

        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0))))
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 0.0),
            Sample('h_bucket', add_label('le', '5.0'), 1.0),
            Sample('h_bucket', add_label('le', '7.5'), 0.0),
            Sample('h_bucket', add_label('le', '10.0'), 0.0),
            Sample('h_bucket', add_label('le', '+Inf'), 0.0),
            Sample('h_sum', labels, 6.0),
        ]

        self.assertEqual(metrics['h'].samples, expected_histogram)
Code Example #18
    def test_collect(self):
        self.pid = 0

        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            l = labels.copy()
            l[key] = value
            return l

        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        g = Gauge('g', 'help', labelnames=labels.keys(), registry=None, multiprocess_mode='liveall')
        gall = Gauge('gall', 'help', labelnames=labels.keys(), registry=None, multiprocess_mode='all')
        gempty = Gauge('gempty', 'help', labelnames=labels.keys(), registry=None, multiprocess_mode='all')
        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        gall.labels(**labels).set(1)
        h.labels(**labels).observe(1)

        self.pid = 1

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        gall.labels(**labels).set(1)
        h.labels(**labels).observe(5)

        metrics = dict((m.name, m) for m in self.collector.collect())

        self.assertEqual(
            metrics['c'].samples, [Sample('c_total', labels, 2.0)]
        )
        metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
        for sample in metrics['g'].samples:
            sample.labels.pop('hostname')
        for sample in metrics['gall'].samples:
            sample.labels.pop('hostname')
        self.assertEqual(metrics['g'].samples, [
            Sample('g', add_label('pid', '0'), 1.0),
            Sample('g', add_label('pid', '1'), 1.0),
        ])
        self.assertEqual(metrics['gall'].samples, [
            Sample('gall', add_label('pid', '0'), 1.0),
            Sample('gall', add_label('pid', '1'), 1.0),
        ])

        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0)))
        )
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 1.0),
            Sample('h_bucket', add_label('le', '5.0'), 2.0),
            Sample('h_bucket', add_label('le', '7.5'), 2.0),
            Sample('h_bucket', add_label('le', '10.0'), 2.0),
            Sample('h_bucket', add_label('le', '+Inf'), 2.0),
            Sample('h_count', labels, 2.0),
            Sample('h_sum', labels, 6.0),
        ]

        self.assertEqual(metrics['h'].samples, expected_histogram)