def test_reporter(metrics):
    """Metrics registered via sensors appear in every attached reporter's
    snapshot, with the reporter's prefix applied to each top-level group key."""
    reporter = DictReporter()
    foo_reporter = DictReporter(prefix='foo')
    metrics.add_reporter(reporter)
    metrics.add_reporter(foo_reporter)

    sensor = metrics.sensor('kafka.requests')
    sensor.add(metrics.metric_name('pack.bean1.avg', 'grp1'), Avg())
    sensor.add(metrics.metric_name('pack.bean2.total', 'grp2'), Total())
    sensor2 = metrics.sensor('kafka.blah')
    sensor2.add(metrics.metric_name('pack.bean1.some', 'grp1'), Total())
    sensor2.add(metrics.metric_name('pack.bean2.some', 'grp1',
                                    tags={'a': 42, 'b': 'bar'}), Total())

    # kafka-metrics-count > count is the total number of metrics and automatic
    expected = {
        'kafka-metrics-count': {'count': 5.0},
        'grp2': {'pack.bean2.total': 0.0},
        'grp1': {'pack.bean1.avg': 0.0, 'pack.bean1.some': 0.0},
        'grp1.a=42,b=bar': {'pack.bean2.some': 0.0},
    }
    assert expected == reporter.snapshot()

    # The prefixed reporter exposes the same values under 'foo.'-prefixed
    # group keys. Build a fresh dict rather than popping/re-inserting while
    # iterating, which also avoids shadowing the `metrics` fixture as the
    # original loop variable did.
    expected = {'foo.%s' % (key,): group for key, group in expected.items()}
    assert expected == foo_reporter.snapshot()
def test_Quotas(metrics):
    """Recording past an upper or lower quota bound raises
    QuotaViolationError, while the violating value is still recorded."""
    sensor = metrics.sensor('test')
    sensor.add(metrics.metric_name('test1.total', 'grp1'), Total(),
               MetricConfig(quota=Quota.upper_bound(5.0)))
    sensor.add(metrics.metric_name('test2.total', 'grp1'), Total(),
               MetricConfig(quota=Quota.lower_bound(0.0)))

    # Exceed the upper bound of 5.0: 5.0 then +1.0 trips the quota...
    sensor.record(5.0)
    with pytest.raises(QuotaViolationError):
        sensor.record(1.0)
    # ...yet the total still reflects the violating record (5.0 + 1.0).
    total_metric = metrics.metrics.get(metrics.metric_name('test1.total', 'grp1'))
    assert abs(6.0 - total_metric.value()) < EPS

    # Drop below the lower bound of 0.0 and trip the quota again.
    sensor.record(-6.0)
    with pytest.raises(QuotaViolationError):
        sensor.record(-1.0)
def test_simple_stats(mocker, time_keeper, config, metrics):
    """Exercise the basic stat types (Avg/Max/Min/Rate/Count/Percentiles)
    against a fixed sequence of recorded values under a mocked clock."""
    mocker.patch('time.time', side_effect=time_keeper.time)

    measurable = ConstantMeasurable()
    metrics.add_metric(
        metrics.metric_name(
            'direct.measurable', 'grp1',
            'The fraction of time an appender waits for space allocation.'),
        measurable)

    sensor = metrics.sensor('test.sensor')
    sensor.add(metrics.metric_name('test.avg', 'grp1'), Avg())
    sensor.add(metrics.metric_name('test.max', 'grp1'), Max())
    sensor.add(metrics.metric_name('test.min', 'grp1'), Min())
    sensor.add(metrics.metric_name('test.rate', 'grp1'), Rate(TimeUnit.SECONDS))
    sensor.add(metrics.metric_name('test.occurences', 'grp1'),
               Rate(TimeUnit.SECONDS, Count()))
    sensor.add(metrics.metric_name('test.count', 'grp1'), Count())
    percentiles = [Percentile(metrics.metric_name('test.median', 'grp1'), 50.0),
                   Percentile(metrics.metric_name('test.perc99_9', 'grp1'), 99.9)]
    sensor.add_compound(Percentiles(100, BucketSizing.CONSTANT, 100, -100,
                                    percentiles=percentiles))

    sensor2 = metrics.sensor('test.sensor2')
    sensor2.add(metrics.metric_name('s2.total', 'grp1'), Total())
    sensor2.record(5.0)

    def value_of(name):
        # Resolve a 'grp1' metric's current value by its short name.
        return metrics.metrics.get(metrics.metric_name(name, 'grp1')).value()

    count = 10
    sum_val = 0
    for sample in range(count):
        sensor.record(sample)
        sum_val += sample

    # prior to any time passing
    elapsed_secs = (config.time_window_ms * (config.samples - 1)) / 1000.0
    assert abs(count / elapsed_secs - value_of('test.occurences')) < EPS, \
        'Occurrences(0...%d) = %f' % (count, count / elapsed_secs)

    # pretend 2 seconds passed...
    sleep_time_seconds = 2.0
    time_keeper.sleep(sleep_time_seconds)
    elapsed_secs += sleep_time_seconds

    assert abs(5.0 - value_of('s2.total')) < EPS, \
        's2 reflects the constant value'
    assert abs(4.5 - value_of('test.avg')) < EPS, \
        'Avg(0...9) = 4.5'
    assert abs((count - 1) - value_of('test.max')) < EPS, \
        'Max(0...9) = 9'
    assert abs(0.0 - value_of('test.min')) < EPS, \
        'Min(0...9) = 0'
    assert abs((sum_val / elapsed_secs) - value_of('test.rate')) < EPS, \
        'Rate(0...9) = 1.40625'
    assert abs((count / elapsed_secs) - value_of('test.occurences')) < EPS, \
        'Occurrences(0...%d) = %f' % (count, count / elapsed_secs)
    assert abs(count - value_of('test.count')) < EPS, \
        'Count(0...9) = 10'
def test_duplicate_MetricName(metrics):
    """Registering a second metric under an already-used metric name fails."""
    first_sensor = metrics.sensor('test')
    first_sensor.add(metrics.metric_name('test', 'grp1'), Avg())
    # A different sensor reusing the same MetricName must raise ValueError.
    second_sensor = metrics.sensor('test2')
    with pytest.raises(ValueError):
        second_sensor.add(metrics.metric_name('test', 'grp1'), Total())