import time

import pytest

# The Assemblyline-specific names used in these snippets (forge, Metrics,
# MetricsFactory, export_metrics_once, Counters, PerformanceTimer, get_random_id,
# get_client) are assumed to be importable from the surrounding project.


def test_metrics_export(redis_connection):
    channel = forge.get_metrics_sink(redis_connection)

    start = time.time()
    read = {}
    sent = False

    for metric_message in channel.listen(blocking=False):
        if 'counter' in read and 'performance_counter.t' in read:
            break

        # Give up after 20 seconds, failing with whatever metrics were read so far.
        if time.time() - start > 20:
            assert False, read

        if not sent:
            sent = True
            export_metrics_once('test',
                                Metrics, {
                                    'counter': 99,
                                    'performance_counter': 6
                                },
                                redis=redis_connection)

        if metric_message is None:
            time.sleep(0.1)
            continue

        if metric_message['type'] == 'test':
            for key, value in metric_message.items():
                if isinstance(value, (int, float)):
                    read[key] = read.get(key, 0) + value

    assert read['counter'] == 99
    assert read['performance_counter.t'] == 6
    assert read['performance_counter.c'] == 1


def test_metrics_counter(redis_connection):
    source = MetricsFactory('test', Metrics, redis=redis_connection)

    channel = forge.get_metrics_sink(redis_connection)
    channel.listen(blocking=False)

    source.increment('counter', 55)
    source.increment_execution_time('performance_counter', 6)
    source.increment_execution_time('performance_counter', 6)

    start = time.time()
    read = {}
    for metric_message in channel.listen(blocking=False):
        if 'counter' in read and 'performance_counter.t' in read:
            break

        # Give up after 30 seconds, failing with whatever metrics were read so far.
        if time.time() - start > 30:
            pytest.fail(str(read))

        if metric_message is None:
            time.sleep(0.1)
            continue

        if metric_message['type'] == 'test':
            for key, value in metric_message.items():
                if isinstance(value, (int, float)):
                    read[key] = read.get(key, 0) + value

    assert read['counter'] == 55
    assert read['performance_counter.t'] == 12
    assert read['performance_counter.c'] == 2

    source.stop()
Example #3
    def __init__(self,
                 name,
                 host=None,
                 export_interval_secs=None,
                 counter_type=None,
                 config=None,
                 redis=None,
                 counter_names=None,
                 timer_names=None,
                 export_zero=True):
        config = config or forge.get_config()
        self.channel = forge.get_metrics_sink(redis)
        self.export_interval = export_interval_secs or config.core.metrics.export_interval
        self.name = name
        self.host = host or get_random_id()
        self.type = counter_type or name
        self.export_zero = export_zero

        # Field names this factory tracks; counter_names and timer_names must be
        # provided, since set(None) would raise a TypeError.
        self.counter_schema = set(counter_names)
        self.timer_schema = set(timer_names)

        # Running totals for the plain counters.
        self.counts = Counters({key: 0 for key in self.counter_schema})
        self.values = {}
        self.lock = threading.Lock()
        self.scheduler = None
        self.reset()

        assert self.channel
        assert self.export_interval > 0
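
# Usage sketch (not from the original source): constructing a MetricsFactory with the
# keyword arguments of the __init__ shown above. The field names, interval and
# counter_type values are illustrative assumptions, and note that the tests earlier in
# this file call MetricsFactory with a different, schema-based signature.
# Assumed import paths for the Assemblyline helpers:
#   from assemblyline.common import forge
#   from assemblyline.remote.datatypes import get_client
config = forge.get_config()
redis_connection = get_client(config.core.metrics.redis.host,
                              config.core.metrics.redis.port, False)

factory = MetricsFactory(
    name='service_server',                   # illustrative name
    counter_type='test',
    counter_names=['counter'],                # plain counter fields
    timer_names=['performance_counter'],      # PerformanceTimer fields
    redis=redis_connection,
    export_interval_secs=5,
)

factory.increment('counter', 1)                                  # bump a plain counter
factory.increment_execution_time('performance_counter', 0.25)    # adds to .t, bumps .c
factory.stop()                                                   # stop exporting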
Example #4
def export_metrics_once(name,
                        schema,
                        metrics,
                        host=None,
                        counter_type=None,
                        config=None,
                        redis=None):
    """Manually publish metric counts to the metrics system.

    This was built for when the service server is reporting metrics for execution and caching
    on behalf of many services. At the moment the metrics system uses the hosts to count the number
    of instances of each service. This could be done with a single auto exporting counter for
    the service server, but that may require significant downstream changes in the metrics system.
    """
    config = config or forge.get_config()
    redis = redis or get_client(config.core.metrics.redis.host,
                                config.core.metrics.redis.port, False)

    # Separate out the timers and normal counters
    timer_schema = set()
    counter_schema = set()

    for _k, field_type in schema.fields().items():
        if isinstance(field_type, PerformanceTimer):
            timer_schema.add(_k)
        else:
            counter_schema.add(_k)

    # Any '<timer>_count' companion field is covered by the '.c' suffix below,
    # so drop it from the plain counters.
    for _k in timer_schema:
        counter_schema.discard(_k + '_count')

    channel = forge.get_metrics_sink(redis)

    counts = Counters({key: 0 for key in counter_schema})
    counts.update({key + '.t': 0 for key in timer_schema})
    counts.update({key + '.c': 0 for key in timer_schema})

    for metric, value in metrics.items():
        if metric in counter_schema:
            counts[metric] += value
        elif metric in timer_schema:
            # Timers publish a call count ('.c') and a total elapsed time ('.t').
            counts[metric + ".c"] += 1
            counts[metric + ".t"] += value
        else:
            raise ValueError(f"{metric} is not an accepted counter")

    counts['type'] = counter_type or name
    counts['name'] = name
    counts['host'] = host

    channel.publish(dict(counts.items()))
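
# Usage sketch (not from the original source): a one-shot export mirroring the call
# made in test_metrics_export above. 'Metrics' is the same schema the tests use and
# 'redis_connection' the same client/fixture; the host value is illustrative.
export_metrics_once('test',
                    Metrics,
                    {'counter': 99, 'performance_counter': 6},
                    host='service-host-1',              # illustrative host id
                    redis=redis_connection)

# The published message, as seen by forge.get_metrics_sink(...).listen(), should look
# roughly like:
#   {'type': 'test', 'name': 'test', 'host': 'service-host-1',
#    'counter': 99, 'performance_counter.t': 6, 'performance_counter.c': 1}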
    def __enter__(self):
        self.channel = forge.get_metrics_sink(self.redis)
        self.sync_messages()
        return self
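
# Usage sketch (not from the original source): the __enter__ above suggests its owner
# is used as a context manager. 'MetricsProxy' is a hypothetical name for that owner,
# and it is assumed to define a matching __exit__ that tears the channel down.
with MetricsProxy(redis=redis_connection) as proxy:
    # __enter__ opened the metrics sink channel and called sync_messages(), which
    # presumably flushes anything queued before the channel existed.
    proxy.channel.publish({'type': 'test', 'name': 'test', 'counter': 1})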