def test_gunicorn_logger_status_url(environ, log, statsd_metrics):
    """By default, access to the status endpoint is neither logged nor metered."""
    response, environ, delta, expected = access_extra_args(
        environ, '/_status/ping')
    cfg = Config()
    cfg.set('accesslog', '-')
    logger = gunicorn.GunicornLogger(cfg)
    statsd.get_client()  # force the statsd creation log message
    log[:] = []  # discard any log records emitted during setup
    logger.access(response, None, environ, delta)
    # status-url requests are suppressed: no access log line, no metric
    assert len(log) == 0
    assert len(statsd_metrics) == 0
def test_gunicorn_logger_status_url_enabled(environ, context, monkeypatch):
    """With TALISKER_LOGSTATUS set, status requests are logged but still not metered."""
    response, environ, delta, expected = access_extra_args(
        environ, '/_status/ping')
    cfg = Config()
    cfg.set('accesslog', '-')
    logger = gunicorn.GunicornLogger(cfg)
    statsd.get_client()  # force the statsd creation log message
    context.logs[:] = []  # discard any log records emitted during setup
    # opt in to logging of status-url requests
    monkeypatch.setitem(os.environ, 'TALISKER_LOGSTATUS', 'true')
    logger.access(response, None, environ, delta)
    # one access log line is emitted, but no statsd metric
    assert len(context.logs) == 1
    assert len(context.statsd) == 0
def before_task_publish(sender, body, **kwargs):
    """Start an enqueue timer for a task as it is published.

    The timer is stashed in thread-local storage keyed by task id, so the
    matching after-publish handler can stop it and emit the metric.
    """
    # TODO: find a way to avoid thread locals
    if not hasattr(_local, 'timers'):
        _local.timers = {}
    name = 'celery.{}.enqueue'.format(sender)
    timer = statsd.get_client().timer(name)
    # The task id may be absent from the body (e.g. celery message
    # protocol 2 carries it in the headers); guard rather than raising
    # KeyError before the timer is even started.
    task_id = body.get('id')
    if task_id is not None:
        _local.timers[task_id] = timer
    timer.start()
def before_task_publish(sender, body, headers=None, **kwargs):
    """Start an enqueue timer for a task as it is published.

    The timer is stashed in thread-local storage keyed by task id, so the
    matching after-publish handler can stop it and emit the metric. The id
    is looked up via get_id(), which handles both body- and header-carried
    task ids.
    """
    # avoid the mutable-default-argument pitfall: normalize None to {}
    if headers is None:
        headers = {}
    # TODO: find a way to avoid thread locals
    if not hasattr(_local, 'timers'):
        _local.timers = {}
    name = 'celery.{}.enqueue'.format(sender)
    timer = statsd.get_client().timer(name)
    task_id = get_id(body, headers)  # renamed from `id`: don't shadow the builtin
    if task_id is not None:
        _local.timers[task_id] = timer
    timer.start()
def task_prerun(sender, task_id, task, **kwargs):
    """Attach a started statsd run-timer to the task instance.

    The timer lives on the task object so the matching postrun handler
    can retrieve and stop it.
    """
    metric = 'celery.{}.run'.format(sender.name)
    timer = statsd.get_client().timer(metric)
    task.__talisker_timer = timer
    timer.start()
def signal(sender, **kwargs):
    """Increment a per-task counter for this signal.

    NOTE: `name` is a free variable captured from the enclosing scope
    (the signal's name) — this function is presumably built by a factory.
    """
    metric = 'celery.{}.{}'.format(sender.name, name)
    statsd.get_client().incr(metric)
def metrics(monkeypatch):
    """Fixture: yield the list of statsd stats collected during the test."""
    # avoid users environment causing failures
    monkeypatch.delitem(os.environ, 'STATSD_DSN', raising=False)
    with statsd.get_client().collect() as stats:
        yield stats