def test_make_record_global_extra():
    """Global extra values set via set_global_extra appear on emitted records."""
    Context.new()
    structured_logger = logs.StructuredLogger('test')
    logs.set_global_extra({'a': 1})
    rec = structured_logger.makeRecord(*record_args())
    assert rec.__dict__['a'] == 1
    assert rec._structured == {'a': 1}
def test_make_record_context_renamed():
    """A context key that collides with a global extra is renamed with a '_' suffix."""
    Context.new()
    structured_logger = logs.StructuredLogger('test')
    logs.set_global_extra({'a': 1})
    logs.logging_context.push(a=2)
    rec = structured_logger.makeRecord(*record_args())
    # global extra keeps the plain key; the context value gets 'a_'
    assert rec._structured == {'a': 1, 'a_': 2}
def test_make_record_context_extra():
    """Values pushed onto the logging context show up on emitted records."""
    Context.new()
    structured_logger = logs.StructuredLogger('test')
    logs.logging_context.push(a=1)
    rec = structured_logger.makeRecord(*record_args())
    assert rec.__dict__['a'] == 1
    assert rec._structured == {'a': 1}
def test_celery_basic(celery_signals):
    """End-to-end smoke test of talisker's celery integration.

    Spawns a real celery worker subprocess, dispatches three task flavours
    (plain, raising, request-id-propagating), and checks the worker's
    structured log output for task metadata and request-id propagation.
    """
    from tests.celery_app import basic_task, error_task, propagate_task
    cmd = ['talisker.celery', 'worker', '-q', '-A', 'tests.celery_app']
    with ServerProcess(cmd) as pr:
        # wait until the worker reports it is ready to consume tasks
        pr.wait_for_output(' ready.', timeout=30)
        Context.new()
        result = basic_task.delay()
        error_result = error_task.delay()
        # enqueue inside a request_id context so the id propagates to the task
        with request_id('myid'):
            propagate = propagate_task.delay()
        output = result.wait(timeout=3)
        # error_task is expected to raise when its result is collected
        with pytest.raises(Exception):
            error_result.wait(timeout=3)
        propagate.wait(timeout=3)
    assert output == 'basic'
    pr.log.assert_log(
        msg='basic task',
        extra={'task_name': 'tests.celery_app.basic_task'},
    )
    # request id pushed at enqueue time must appear in the task's log record
    pr.log.assert_log(
        msg='propagate_task',
        extra={'request_id': 'myid'},
    )
def test_logging_context_ctx():
    """logging_context nests as a context manager and restores state on exit."""
    Context.new()
    ctx = logs.logging_context
    with ctx(a=1):
        assert ctx.flat == {'a': 1}
        with ctx(a=2):
            # inner frame shadows the outer value
            assert ctx.flat == {'a': 2}
        # inner frame popped, outer value restored
        assert ctx.flat == {'a': 1}
def test_logging_context_push():
    """push/pop manipulate the logging context as a symmetric stack."""
    Context.new()
    ctx = logs.logging_context
    ctx.push(a=1)
    assert ctx.flat == {'a': 1}
    ctx.push(a=2)
    # most recent frame wins in the flattened view
    assert ctx.flat == {'a': 2}
    ctx.pop()
    assert ctx.flat == {'a': 1}
    ctx.pop()
    assert ctx.flat == {}
def test_tracking():
    """Context.track accumulates per-name call counts and total time."""
    Context.new()
    Context.track('sql', 1.0)
    Context.track('sql', 2.0)
    Context.track('http', 3.0)
    # each tracked name accumulates (count, total time) independently
    for name, expected_count, expected_time in (
            ('sql', 2, 3.0),
            ('http', 1, 3.0)):
        info = Context.current().tracking[name]
        assert info.count == expected_count
        assert info.time == expected_time
def test_make_record_ordering():
    """Structured keys are ordered: per-call extras, then context, then global."""
    Context.new()
    structured_logger = logs.StructuredLogger('test')
    logs.set_global_extra({'global': 1})
    logs.logging_context.push(context=2)
    user_extra = OrderedDict([('user1', 3), ('user2', 4)])
    rec = structured_logger.makeRecord(*record_args(), extra=user_extra)
    expected_order = ['user1', 'user2', 'context', 'global']
    assert list(rec._structured.keys()) == expected_order
def test_make_record_all_extra():
    """Global, context, and per-call extras all land on the record."""
    Context.new()
    structured_logger = logs.StructuredLogger('test')
    logs.set_global_extra({'a': 1})
    logs.logging_context.push(b=2)
    rec = structured_logger.makeRecord(*record_args(), extra={'c': 3})
    # every source of extra data is merged onto the record's __dict__
    for key, value in (('a', 1), ('b', 2), ('c', 3)):
        assert rec.__dict__[key] == value
    assert rec._structured == {'a': 1, 'b': 2, 'c': 3}
def worker():
    # Runs in a separate thread: verifies that Context state (logging stack
    # and tracking counters) is isolated per thread.
    # NOTE(review): e1/e2 appear to be threading.Event objects from the
    # enclosing test scope, used to interleave this thread with the main
    # thread — confirm against the enclosing function.
    Context.new()
    Context.logging.push(a=2)
    Context.track('test', 1.0)
    e1.set()   # signal: this thread has pushed its own context
    e2.wait()  # wait for the main thread to check its own (unchanged) state
    assert Context.logging.flat == {'a': 2}
    Context.logging.pop()
    e1.set()   # signal: this thread has popped its frame
    assert Context.logging.flat == {}
    # tracking in this thread's context is independent of the main thread's
    assert Context.current().tracking['test'].count == 1
def test_request_timeout():
    """@request_timeout installs deadline and soft timeout on the context."""
    Context.new()
    result = {}

    # timeout/soft_timeout are in milliseconds
    @request_timeout(timeout=1000, soft_timeout=500)
    def f():
        # capture the values the decorator installed on the current context
        result['timeout'] = Context.current().deadline
        result['soft_timeout'] = Context.soft_timeout

    f()
    # NOTE(review): exact equality against time.time() evaluated here only
    # holds if the test clock is frozen/mocked by a fixture not visible in
    # this chunk; with a real clock this assertion would be flaky — confirm.
    assert result['timeout'] == time.time() + 1.0
    assert result['soft_timeout'] == 500
def test_context_api():
    """Exercise the Context class-level API: logging, request_id, tracking, clear."""
    Context.new()
    Context.logging.push(a=1)
    Context.request_id = 'id'
    Context.track('test', 1.0)
    populated = Context.current()
    assert populated.logging.flat == {'a': 1}
    assert populated.request_id == 'id'
    assert populated.tracking['test'].count == 1
    assert populated.tracking['test'].time == 1.0
    # clear() resets all per-request state
    Context.clear()
    cleared = Context.current()
    assert cleared.logging.flat == {}
    assert cleared.request_id is None
    assert cleared.tracking == {}
def task_prerun(sender, task_id, task, **kwargs):
    """Celery task_prerun signal handler.

    Sets up a fresh talisker Context for the task, propagates the request
    id from the enqueueing caller, records queue latency, and pushes task
    metadata onto the logging context.
    """
    Context.new()
    # propagate request id from the message headers, if the caller set one
    rid = get_header(task.request, REQUEST_ID)
    if rid is not None:
        Context.request_id = rid
    start_time = get_header(task.request, ENQUEUE_START)
    if start_time is not None:
        # latency between enqueue and execution start, in milliseconds
        ms = (time.time() - start_time) * 1000
        CeleryMetric.queue_latency.observe(ms, job=sender.name)
    if hasattr(task, 'talisker_timestamp') and task.request.is_eager:
        # eager task, but a retry has happened immediately, so emit the
        # run metric for the previous attempt and record the retry
        send_run_metric(sender.name, task.talisker_timestamp)
        task_retry(sender)
    # timestamp used later (task_postrun) to compute task run time
    task.talisker_timestamp = time.time()
    talisker.logs.logging_context.push(task_id=task_id, task_name=task.name)
def task_prerun(sender, task_id, task, **kwargs):
    """Celery task_prerun signal handler.

    Sets up a fresh talisker Context for the task, propagates the request
    id from the enqueueing caller, records queue latency, emulates retry
    signal ordering for eager mode on celery 4.x, and pushes task metadata
    onto the logging context.
    """
    Context.new()
    # propagate request id from the message headers, if the caller set one
    rid = get_header(task.request, REQUEST_ID)
    if rid is not None:
        Context.request_id = rid
    start_time = get_header(task.request, ENQUEUE_START)
    if start_time is not None:
        # latency between enqueue and execution start, in milliseconds
        ms = (time.time() - start_time) * 1000
        CeleryMetric.queue_latency.observe(ms, job=sender.name)
    if hasattr(task, 'talisker_timestamp') and task.request.is_eager:
        # If there is a retry, celery behavior is to send signals in the
        # following order:
        # task-pre -> retry -> task-post
        # (retry 1): task-pre -> (retry again, if needed) -> task-post
        # in task-post, task latency is stored (via a call to
        # `send_run_metric`.)
        # So for a task with 2 retries, this looks something like:
        # pre -> retry -> post -> pre -> retry -> post -> pre (max retries
        # reached, so no more retry) -> post
        # In celery 4.x:
        # When the task is called eagerly, because the call is synchronous
        # the call stack ends up looking something like:
        # pre -> pre -> pre -> post -> post -> post
        # In celery 5.x, the behavior has been fixed and works similarly
        # in both eager and non-eager mode
        # To emulate non-eager behavior in eager mode for celery 4.x, we check
        # if the request is eager (task.request.is_eager) and it's a retry
        # (task.talisker_timestamp is already set). If so, we manually track
        # the calls to retry and task-post here.
        task_retry(sender)
        send_run_metric(sender.name, task.talisker_timestamp)
    # timestamp used later (task_postrun) to compute task run time
    task.talisker_timestamp = time.time()
    talisker.logs.logging_context.push(task_id=task_id, task_name=task.name)
def test_gunicorn_worker_exit(wsgi_env, context):
    """On worker exit, in-flight requests are logged as timed out and
    reported to sentry."""
    wsgi_env['start_time'] = time.time()
    wsgi_env['REQUEST_ID'] = 'ID'
    Context.new()
    Context.request_id = 'ID'
    # register a request that is still in flight when the worker exits
    inflight = talisker.wsgi.TaliskerWSGIRequest(wsgi_env, None, [])
    talisker.wsgi.REQUESTS['ID'] = inflight

    class FakeWorker:
        pid = 100

    gunicorn.gunicorn_worker_exit(None, FakeWorker())
    context.assert_log(
        name='talisker.wsgi',
        msg='GET /',
        extra={'request_id': 'ID', 'timeout': True},
    )
    assert len(context.sentry) == 1
def test_context_thread():
    """Context state (logging stack, tracking) is isolated between threads.

    Uses two events to interleave the worker thread with the main thread so
    each can observe the other's mutations having no effect on its own
    context.
    """
    e1 = threading.Event()
    e2 = threading.Event()

    def worker():
        # fresh context in this thread; pushes a different value than main
        Context.new()
        Context.logging.push(a=2)
        Context.track('test', 1.0)
        e1.set()   # signal: worker context populated
        e2.wait()  # wait for main thread to verify its own state
        assert Context.logging.flat == {'a': 2}
        Context.logging.pop()
        e1.set()   # signal: worker frame popped
        assert Context.logging.flat == {}
        assert Context.current().tracking['test'].count == 1

    t = threading.Thread(target=worker)
    Context.new()
    Context.track('test', 1.0)
    Context.logging.push(a=1)
    assert Context.logging.flat == {'a': 1}
    t.start()
    e1.wait()
    e1.clear()
    # worker pushed a=2 in its own context; main thread still sees a=1
    assert Context.logging.flat == {'a': 1}
    assert Context.current().tracking['test'].count == 1
    e2.set()
    e1.wait()
    # worker popped its frame; main thread state still untouched
    assert Context.logging.flat == {'a': 1}
    t.join()
def start(self):
    """Install test instrumentation: statsd client, log handler, sentry
    context, and a fresh talisker Context.

    Saves the previous statsd client in self.old_statsd so it can be
    restored later (presumably by a matching stop/teardown — confirm).
    """
    self.old_statsd = talisker.statsd.get_client.raw_update(
        self.statsd_client)
    # capture everything: NOTSET lets all levels through to the handler
    talisker.logs.add_talisker_handler(logging.NOTSET, self.handler)
    self.sentry_context.start()
    Context.new()
def __call__(self, environ, start_response):
    """WSGI entry point: set up talisker request context, then run the app.

    Responsibilities, in order: create a fresh Context, normalise the
    environ (request id, sentry id, client route), honour X-Debug for
    trusted addresses, apply a request deadline (header or config), track
    the in-flight request, and wrap the wrapped app's response so errors
    and timeouts are converted into proper responses.
    """
    Context.new()
    config = talisker.get_config()
    # setup environment
    environ['start_time'] = Context.current.start_time
    if self.environ:
        environ.update(self.environ)
    # ensure request id
    if config.wsgi_id_header not in environ:
        environ[config.wsgi_id_header] = str(uuid.uuid4())
    rid = environ[config.wsgi_id_header]
    environ['REQUEST_ID'] = rid
    # needs to be different from request id, as request can be passed on to
    # upstream services
    environ['SENTRY_ID'] = uuid.uuid4().hex
    Context.request_id = rid
    Context.current.soft_timeout = config.soft_request_timeout
    # calculate ip route
    route = None
    try:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            route = [a.strip() for a in forwarded.split(',')]
        elif "REMOTE_ADDR" in environ:
            route = [environ["REMOTE_ADDR"]]
    except Exception as e:
        # best-effort: a malformed header must not break the request
        logger.exception(e)
    else:
        if route is not None:
            environ['ACCESS_ROUTE'] = route
            environ['CLIENT_ADDR'] = route[-1]
    # X-Debug is only honoured for trusted client addresses
    if 'HTTP_X_DEBUG' in environ:
        if config.is_trusted_addr(environ.get('CLIENT_ADDR')):
            Context.set_debug()
        else:
            logger.warning('X-Debug header set but not trusted IP address',
                           extra={
                               "access_route": ','.join(
                                   environ.get('ACCESS_ROUTE')),
                               "x_debug": environ['HTTP_X_DEBUG'],
                           })
    set_deadline = False
    header_deadline = environ.get('HTTP_X_REQUEST_DEADLINE')
    if header_deadline:
        try:
            deadline = datetime.strptime(
                header_deadline, "%Y-%m-%dT%H:%M:%S.%fZ",
            )
        except ValueError:
            # unparseable header deadline is ignored; config timeout applies
            pass
        else:
            # set deadline directly
            # TODO: validate deadline is in future?
            Context.current.deadline = datetime_to_timestamp(deadline)
            set_deadline = True
    if not set_deadline and config.request_timeout is not None:
        Context.current.set_deadline(config.request_timeout)
    # create the response container
    request = TaliskerWSGIRequest(environ, start_response, self.headers)
    # track in-flight requests
    if rid in REQUESTS:
        logger.warning('duplicate request id received by gunicorn worker',
                       extra={'request_id': rid})
    else:
        REQUESTS[rid] = request
    try:
        response_iter = self.app(environ, request.start_response)
    except Exception as e:
        # store details for later
        request.exc_info = sys.exc_info()
        if isinstance(e, RequestTimeout):
            request.timedout = True
        # switch to generating an error response
        response_iter = request.error(request.exc_info)
    except SystemExit as e:
        # non-zero exit: record the failure and close out the request
        # before letting the exit propagate
        if e.code != 0:
            request.exc_info = sys.exc_info()
            request.finish_request()
        raise
    return request.wrap_response(response_iter)
def test_extra_logging():
    """extra_logging pushes its dict onto the logging context while active."""
    Context.new()
    with logs.extra_logging({'a': 1}):
        flat = logs.logging_context.flat
        assert flat == {'a': 1}
def test_make_record_no_extra():
    """With no extras configured anywhere, the structured dict is empty."""
    Context.new()
    rec = logs.StructuredLogger('test').makeRecord(*record_args())
    assert rec._structured == {}
def test_set_logging_context():
    """set_logging_context pushes keyword values onto the logging context."""
    Context.new()
    logs.set_logging_context(a=1)
    flat = logs.logging_context.flat
    assert flat == {'a': 1}