def prepare_response(self, span, downstream):
    """Build a TraceResponse for the current span, optionally recursing downstream.

    Captures the observed state of ``span`` into a TraceResponse. When a
    ``downstream`` target is given, the downstream call is issued while the
    span is installed in the stack context, and its TraceResponse is attached
    to ours.

    :param span: The currently active tracing span.
    :param downstream: Optional descriptor of the next hop to call; falsy to stop.
    :return: Raises ``tornado.gen.Return`` carrying the assembled TraceResponse.
    """
    response = TraceResponse(span=get_observed_span(span), notImplementedError='')
    if downstream:
        # Start the downstream call with the span on the stack context,
        # but yield on the future outside the context manager —
        # yielding inside a StackContext is not allowed.
        with request_context.span_in_stack_context(span):
            pending = self.call_downstream(span, downstream)
        response.downstream = yield pending
    raise tornado.gen.Return(response)
def run_coroutine_with_span(span, coro, *args, **kwargs):
    """Wrap the execution of a Tornado coroutine func in a tracing span.

    This makes the span available through the get_current_span() function.

    :param span: The tracing span to expose.
    :param coro: Co-routine to execute in the scope of tracing span.
    :param args: Positional args to func, if any.
    :param kwargs: Keyword args to func, if any.
    """
    # Install the span on the stack context for the duration of the call,
    # so code inside the coroutine can look it up via get_current_span().
    with span_in_stack_context(span):
        result = coro(*args, **kwargs)
    return result
def dispatch_wrapper(self, incoming):
    """Dispatch an incoming RPC request with a tracing span on the stack context.

    Extracts a span from the incoming request, installs it in the local
    stack context so downstream code can read it, delegates to the original
    ``_RPCDispatcher_dispatch``, and finishes the span when done.

    :param incoming: Incoming RPC request; ``incoming.message`` is expected
        to be a dict-like object with ``method`` and ``namespace`` keys.
    :return: Whatever the wrapped dispatcher returns.
    """
    message = incoming.message
    method = message.get('method')
    namespace = message.get('namespace')
    log.debug("dispatch target method: {}, namespace: {}".format(
        method, namespace))
    span = before_dispatcher(incoming)
    # Load span to local threading request context.
    # BUGFIX: finish the span even if dispatch raises — previously an
    # exception in the dispatcher leaked an unfinished span.
    try:
        with span_in_stack_context(span):
            ret = _RPCDispatcher_dispatch(self, incoming)
        log.debug("Dispatch done")
    finally:
        span.finish()
    return ret
def test__request_context_is_thread_safe(tracer):
    """
    Port of Uber's internal tornado-extras (by @sema).
    This test illustrates that the default Tornado's StackContext
    is not thread-safe. The test can be made to fail by commenting
    out these lines in the ThreadSafeStackContext constructor:

        if hasattr(self, 'contexts'):
            # only patch if context exists
            self.contexts = LocalContexts()
    """
    num_iterations = 1000
    num_workers = 10
    exception = [0]

    def async_task():
        # Small sleep forces interleaving between worker threads.
        time.sleep(0.001)
        assert get_current_span() is not None

    class Worker(Thread):
        def __init__(self, fn):
            super(Worker, self).__init__()
            self.fn = fn

        def run(self):
            try:
                for _ in range(0, num_iterations):
                    self.fn()
            except Exception as e:
                # Record the first failure so the main thread can re-raise it.
                exception[0] = e
                raise

    # All workers are created, started and joined while the span is on the
    # stack context, so each wrapped task should observe a current span.
    with span_in_stack_context(span='span'):
        workers = [Worker(wrap(async_task)) for _ in range(0, num_workers)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

    if exception[0]:
        raise exception[0]
def __call__(self, request, handler, next_mw):
    """Tracing middleware: wrap the downstream middleware chain in a server span.

    Starts a span for the incoming HTTP request, exposes it via the stack
    context while the next middleware is invoked, tags errors, and always
    finishes the span.

    :param request: Incoming Tornado HTTP request.
    :param handler: The request handler; its class name becomes part of the
        operation name.
    :param next_mw: Callable invoking the next middleware; returns a future.
    """
    # TODO find out if the route can be read from handler
    self._initializer.initialize_tracer()
    wrapped_request = http_server.TornadoRequestWrapper(request=request)
    span = http_server.before_request(request=wrapped_request)
    span.set_operation_name("{0}: {1}".format(request.method,
                                              handler.__class__.__name__))
    try:
        # Kick off the next middleware with the span installed; the yield
        # happens outside because we cannot yield inside StackContext.
        with request_context.span_in_stack_context(span=span):
            next_mw_future = next_mw()
        yield next_mw_future
    except Exception as e:
        span.set_tag('error', True)
        # NOTE(review): log_event is deprecated in newer opentracing
        # releases (use log_kv) — kept as-is for compatibility; confirm
        # the pinned opentracing version before changing.
        span.log_event(event='error', payload=e)
        raise
    finally:
        span.finish()
def post(self):
    """Handle a POST by forwarding to 'handler2' over the traced client channel.

    Starts the downstream JSON call while the span is on the stack context,
    yields on the resulting future outside it, and writes the downstream
    response body back to the caller. Errors are reported in the response
    body (still with status 200) rather than raised. Does nothing if no
    span is available.
    """
    span = self._get_span()
    if span:
        try:
            # Issue the call with the span installed; yield outside the
            # context manager (cannot yield inside StackContext).
            with span_in_stack_context(span):
                pending = self.client_channel.json(
                    service='handler2',
                    hostport=self.request.body,
                    endpoint="endpoint2",
                )
            result = yield pending
            body = result.body
        except Exception as e:
            traceback.print_exc()
            # Deliberate best-effort: surface the error in the body, keep 200.
            self.write('ERROR: %s' % e)
            self.set_status(200)
            return
        else:
            self.write(body)
            self.set_status(200)
        finally:
            span.finish()