def test_exception_tracking(self):
    """The client interceptor counts both the call and the raised TimeoutError.

    Fix: removed a leftover debug ``print(...)  # noqa`` that was committed
    alongside the assertions.
    """
    servicer = MagicMock(test_service_pb2_grpc.TestServiceServicer)
    # Server handler sleeps past the client-side timeout so the call fails.
    servicer.UnaryUnary = lambda x, _: time.sleep(10)
    count_labels = {
        "client_name": "foo",
        "server_name": "bar",
        "service": "eagr_TestService",
        "endpoint": "UnaryUnary",
    }
    exception_labels = dict(count_labels, exception="TimeoutError")
    with inprocess_grpc_server(
            servicer,
            test_service_pb2_grpc.add_TestServiceServicer_to_server) as address:
        set_global_tracer(global_tracer())
        client = make_grpc_client("foo", "bar", address,
                                  test_service_pb2_grpc.TestServiceStub)
        # The registry is process-global and other tests may have touched these
        # series, so assert on deltas rather than absolute sample values.
        calls_before = (REGISTRY.get_sample_value(
            "clientside_grpc_endpoint_count", labels=count_labels) or 0)
        exceptions_before = (REGISTRY.get_sample_value(
            "clientside_grpc_endpoint_error_total",
            labels=exception_labels) or 0)
        with self.assertRaises(TimeoutError):
            client.UnaryUnary(StringValue(value="foo"), timeout=1)
        calls_after = REGISTRY.get_sample_value(
            "clientside_grpc_endpoint_count", labels=count_labels)
        exceptions_after = REGISTRY.get_sample_value(
            "clientside_grpc_endpoint_error_total", labels=exception_labels)
        self.assertEqual(1, calls_after - calls_before)
        self.assertEqual(1, exceptions_after - exceptions_before)
def test_call_metrics(self):
    """The per-endpoint Prometheus counter advances by one on every client call."""
    servicer = MagicMock(test_service_pb2_grpc.TestServiceServicer)
    servicer.UnaryUnary = lambda x, _: x  # echo handler
    with inprocess_grpc_server(
            servicer,
            test_service_pb2_grpc.add_TestServiceServicer_to_server) as address:
        set_global_tracer(global_tracer())
        client = make_grpc_client("foo", "bar", address,
                                  test_service_pb2_grpc.TestServiceStub)
        payload = StringValue(value="foo")
        labels = {
            "client_name": "foo",
            "server_name": "bar",
            "service": "eagr_TestService",
            "endpoint": "UnaryUnary",
        }
        # Call twice and check the counter tracks the call count exactly.
        for expected in (1, 2):
            self.assertEqual(payload, client.UnaryUnary(payload))
            observed = REGISTRY.get_sample_value(
                "clientside_grpc_endpoint_count", labels=labels)
            self.assertEqual(expected, observed)
async def main():
    """Run the chat-server actor and fan responses out to every connected client."""
    tracer = init_jaeger_tracer()
    opentracing.set_global_tracer(tracer)

    context = RootContext()
    Serialization().register_file_descriptor(DESCRIPTOR)
    Remote().start("127.0.0.1", 8000)

    clients = []

    async def broadcast(ctx, message):
        # Deliver the same payload to each client, in connection order.
        for client in clients:
            await ctx.send(client, message)

    async def process_message(ctx: AbstractContext):
        msg = ctx.message
        if isinstance(msg, Connect):
            print(f'Client {msg.sender} connected')
            clients.append(msg.sender)
            await ctx.send(msg.sender, Connected(message='Welcome!'))
        elif isinstance(msg, SayRequest):
            await broadcast(ctx, SayResponse(user_name=msg.user_name,
                                             message=msg.message))
        elif isinstance(msg, NickRequest):
            await broadcast(ctx, NickResponse(old_user_name=msg.old_user_name,
                                              new_user_name=msg.new_user_name))

    props = OpenTracingFactory.get_props_with_open_tracing(
        Props.from_func(process_message), span_setup, span_setup)
    context.spawn_named(props, 'chatserver')
    input()  # keep the process alive until the operator presses Enter
def test_set_global_tracer(mock_obj):
    """Each registration must make its argument the value global_tracer returns."""
    for candidate in (mock.Mock(), mock_obj):
        opentracing.set_global_tracer(candidate)
        assert opentracing.global_tracer() is candidate
def init_tracer(hs: "HomeServer"):
    """Set the whitelists and initialise the JaegerClient tracer"""
    # NOTE: this module keeps `opentracing` as a module-level global and nulls
    # it out when tracing is disabled, so callers can test `if opentracing:`.
    global opentracing
    if not hs.config.opentracer_enabled:
        # We don't have a tracer
        opentracing = None
        return

    # Tracing was requested but the optional dependencies are missing.
    if not opentracing or not JaegerConfig:
        raise ConfigError(
            "The server has been configured to use opentracing but opentracing is not "
            "installed.")

    # Pull out the jaeger config if it was given. Otherwise set it to something sensible.
    # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py

    set_homeserver_whitelist(hs.config.opentracer_whitelist)

    # Imported lazily so the module loads even without jaeger_client installed.
    from jaeger_client.metrics.prometheus import PrometheusMetricsFactory

    config = JaegerConfig(
        config=hs.config.jaeger_config,
        # Include the worker instance name so multi-process deployments are
        # distinguishable in the tracing UI.
        service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
        scope_manager=LogContextScopeManager(hs.config),
        metrics_factory=PrometheusMetricsFactory(),
    )

    # If we have the rust jaeger reporter available let's use that.
    if RustReporter:
        logger.info("Using rust_python_jaeger_reporter library")
        tracer = config.create_tracer(RustReporter(), config.sampler)
        opentracing.set_global_tracer(tracer)
    else:
        # Falls back to jaeger_client's own reporter and global registration.
        config.initialize_tracer()
async def start(argv):
    """Connect a chat-client actor to the server and relay console input to it."""
    tracer = init_jaeger_tracer()
    opentracing.set_global_tracer(tracer)

    middleware = open_tracing_middleware.open_tracing_sender_middleware(tracer)

    Serialization().register_file_descriptor(DESCRIPTOR)
    Remote().start("127.0.0.1", 12001)

    server = PID(address='127.0.0.1:8000', id='chatserver')
    context = RootContext(MessageHeader(), [middleware])

    props = OpenTracingFactory.get_props_with_open_tracing(
        Props.from_func(process_message), span_setup, span_setup, tracer)
    client = context.spawn(props)
    await context.send(server, Connect(sender=client))

    nick = 'Alex'
    while True:
        text = input()
        if text == '/exit':
            return
        if text.startswith('/nick '):
            # "/nick NewName" renames the user and announces it to the server.
            new_nick = text.split(' ')[1]
            await context.send(server, NickRequest(old_user_name=nick,
                                                   new_user_name=new_nick))
            nick = new_nick
        else:
            await context.send(server, SayRequest(user_name=nick, message=text))
def initialize_tracer(tracer=None):
    """Install *tracer* as the global tracer.

    Any falsy value (including the default ``None``) is replaced by a dummy
    ``Tracer`` named after the current service, to be swapped for a real
    implementation later.
    """
    chosen = tracer or Tracer(get_service_name())
    set_global_tracer(chosen)
def init_tracer(config: dict):
    """Build a Jaeger tracer from *config* and register it globally.

    *config* must contain a ``"service_name"`` key; the mapping is also passed
    through to ``jaeger_client.Config``. A ``close_tracer`` atexit hook is
    registered to flush spans at interpreter shutdown.

    Fix: the original rebound the ``config`` parameter to a ``jaeger_client.Config``
    instance and named a local ``global_tracer`` (shadowing
    ``opentracing.global_tracer``); both locals are now distinctly named.
    """
    tracer_config = jaeger_client.Config(
        config=config,
        validate=True,
        # ContextVars-based scope manager plays well with asyncio code.
        scope_manager=contextvars.ContextVarsScopeManager(),
        service_name=config["service_name"],
    )
    tracer = tracer_config.new_tracer()
    opentracing.set_global_tracer(tracer)
    atexit.register(close_tracer)
def setup(app: FastAPI, tracer) -> None:
    """Enable tracing for *app*.

    Registers *tracer* as the process-wide global tracer, then attaches the
    Starlette tracing middleware (which uses the same tracer) to the app.
    """
    set_global_tracer(tracer)
    app.add_middleware(StarletteTracingMiddleWare, tracer=tracer)
def setup_tracer():
    """Build a Haystack tracer over a LoggerRecorder and register it globally."""
    global recorder
    recorder = LoggerRecorder()
    # common_tags are stamped onto every trace this service emits.
    tracer = HaystackTracer("Service-A", recorder,
                            common_tags={"app.version": "1234"})
    # After this, any code can reach the tracer via opentracing.tracer.
    opentracing.set_global_tracer(tracer)
def test_set_global_tracer():
    """Registration works repeatedly: the latest tracer always wins."""
    for _ in range(2):
        tracer = mock.Mock()
        opentracing.set_global_tracer(tracer)
        assert opentracing.global_tracer() is tracer
        assert opentracing.is_global_tracer_registered()
def tracer():
    """Yield a Jaeger tracer installed as the global tracer; always reset after."""
    test_tracer = Tracer(
        service_name='test-tracer',
        sampler=ConstSampler(True),
        reporter=InMemoryReporter(),
        scope_manager=TornadoScopeManager(),
    )
    opentracing.set_global_tracer(test_tracer)
    try:
        yield test_tracer
    finally:
        # Undo the global registration and release tracer resources even if
        # the consuming test fails.
        opentracing._reset_global_tracer()
        test_tracer.close()
def test_span_tags(encoding, operation, tracer, thrift_service):
    """Baggage set on the client's root span must be visible server-side, and
    client + server + root spans must all share one trace id."""
    server = TChannel('server', tracer=tracer)
    server.listen()

    def get_span_baggage():
        # Read the 'bender' baggage item off the server-side current span.
        sp = server.context_provider.get_current_span()
        baggage = sp.get_baggage_item('bender') if sp else None
        return {'bender': baggage}

    @server.json.register('foo')
    def handler(_):
        return get_span_baggage()

    @server.thrift.register(thrift_service.X, method='thrift2')
    def thrift2(_):
        return json.dumps(get_span_baggage())

    client = TChannel('client', tracer=tracer, trace=True)
    opentracing.set_global_tracer(tracer)
    span = tracer.start_span('root')
    span.set_baggage_item('bender', 'is great')
    with span:
        res = None
        with client.context_provider.span_in_context(span):
            if encoding == 'json':
                res = client.json(
                    service='test-service',  # match thrift_service name
                    endpoint='foo',
                    body={},
                    hostport=server.hostport,
                )
            elif encoding == 'thrift':
                res = client.thrift(
                    thrift_service.X.thrift2(),
                    hostport=server.hostport,
                )
            else:
                raise ValueError('Unknown encoding %s' % encoding)
        res = yield res  # cannot yield in StackContext
    res = res.body
    # Thrift responses arrive as JSON strings; normalize to a dict.
    if isinstance(res, six.string_types):
        res = json.loads(res)
    assert res == {'bender': 'is great'}

    # Spans are reported asynchronously; poll up to ~1s for all three.
    for i in range(1000):
        spans = tracer.reporter.get_spans()
        if len(spans) == 3:
            break
        yield tornado.gen.sleep(0.001)  # yield execution and sleep for 1ms

    spans = tracer.reporter.get_spans()
    assert len(spans) == 3
    trace_ids = set([s.trace_id for s in spans])
    assert 1 == len(trace_ids), \
        'all spans must have the same trace_id: %s' % trace_ids
def main() -> None:
    """Wire up all services, mount their blueprints, and run the Sanic app.

    Fix: the original built the shopfront blueprint twice with identical
    arguments; the duplicate statement is removed.
    """
    asyncio.set_event_loop(uvloop.new_event_loop())

    http_session = Client()
    bakery_client = bakery.HttpClient("http://localhost:8000/bakery",
                                      client=http_session)
    bakery_service = bakery.Service()
    staff_room = staffroom.service.Service()
    shopfront_service = shopfront.service.Service(bakery=bakery_client,
                                                  staff_room=staff_room)
    unknotter_service = unknotter.service.Service()

    exporter = unknotter.exporter.Exporter(unknotter=unknotter_service)
    set_exporter(exporter)

    tracer = Tracer()
    opentracing.set_global_tracer(tracer)

    bakery_blueprint = bakery.blueprint.factory(bakery=bakery_service)
    shopfront_blueprint = shopfront.blueprint.factory(
        shopfront=shopfront_service)
    unknotter_blueprint = unknotter.blueprint.factory(
        unknotter=unknotter_service)

    app = Sanic()
    app.blueprint(bakery_blueprint, url_prefix="/bakery")
    app.blueprint(shopfront_blueprint, url_prefix="/shopfront")
    app.blueprint(unknotter_blueprint, url_prefix="/unknotter")

    server = app.create_server(host="127.0.0.1", port=8000,
                               return_asyncio_server=True, debug=True)
    _ = asyncio.ensure_future(server)

    loop = asyncio.get_event_loop()
    # Ctrl-C stops the loop instead of dumping a KeyboardInterrupt trace.
    signal(SIGINT, lambda s, f: loop.stop())
    try:
        loop.run_forever()
    except Exception:
        loop.stop()
def _init_tracer(self):
    """Install a global no-op-reporting tracer that propagates B3 HTTP headers."""
    service_name = ''
    try:
        conf = getattr(settings, 'SERVICE_CONF', '')
        if conf:
            service_name = conf.get('NAME', '')
    except Exception as e:
        logger.error('SERVICE_CONF:NAME is not set in settings. {}'.format(str(e)))
    # create a global tracer first
    tracer = Tracer(
        one_span_per_rpc=True,
        service_name=service_name,
        reporter=NullReporter(),           # spans are sampled but not shipped anywhere
        sampler=ConstSampler(decision=True),
        extra_codecs={Format.HTTP_HEADERS: B3Codec()},
    )
    opentracing.set_global_tracer(tracer)
async def main():
    """Spawn a traced child actor, greet it, then stop it."""
    opentracing.set_global_tracer(init_jaeger_tracer())
    # Surface undeliverable messages via the dead-letter event stream.
    GlobalEventStream.subscribe(process_dead_letter_event, DeadLetterEvent)

    context = RootContext(middleware=[open_tracing_sender_middleware()])
    traced_props = OpenTracingFactory.get_props_with_open_tracing(
        Props.from_producer(lambda: ChildActor()))
    actor = context.spawn(traced_props)

    await context.send(actor, Hello(who="Alex"))
    await asyncio.sleep(1)
    await GlobalRootContext.stop_future(actor)
    input()  # keep the process alive until Enter is pressed
def run(port):
    """Start the traced gRPC banking server on *port* and block until shutdown."""
    configure_logging()

    # The tracer must be registered before the server interceptor reads it.
    tracer = get_tracer('api')
    opentracing.set_global_tracer(tracer)

    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    grpc_server = intercept_grpc_server(grpc_server, opentracing.global_tracer())
    grpc_server.add_insecure_port(f'[::]:{port}')
    register(BankingServiceImpl(), grpc_server)

    grpc_server.start()
    logging.info(f"started server, {port}")
    ServerHelper().await_termination()

    logging.info("killing server")
    # Give in-flight spans a moment to flush before and after closing.
    time.sleep(2)
    tracer.close()
    time.sleep(2)
def tracer():
    """Fixture: a global test tracer whose reporter echoes every span it records."""
    reporter = InMemoryReporter()
    # Capture the real report method BEFORE replacing it, so the wrapper can
    # delegate to it.
    original_report = reporter.report_span

    def log_and_report(span):
        print('Reporting span %s' % span)
        print('Span type %s' % type(span))
        print('SpanContext type %s' % type(span.context))
        original_report(span)

    reporter.report_span = log_and_report

    test_tracer = Tracer(
        service_name='test-tracer',
        sampler=ConstSampler(True),
        reporter=reporter,
        scope_manager=TornadoScopeManager(),
    )
    opentracing.set_global_tracer(test_tracer)
    try:
        yield test_tracer
    finally:
        # Undo global registration and close the tracer even on test failure.
        opentracing._reset_global_tracer()
        test_tracer.close()
context, it's not reliable to send requests via the AsyncHttpRecorder. If the function is not time-sensitive in reply or is async, SyncHttpRecorder is a good fit as shown below. If the function cannot afford to dispatch the span in-process, then it is recommended to either setup a haystack agent in the network and utilize HaystackAgentRecorder or offload the span record dispatching via Queue -> Worker model. In AWS this could mean implementing a SQSRecorder which puts the finished span onto a SQS queue. The queue could then notify a lambda implementing SyncHttpRecorder to dispatch the records. """ recorder = SyncHttpRecorder(os.env["COLLECTOR_URL"]) # suppose it is desired to tag all traces with the application version common_tags = {"svc_ver": os["APP_VERSION"]} tracer = HaystackTracer("example-service", recorder, common_tags=common_tags) opentracing.set_global_tracer(tracer) def invoke_downstream(headers): return "done" def process_downstream_response(response): return "done" def handler(event, context): # extract the span context from headers if this is a downstream service parent_ctx = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, event)
def tracer(asyncio_scope_manager):
    """Fixture: install a MockTracer globally, restoring the previous tracer after."""
    mock_tracer = MockTracer(scope_manager=asyncio_scope_manager)
    previous = opentracing.global_tracer()
    opentracing.set_global_tracer(mock_tracer)
    yield mock_tracer
    # Put the pre-existing tracer back so other tests are unaffected.
    opentracing.set_global_tracer(previous)
def test_register_none():
    """Registering None as the global tracer must be rejected with ValueError."""
    with pytest.raises(ValueError):
        opentracing.set_global_tracer(None)
def _initialize_global_tracer(self, tracer):
    """Register *tracer* as the process-wide opentracing tracer and log it."""
    opentracing.set_global_tracer(tracer)
    # Lazy %-style args keep formatting cost out of the call when INFO is off.
    logger.info('opentracing.tracer initialized to %s[app_name=%s]',
                tracer, self.service_name)
r.mset({"length": _random_string(length)}) return str(r.get("length")) @app.route("/pymongo/<length>") def pymongo_integration(length): with global_tracer().start_active_span("server pymongo operation"): client = MongoClient("mongo", 27017, serverSelectionTimeoutMS=2000) db = client["opentelemetry-tests"] collection = db["tests"] collection.find_one() return _random_string(length) @app.route("/sqlalchemy/<length>") def sqlalchemy_integration(length): with global_tracer().start_active_span("server sqlalchemy operation"): # Create an engine that stores data in the local directory's # sqlalchemy_example.db file. engine = create_engine("sqlite:///sqlalchemy_example.db") # Create all tables in the engine. This is equivalent to "Create Table" # statements in raw SQL. Base.metadata.create_all(engine) return str(_random_string(length)) if __name__ == "__main__": set_global_tracer(shim) app.run(host="0.0.0.0")