async def make_app():
    """Build the ``service_a`` web application.

    Wires up the routes, a zipkin tracer, an instrumented outgoing HTTP
    session (closed on application cleanup) and jinja2 templates.
    """
    app = web.Application()
    app.router.add_get("/api/v1/data", handler)
    app.router.add_get("/", handler)

    service_endpoint = az.create_endpoint("service_a", ipv4=host, port=port)
    tracer = await az.create(zipkin_address, service_endpoint, sample_rate=1.0)

    # Outgoing requests made through this session are traced as well.
    client_session = aiohttp.ClientSession(
        trace_configs=[az.make_trace_config(tracer)]
    )
    app["session"] = client_session

    async def _close_client(application):
        await application["session"].close()

    app.on_cleanup.append(_close_client)

    az.setup(app, tracer)

    templates_dir = pathlib.Path(__file__).parent / "templates"
    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(str(templates_dir)))
    return app
async def test_send_full_batch(fake_zipkin, loop):
    """A batch is flushed as soon as ``send_max_size`` spans accumulate."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    transport = azt.ZipkinTransport(
        fake_zipkin.url,
        send_interval=60,  # long enough that only batch size can trigger a send
        send_max_size=2,
        send_timeout=ClientTimeout(total=1),
    )
    tracer = await az.create_custom(endpoint, transport)
    waiter = fake_zipkin.wait_data(1)

    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.kind(az.CLIENT)

    await asyncio.sleep(1)
    # a single span does not fill the batch, so nothing was sent yet
    assert len(fake_zipkin.get_received_data()) == 0

    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.kind(az.CLIENT)

    # the second span fills the batch, which is pushed right away
    async with timeout(1):
        await waiter
    assert len(fake_zipkin.get_received_data()) == 1

    # close forces sending data to server regardless of send interval
    await tracer.close()
async def test_batches(fake_zipkin, loop):
    """Spans are delivered in batches of at most ``send_max_size``."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    transport = azt.ZipkinTransport(
        fake_zipkin.url,
        send_interval=0.01,
        send_max_size=2,
        send_timeout=ClientTimeout(total=1),
    )
    tracer = await az.create_custom(endpoint, transport)

    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.kind(az.CLIENT)
        with span.new_child("child_1", az.CLIENT):
            pass
        with span.new_child("child_2", az.CLIENT):
            pass

    # close forces any buffered spans out regardless of the send interval
    await tracer.close()

    data = fake_zipkin.get_received_data()
    trace_id = hexify(span.context.trace_id)

    # children finish first and fill the first batch; the root goes alone
    assert len(data[0]) == 2
    assert len(data[1]) == 1
    assert data[0][0]["name"] == "child_1"
    assert data[0][1]["name"] == "child_2"
    assert data[1][0]["name"] == "root_span"
    assert any(s["traceId"] == trace_id for trace in data for s in trace), data
async def test_retry(fake_zipkin, loop):
    """Delivery succeeds while failures stay within ``send_attempt_count``."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    transport = azt.ZipkinTransport(
        fake_zipkin.url,
        send_interval=0.01,
        send_max_size=100,
        send_attempt_count=3,
        send_timeout=ClientTimeout(total=1),
    )
    # two failures queued, so delivery succeeds on the third attempt
    fake_zipkin.next_errors.extend(["disconnect", "timeout"])
    waiter = fake_zipkin.wait_data(1)

    tracer = await az.create_custom(endpoint, transport)
    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.kind(az.CLIENT)

    async with timeout(10):
        await waiter
    await tracer.close()

    data = fake_zipkin.get_received_data()
    trace_id = hexify(span.context.trace_id)
    assert any(s["traceId"] == trace_id for trace in data for s in trace), data
async def test_lost_spans(fake_zipkin, loop):
    """Spans are dropped once every configured send attempt has failed."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    transport = azt.ZipkinTransport(
        fake_zipkin.url,
        send_interval=0.01,
        send_max_size=100,
        send_attempt_count=2,
        send_timeout=ClientTimeout(total=1),
    )
    # both attempts fail, exhausting send_attempt_count
    fake_zipkin.next_errors.extend(["disconnect", "disconnect"])

    tracer = await az.create_custom(endpoint, transport)
    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.kind(az.CLIENT)

    await asyncio.sleep(1)
    await tracer.close()

    assert len(fake_zipkin.get_received_data()) == 0
async def make_app():
    """Build the ``service_c`` application with zipkin tracing installed."""
    app = web.Application()
    app.router.add_get("/api/v1/data", handler)

    endpoint = az.create_endpoint("service_c", ipv4=host, port=port)
    tracer = await az.create(
        "http://127.0.0.1:9411/api/v2/spans", endpoint, sample_rate=1.0
    )
    az.setup(app, tracer)
    return app
async def make_app(host, port):
    """Build the ``backend_broker`` application.

    Exposes a single ``POST /consume`` route, enables aiojobs job
    scheduling and installs zipkin tracing.
    """
    broker_endpoint = az.create_endpoint("backend_broker", ipv4=host, port=port)
    tracer = await az.create(
        "http://127.0.0.1:9411/api/v2/spans", broker_endpoint, sample_rate=1.0
    )

    app = web.Application()
    app.router.add_post("/consume", handler)
    aiojobs.aiohttp.setup(app)
    az.setup(app, tracer)
    return app
async def make_app(host, port):
    """Build the ``frontend`` application with tracing and a client session.

    :param host: address reported for this service's endpoint.
    :param port: port reported for this service's endpoint.
    :return: configured ``web.Application``.

    Fix: the ``aiohttp.ClientSession`` stored under ``app["session"]`` was
    never closed, leaking the connector and producing an "Unclosed client
    session" warning on shutdown. It is now closed via ``on_cleanup``,
    matching how the other service apps in this project handle it.
    """
    app = web.Application()
    app.router.add_get("/", index)

    session = aiohttp.ClientSession()
    app["session"] = session

    async def close_session(app):
        # release the session's connector when the application shuts down
        await app["session"].close()

    app.on_cleanup.append(close_session)

    zipkin_address = "http://127.0.0.1:9411/api/v2/spans"
    endpoint = az.create_endpoint("frontend", ipv4=host, port=port)
    tracer = await az.create(zipkin_address, endpoint, sample_rate=1.0)
    az.setup(app, tracer)
    return app
async def make_app(host, port):
    """Build an aiohttp server where ``/status`` is excluded from tracing."""
    app = web.Application()
    app.router.add_get("/", handle)
    # keep a reference to this route so aiojaeger can be told not to trace it
    status_route = app.router.add_get("/status", not_traced_handle)

    endpoint = az.create_endpoint("aiohttp_server", ipv4=host, port=port)
    tracer = await az.create(
        "http://127.0.0.1:9411/api/v2/spans", endpoint, sample_rate=1.0
    )
    az.setup(app, tracer, skip_routes=[status_route])
    return app
def __init__(
    self,
    address: str,
    service_name: str,
    host: str,
    port: int,
    sample_rate: float = 1.0,
):
    """Store tracer configuration; the tracer itself is created later.

    :param address: URL of the span collector.
    :param service_name: name reported for this service in traces.
    :param host: host address used when building the service endpoint.
    :param port: port used when building the service endpoint.
    :param sample_rate: fraction of traces to sample (1.0 = trace all).
    """
    self._address = address
    # NOTE(review): ``host`` is passed as ``ipv6`` here while the rest of
    # the codebase passes similar values as ``ipv4`` — confirm intentional.
    self._endpoint = aj.create_endpoint(service_name, ipv6=host, port=port)
    # populated lazily; None until the tracer is actually created
    self._tracer: Optional[aj.Tracer] = None
    self._sample_rate = sample_rate
    # populated lazily; None until a trace config is built from the tracer
    self.trace_config: Optional[TraceConfig] = None
async def test_basic_context_manager(zipkin_url, client, loop):
    """``create_zipkin`` works as an async context manager and delivers a span."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    async with az.create_zipkin(
        zipkin_url, endpoint, sample_rate=1.0, send_interval=50
    ) as tracer:
        with tracer.new_trace(sampled=True) as span:
            span.name("root_span")
            await asyncio.sleep(0.1)

    trace_id = hexify(span.context.trace_id)
    traces_url = URL(zipkin_url).with_path("/zipkin/api/v2/traces")
    data = await _retry_zipkin_client(traces_url, client)
    assert any(s["traceId"] == trace_id for trace in data for s in trace), data
async def run():
    """Minimal tracing example: open a tracer and record one named span."""
    zipkin_address = "http://127.0.0.1:9411/api/v2/spans"
    # the endpoint describes the current machine for better trace information
    endpoint = az.create_endpoint("minimal_example", ipv4="127.0.0.1")

    # sample_rate=1.0 traces every call; use e.g. 0.5 to sample only 50%
    async with az.create(zipkin_address, endpoint, sample_rate=1.0) as tracer:
        with tracer.new_trace() as span:
            # name the span so it is easy to find in the UI
            span.name("root::span")
            # pretend to run a slow SQL query
            await asyncio.sleep(0.1)

    print("Done, check zipkin UI")
async def test_exception_in_span(zipkin_url, client, loop):
    """An exception raised inside a span is recorded as an ``error`` tag."""
    endpoint = az.create_endpoint("error_service", ipv4="127.0.0.1", port=80)
    async with az.create_zipkin(
        zipkin_url, endpoint, send_interval=50
    ) as tracer:

        def failing(s):
            with s:
                s.name("root_span")
                raise RuntimeError("foo")

        span = tracer.new_trace(sampled=True)
        with pytest.raises(RuntimeError):
            failing(span)

    traces_url = URL(zipkin_url).with_path("/zipkin/api/v2/traces")
    data = await _retry_zipkin_client(traces_url, client)
    assert any(
        s.get("tags", {}) == {"error": "foo"} for trace in data for s in trace
    )
async def make_app():
    """Build the ``service_b`` application with tracing and a traced session."""
    app = web.Application()
    app.router.add_get("/api/v1/data", handler)

    endpoint = az.create_endpoint("service_b", ipv4=host, port=port)
    tracer = await az.create(
        "http://127.0.0.1:9411/api/v2/spans", endpoint, sample_rate=1.0
    )
    az.setup(app, tracer)

    # outgoing requests made via this session carry tracing headers too
    session = aiohttp.ClientSession(trace_configs=[az.make_trace_config(tracer)])
    app["session"] = session

    async def _close_session(application):
        await application["session"].close()

    app.on_cleanup.append(_close_session)
    return app
async def test_leak_in_transport(zipkin_url, client, loop):
    """The transport must not keep accumulating allocations while running."""
    tracemalloc.start()

    endpoint = az.create_endpoint("simple_service")
    tracer = await az.create_zipkin(
        zipkin_url, endpoint, sample_rate=1, send_interval=0.0001
    )

    await asyncio.sleep(1)
    gc.collect()
    before = tracemalloc.take_snapshot()

    await asyncio.sleep(2)
    gc.collect()
    after = tracemalloc.take_snapshot()

    stats = after.compare_to(before, "lineno")
    count = sum(stat.count for stat in stats)

    await tracer.close()
    # in case of leak this number is around 901452
    assert count < 400
async def test_basic(jaeger_server, jaeger_url, jaeger_api_url, client):
    """A span sent through the jaeger transport shows up in the jaeger API."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    tracer = await az.create_jaeger(jaeger_url, endpoint)

    with tracer.new_trace(sampled=True) as span:
        span.name("jaeger_span")
        span.tag("span_type", "root")
        span.kind(az.CLIENT)
        span.annotate("SELECT * FROM")
        await asyncio.sleep(0.1)
        span.annotate("start end sql")

    # close forces sending data to server regardless of send interval
    await tracer.close()

    trace_id = hexify(span.context.trace_id)
    api_url = URL(jaeger_api_url) / "api" / "traces" / trace_id
    resp = await client.get(api_url, headers={"Content-Type": "application/json"})
    assert resp.status == 200
    payload = await resp.json()
    assert payload["data"][0]["traceID"] == trace_id
async def test_zipkin_error(client, loop, caplog):
    """Failures to deliver spans are logged once, not raised to the caller."""
    endpoint = az.create_endpoint("error_service", ipv4="127.0.0.1", port=80)
    # this URL always answers 404, so every delivery attempt fails
    zipkin_url = "https://httpbin.org/status/404"

    async with az.create_zipkin(
        zipkin_url,
        endpoint,
        sample_rate=1.0,
        send_interval=50,
    ) as tracer:
        with tracer.new_trace(sampled=True) as span:
            span.kind(az.CLIENT)
            await asyncio.sleep(0.0)

    assert len(caplog.records) == 1
    assert "zipkin responded with code: " in str(caplog.records[0].exc_info)
    expected = ("aiojaeger.transport", logging.ERROR, "Can not send spans to zipkin")
    assert caplog.record_tuples == [expected]
async def run():
    """Full tracing example: a root span with two sequential child spans."""
    zipkin_address = "http://127.0.0.1:9411/api/v2/spans"
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=8080)

    # sample_rate=1.0 traces every call; lower it (e.g. 0.5) to sample less
    tracer = await az.create(zipkin_address, endpoint, sample_rate=1.0)

    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.tag("span_type", "root")
        span.kind(az.CLIENT)
        span.annotate("SELECT * FROM")
        # pretend to run a slow SQL query
        await asyncio.sleep(0.1)
        span.annotate("start end sql")

        # first child span
        with tracer.new_child(span.context) as child:
            child.name("nested_span_1")
            child.kind(az.CLIENT)
            child.tag("span_type", "inner1")
            child.remote_endpoint("remote_service_1")
            await asyncio.sleep(0.01)

        # second child span
        with tracer.new_child(span.context) as child:
            child.name("nested_span_2")
            child.kind(az.CLIENT)
            child.remote_endpoint("remote_service_2")
            child.tag("span_type", "inner2")
            await asyncio.sleep(0.01)

    await tracer.close()
    print("-" * 100)
    print("Check zipkin UI for produced traces: http://localhost:9411/zipkin")
async def test_basic(zipkin_url, client, loop):
    """A sampled span is delivered to zipkin and retrievable by trace id."""
    endpoint = az.create_endpoint("simple_service", ipv4="127.0.0.1", port=80)
    tracer = await az.create_zipkin(
        zipkin_url, endpoint, sample_rate=1.0, send_interval=50
    )

    with tracer.new_trace(sampled=True) as span:
        span.name("root_span")
        span.tag("span_type", "root")
        span.kind(az.CLIENT)
        span.annotate("SELECT * FROM")
        await asyncio.sleep(0.1)
        span.annotate("start end sql")

    # close forces sending data to server regardless of send interval
    await tracer.close()

    trace_id = hexify(span.context.trace_id)
    traces_url = URL(zipkin_url).with_path("/zipkin/api/v2/traces")
    data = await _retry_zipkin_client(traces_url, client)
    assert any(s["traceId"] == trace_id for trace in data for s in trace), data