def __attrs_post_init__(self):
    """Wire up the recorder, the default collectors and the export scheduler."""
    # Allow to store up to 10 threads for 60 seconds at 100 Hz.
    stack_event_cap = 10 * 60 * 100
    # (default buffer size / interval) * export interval.
    alloc_event_cap = int(
        (memalloc.MemoryCollector._DEFAULT_MAX_EVENTS / memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60
    )
    r = self._recorder = recorder.Recorder(
        max_events={
            stack.StackSampleEvent: stack_event_cap,
            stack.StackExceptionSampleEvent: stack_event_cap,
            memalloc.MemoryAllocSampleEvent: alloc_event_cap,
            # Heap sample count scales with allocated memory, so leave it unbounded.
            memalloc.MemoryHeapSampleEvent: None,
        },
        default_max_events=int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS)),
    )
    self._collectors = [
        stack.StackCollector(r, tracer=self.tracer),
        memalloc.MemoryCollector(r),
        threading.LockCollector(r, tracer=self.tracer),
    ]
    exporters = self._build_default_exporters()
    if exporters:
        self._scheduler = scheduler.Scheduler(
            recorder=r,
            exporters=exporters,
            before_flush=self._collectors_snapshot,
        )
def __attrs_post_init__(self):
    """Create the recorder, select the memory collector and set up export.

    The memory collector is chosen by the ``DD_PROFILING_MEMALLOC`` environment
    variable (opt-in here: defaults to the legacy ``memory`` collector).
    """
    r = recorder.Recorder(
        max_events={
            # Allow to store up to 10 threads for 60 seconds at 100 Hz.
            stack.StackSampleEvent: 10 * 60 * 100,
            stack.StackExceptionSampleEvent: 10 * 60 * 100,
            # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
            # = (60 seconds / 0.1 seconds)
            memory.MemorySampleEvent: int(60 / 0.1),
            # (default buffer size / interval) * export interval.
            # FIX: use the collector's own defaults instead of hard-coding 64 and
            # 0.5, so this cap stays in sync if those defaults ever change.
            memalloc.MemoryAllocSampleEvent: int(
                (memalloc.MemoryCollector._DEFAULT_MAX_EVENTS / memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60
            ),
        },
        default_max_events=int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS)),
    )
    if formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "false")):
        mem_collector = memalloc.MemoryCollector(r)
    else:
        mem_collector = memory.MemoryCollector(r)
    self._collectors = [
        stack.StackCollector(r, tracer=self.tracer),
        mem_collector,
        exceptions.UncaughtExceptionCollector(r),
        threading.LockCollector(r),
    ]
    exporters = self._build_default_exporters(self.service, self.env, self.version)
    if exporters:
        self._scheduler = scheduler.Scheduler(recorder=r, exporters=exporters)
def test_resource_not_collected(monkeypatch, tracer):
    """With endpoint collection disabled, lock events must not carry the trace resource."""
    monkeypatch.setenv("DD_PROFILING_ENDPOINT_COLLECTION_ENABLED", "false")
    resource = str(uuid.uuid4())
    span_type = str(uuid.uuid4())
    r = recorder.Recorder()
    with collector_threading.LockCollector(r, tracer=tracer, capture_pct=100):
        lock = threading.Lock()
        lock.acquire()
        with tracer.trace("test", resource=resource, span_type=span_type) as t:
            lock2 = threading.Lock()
            lock2.acquire()
            lock.release()
            trace_id = t.trace_id
            span_id = t.span_id
        lock2.release()
    events = r.reset()
    # NOTE(review): the "test_threading.py:NNN" names depend on the lock
    # creation line numbers in the original file; update them if this file is
    # reformatted.
    # The tracer might use locks, so we need to look into every event to assert we got ours
    for event_type in (collector_threading.LockAcquireEvent, collector_threading.LockReleaseEvent):
        assert {"test_threading.py:149", "test_threading.py:152"}.issubset({e.lock_name for e in events[event_type]})
        for event in events[event_type]:
            # BUG FIX: compare ``lock_name`` — the attribute used to build the
            # set above — not ``name``. The old ``event.name`` condition never
            # matched, so the per-event assertions were never exercised.
            if event.lock_name == "test_threading.py:149":
                assert event.trace_id is None
                assert event.span_id is None
                assert event.trace_resource_container is None
                assert event.trace_type is None
            elif event.lock_name == "test_threading.py:152":
                assert event.trace_id == trace_id
                assert event.span_id == span_id
                assert event.trace_resource_container is None
                assert event.trace_type == t.span_type
def test_lock_events_tracer(tracer):
    """Lock events captured inside an active span must carry its trace/span ids."""
    r = recorder.Recorder()
    with collector_threading.LockCollector(r, tracer=tracer, capture_pct=100):
        outer_lock = threading.Lock()
        outer_lock.acquire()
        with tracer.trace("test") as span:
            inner_lock = threading.Lock()
            inner_lock.acquire()
            outer_lock.release()
            trace_id = span.trace_id
            span_id = span.span_id
        inner_lock.release()
    acquire_events = r.events[collector_threading.LockAcquireEvent]
    release_events = r.events[collector_threading.LockReleaseEvent]
    assert len(acquire_events) == 2
    assert len(release_events) == 2
    # Outer lock was acquired before the span started: no trace info attached.
    assert acquire_events[0].trace_ids is None
    assert acquire_events[0].span_ids is None
    # Inner lock was acquired while the span was active.
    assert acquire_events[1].trace_ids == {trace_id}
    assert acquire_events[1].span_ids == {span_id}
    # Outer lock was released while the span was active.
    assert release_events[0].trace_ids == {trace_id}
    assert release_events[0].span_ids == {span_id}
    # Inner lock was released after the span finished.
    assert release_events[1].trace_ids is None
    assert release_events[1].span_ids is None
def test_lock_events_tracer(tracer):
    """Lock events recorded inside a span carry its ids, resource and span type."""
    resource = str(uuid.uuid4())
    span_type = str(uuid.uuid4())
    r = recorder.Recorder()
    with collector_threading.LockCollector(r, tracer=tracer, capture_pct=100):
        lock = threading.Lock()
        lock.acquire()
        with tracer.trace("test", resource=resource, span_type=span_type) as t:
            lock2 = threading.Lock()
            lock2.acquire()
            lock.release()
            trace_id = t.trace_id
            span_id = t.span_id
        lock2.release()
    events = r.reset()
    # NOTE(review): the "test_threading.py:NN" names depend on the lock creation
    # line numbers in the original file; update them if this file is reformatted.
    # The tracer might use locks, so we need to look into every event to assert we got ours
    for event_type in (collector_threading.LockAcquireEvent, collector_threading.LockReleaseEvent):
        assert {"test_threading.py:84", "test_threading.py:87"}.issubset({e.lock_name for e in events[event_type]})
        for event in events[event_type]:
            # BUG FIX: compare ``lock_name`` (the attribute used to build the set
            # above), not ``name`` — the old condition never matched, so these
            # assertions were never exercised.
            if event.lock_name == "test_threading.py:84":
                assert event.trace_id is None
                assert event.span_id is None
                assert event.trace_resource_container is None
                assert event.trace_type is None
            elif event.lock_name == "test_threading.py:87":
                assert event.trace_id == trace_id
                assert event.span_id == span_id
                assert event.trace_resource_container[0] == t.resource
                assert event.trace_type == t.span_type
def test_lock_events_tracer(tracer):
    """Lock events recorded inside an active span carry its trace/span id sets."""
    r = recorder.Recorder()
    with collector_threading.LockCollector(r, tracer=tracer, capture_pct=100):
        lock = threading.Lock()
        lock.acquire()
        with tracer.trace("test") as t:
            lock2 = threading.Lock()
            lock2.acquire()
            lock.release()
            trace_id = t.trace_id
            span_id = t.span_id
        lock2.release()
    events = r.reset()
    # NOTE(review): the "test_threading.py:NN" names depend on the lock creation
    # line numbers in the original file; update them if this file is reformatted.
    # The tracer might use locks, so we need to look into every event to assert we got ours
    for event_type in (collector_threading.LockAcquireEvent, collector_threading.LockReleaseEvent):
        assert {"test_threading.py:76", "test_threading.py:79"}.issubset({e.lock_name for e in events[event_type]})
        for event in events[event_type]:
            # BUG FIX: compare ``lock_name`` (the attribute used to build the set
            # above), not ``name`` — the old condition never matched, so these
            # assertions were never exercised.
            if event.lock_name == "test_threading.py:76":
                assert event.trace_ids is None
                assert event.span_ids is None
            elif event.lock_name == "test_threading.py:79":
                assert event.trace_ids == {trace_id}
                assert event.span_ids == {span_id}
def _build_default_collectors():
    """Return the default collectors, all attached to one shared recorder."""
    shared_recorder = recorder.Recorder()
    collector_classes = (
        stack.StackCollector,
        memory.MemoryCollector,
        exceptions.UncaughtExceptionCollector,
        threading.LockCollector,
    )
    return [collector_class(shared_recorder) for collector_class in collector_classes]
def test_patch():
    """Starting the collector patches ``threading.Lock``; stopping restores it."""
    r = recorder.Recorder()
    unpatched_lock = threading.Lock
    collector = collector_threading.LockCollector(r)
    collector.start()
    assert unpatched_lock == collector.original
    # wrapt proxies compare equal to the object they wrap.
    assert unpatched_lock == threading.Lock
    collector.stop()
    assert unpatched_lock == threading.Lock
    assert collector.original == threading.Lock
def test_lock_gevent_tasks():
    """Lock events taken in a worker thread carry that task's id and name."""
    r = recorder.Recorder()

    def play_with_lock():
        lock = threading.Lock()
        lock.acquire()
        lock.release()

    with collector_threading.LockCollector(r, capture_pct=100):
        t = threading.Thread(name="foobar", target=play_with_lock)
        t.start()
        t.join()

    assert len(r.events[collector_threading.LockAcquireEvent]) >= 1
    assert len(r.events[collector_threading.LockReleaseEvent]) >= 1

    # NOTE(review): the "test_threading.py:199" lock name and the frame line
    # numbers (200/201) are tied to this test's position in the original file;
    # update them if the file is reformatted.
    for event in r.events[collector_threading.LockAcquireEvent]:
        if event.lock_name == "test_threading.py:199":
            assert event.thread_id == nogevent.main_thread_id
            assert event.wait_time_ns >= 0
            # FIX: task_id/task_name were asserted twice; assert once.
            assert event.task_id == t.ident
            assert event.task_name == "foobar"
            # It's called through pytest so I'm sure it's gonna be that long, right?
            assert len(event.frames) > 3
            assert event.nframes > 3
            assert event.frames[0] == (__file__, 200, "play_with_lock")
            assert event.sampling_pct == 100
            break
    else:
        pytest.fail("Lock event not found")
    for event in r.events[collector_threading.LockReleaseEvent]:
        if event.lock_name == "test_threading.py:199":
            assert event.thread_id == nogevent.main_thread_id
            # FIX: locked_for_ns is an integer nanosecond count, so the old
            # ``>= 0.1`` comparison was exactly equivalent to ``> 0``.
            assert event.locked_for_ns > 0
            assert event.task_id == t.ident
            assert event.task_name == "foobar"
            # It's called through pytest so I'm sure it's gonna be that long, right?
            assert len(event.frames) > 3
            assert event.nframes > 3
            assert event.frames[0] == (__file__, 201, "play_with_lock")
            assert event.sampling_pct == 100
            break
    else:
        pytest.fail("Lock event not found")
def test_lock_acquire_events():
    """A single ``Lock.acquire()`` yields exactly one acquire event and no release."""
    r = recorder.Recorder()
    with collector_threading.LockCollector(r, capture_pct=100):
        lock = threading.Lock()
        lock.acquire()
    acquire_events = r.events[collector_threading.LockAcquireEvent]
    assert len(acquire_events) == 1
    assert len(r.events[collector_threading.LockReleaseEvent]) == 0
    event = acquire_events[0]
    # NOTE(review): the lock name and frame line number below are tied to this
    # test's position in the original file.
    assert event.lock_name == "test_threading.py:59"
    assert event.thread_id == _thread.get_ident()
    assert event.wait_time_ns > 0
    # It's called through pytest so I'm sure it's gonna be that long, right?
    assert len(event.frames) > 3
    assert event.nframes > 3
    assert event.frames[0] == (__file__, 60, "test_lock_acquire_events")
    assert event.sampling_pct == 100
def __attrs_post_init__(self):
    """Build the recorder, pick the memory collector and wire up the scheduler."""
    # Allow to store up to 10 threads for 60 seconds at 100 Hz.
    stack_event_cap = 10 * 60 * 100
    r = self._recorder = recorder.Recorder(
        max_events={
            stack.StackSampleEvent: stack_event_cap,
            stack.StackExceptionSampleEvent: stack_event_cap,
            # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
            # = (60 seconds / 0.1 seconds)
            memory.MemorySampleEvent: int(60 / 0.1),
            # (default buffer size / interval) * export interval
            memalloc.MemoryAllocSampleEvent: int(
                (memalloc.MemoryCollector._DEFAULT_MAX_EVENTS / memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60
            ),
            # Do not limit the heap sample size as the number of events is relative to allocated memory anyway
            memalloc.MemoryHeapSampleEvent: None,
        },
        default_max_events=int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS)),
    )
    # memalloc is the default; DD_PROFILING_MEMALLOC=false selects the legacy collector.
    use_memalloc = formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "true"))
    mem_collector = memalloc.MemoryCollector(r) if use_memalloc else memory.MemoryCollector(r)
    self._collectors = [
        stack.StackCollector(r, tracer=self.tracer),
        mem_collector,
        threading.LockCollector(r, tracer=self.tracer),
    ]
    exporters = self._build_default_exporters(self.tracer, self.url, self.tags, self.service, self.env, self.version)
    if exporters:
        self._scheduler = scheduler.Scheduler(
            recorder=r,
            exporters=exporters,
            before_flush=self._collectors_snapshot,
        )
def test_wrapper():
    """A Lock class stored as a class attribute still works while patched."""
    r = recorder.Recorder()
    collector = collector_threading.LockCollector(r)
    with collector:

        class Foobar(object):
            lock_class = threading.Lock

            def __init__(self):
                lock = self.lock_class()
                assert lock.acquire()
                lock.release()

        # Exercise the attribute via the class directly…
        lock = Foobar.lock_class()
        assert lock.acquire()
        lock.release()
        # …and via instance construction.
        Foobar()
def _build_default_collectors(tracer):
    """Create a bounded recorder and the default collectors attached to it."""
    shared_recorder = recorder.Recorder(
        max_events={
            # Allow to store up to 10 threads for 60 seconds at 100 Hz.
            stack.StackSampleEvent: 10 * 60 * 100,
            stack.StackExceptionSampleEvent: 10 * 60 * 100,
            # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
            # = (60 seconds / 0.1 seconds)
            memory.MemorySampleEvent: int(60 / 0.1),
        },
        default_max_events=int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS)),
    )
    # Only the stack collector needs the tracer; the rest just record events.
    plain_collector_classes = (
        memory.MemoryCollector,
        exceptions.UncaughtExceptionCollector,
        threading.LockCollector,
    )
    return [stack.StackCollector(shared_recorder, tracer=tracer)] + [
        cls(shared_recorder) for cls in plain_collector_classes
    ]
def test_lock_acquire_release_speed_patched(benchmark, pct):
    """Benchmark acquire/release of a patched Lock at the given capture percentage."""
    rec = recorder.Recorder()
    lock_collector = collector_threading.LockCollector(rec, capture_pct=pct)
    with lock_collector:
        benchmark(_lock_acquire_release, threading.Lock())
def test_lock_create_speed_patched(benchmark):
    """Benchmark Lock construction while the collector patch is active."""
    rec = recorder.Recorder()
    lock_collector = collector_threading.LockCollector(rec)
    with lock_collector:
        benchmark(threading.Lock)