Ejemplo n.º 1
0
    def __attrs_post_init__(self):
        """Set up the event recorder, the collectors, and — when exporters
        are available — the flush scheduler."""
        # Per-event-type buffer caps; any type not listed falls back to
        # default_max_events (overridable via DD_PROFILING_MAX_EVENTS).
        event_limits = {
            # Allow to store up to 10 threads for 60 seconds at 100 Hz
            stack.StackSampleEvent: 10 * 60 * 100,
            stack.StackExceptionSampleEvent: 10 * 60 * 100,
            # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
            # = (60 seconds / 0.1 seconds)
            memory.MemorySampleEvent: int(60 / 0.1),
            # (default buffer size / interval) * export interval
            memalloc.MemoryAllocSampleEvent: int((64 / 0.5) * 60),
        }
        default_cap = int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS))
        r = recorder.Recorder(max_events=event_limits, default_max_events=default_cap)

        # DD_PROFILING_MEMALLOC opts into the memalloc-based collector;
        # it is off by default here, so the sampling collector is used.
        use_memalloc = formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "false"))
        mem_collector = memalloc.MemoryCollector(r) if use_memalloc else memory.MemoryCollector(r)

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            mem_collector,
            exceptions.UncaughtExceptionCollector(r),
            threading.LockCollector(r),
        ]

        exporters = self._build_default_exporters(self.service, self.env, self.version)

        # No exporters means nothing can be flushed, so no scheduler is made.
        if exporters:
            self._scheduler = scheduler.Scheduler(recorder=r, exporters=exporters)
def test_thread_name():
    """The scheduler's worker thread is named after its module and class."""
    rec = recorder.Recorder()
    null_exporter = exporter.NullExporter()
    sched = scheduler.Scheduler(rec, [null_exporter])
    sched.start()
    assert sched._worker.name == "ddtrace.profiling.scheduler:Scheduler"
    sched.stop()
Ejemplo n.º 3
0
    def __attrs_post_init__(self):
        """Build the recorder and default collectors; wire up a flush
        scheduler when at least one exporter is available."""
        # Per-event-type caps; unlisted types use default_max_events,
        # which DD_PROFILING_MAX_EVENTS may override.
        event_limits = {
            # Allow to store up to 10 threads for 60 seconds at 100 Hz
            stack.StackSampleEvent: 10 * 60 * 100,
            stack.StackExceptionSampleEvent: 10 * 60 * 100,
            # (default buffer size / interval) * export interval
            memalloc.MemoryAllocSampleEvent: int(
                (memalloc.MemoryCollector._DEFAULT_MAX_EVENTS
                 / memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60
            ),
            # Do not limit the heap sample size as the number of events is
            # relative to allocated memory anyway.
            memalloc.MemoryHeapSampleEvent: None,
        }
        default_cap = int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS))
        r = self._recorder = recorder.Recorder(max_events=event_limits, default_max_events=default_cap)

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            memalloc.MemoryCollector(r),
            threading.LockCollector(r, tracer=self.tracer),
        ]

        exporters = self._build_default_exporters()

        if exporters:
            # Snapshot the collectors right before each flush so their
            # periodic data is included in the export.
            self._scheduler = scheduler.Scheduler(
                recorder=r, exporters=exporters, before_flush=self._collectors_snapshot
            )
Ejemplo n.º 4
0
    def __attrs_post_init__(self):
        """Fill in default exporters and attach one scheduler per recorder."""
        if self.exporters is None:
            self.exporters = _build_default_exporters(self.service, self.env)

        # With no exporters there is nothing to flush — skip scheduler setup.
        if not self.exporters:
            return
        for rec in self.recorders:
            self.schedulers.append(scheduler.Scheduler(recorder=rec, exporters=self.exporters))
Ejemplo n.º 5
0
def test_before_flush():
    """The before_flush hook runs when the scheduler flushes."""
    called = {}

    def mark_called():
        called["OK"] = True

    rec = recorder.Recorder()
    sched = scheduler.Scheduler(rec, [exporter.NullExporter()], before_flush=mark_called)
    rec.push_events([event.Event()] * 10)
    sched.flush()
    assert called["OK"]
Ejemplo n.º 6
0
def test_before_flush_failure(caplog):
    """An exception raised by the before_flush hook is logged, not propagated."""
    def boom():
        raise Exception("LOL")

    rec = recorder.Recorder()
    sched = scheduler.Scheduler(rec, [exporter.NullExporter()], before_flush=boom)
    rec.push_events([event.Event()] * 10)
    sched.flush()
    expected = ("ddtrace.profiling.scheduler", logging.ERROR, "Scheduler before_flush hook failed")
    assert caplog.record_tuples == [expected]
Ejemplo n.º 7
0
    def __attrs_post_init__(self):
        """Provide default collectors/exporters, then create one scheduler
        per recorder when any exporter exists."""
        if self.collectors is None:
            self.collectors = self._build_default_collectors(self.tracer)

        if self.exporters is None:
            self.exporters = _build_default_exporters(self.service, self.env, self.version)

        # Nothing to schedule when no exporter is configured.
        if not self.exporters:
            return
        for rec in self.recorders:
            self._schedulers.append(scheduler.Scheduler(recorder=rec, exporters=self.exporters))
Ejemplo n.º 8
0
    def __attrs_post_init__(self):
        """Create the recorder, pick the memory collector implementation,
        and start-wire the flush scheduler when exporters are available."""
        # Per-event-type buffer caps; unlisted types use default_max_events,
        # which DD_PROFILING_MAX_EVENTS may override.
        event_limits = {
            # Allow to store up to 10 threads for 60 seconds at 100 Hz
            stack.StackSampleEvent: 10 * 60 * 100,
            stack.StackExceptionSampleEvent: 10 * 60 * 100,
            # This can generate one event every 0.1s if 100% are taken — though we take 5% by default.
            # = (60 seconds / 0.1 seconds)
            memory.MemorySampleEvent: int(60 / 0.1),
            # (default buffer size / interval) * export interval
            memalloc.MemoryAllocSampleEvent: int(
                (memalloc.MemoryCollector._DEFAULT_MAX_EVENTS
                 / memalloc.MemoryCollector._DEFAULT_INTERVAL) * 60
            ),
            # Do not limit the heap sample size as the number of events is
            # relative to allocated memory anyway.
            memalloc.MemoryHeapSampleEvent: None,
        }
        default_cap = int(os.environ.get("DD_PROFILING_MAX_EVENTS", recorder.Recorder._DEFAULT_MAX_EVENTS))
        r = self._recorder = recorder.Recorder(max_events=event_limits, default_max_events=default_cap)

        # memalloc is the default here (DD_PROFILING_MEMALLOC defaults to
        # "true"); the sampling-based collector is the opt-out fallback.
        use_memalloc = formats.asbool(os.environ.get("DD_PROFILING_MEMALLOC", "true"))
        mem_collector = memalloc.MemoryCollector(r) if use_memalloc else memory.MemoryCollector(r)

        self._collectors = [
            stack.StackCollector(r, tracer=self.tracer),
            mem_collector,
            threading.LockCollector(r, tracer=self.tracer),
        ]

        exporters = self._build_default_exporters(
            self.tracer, self.url, self.tags, self.service, self.env, self.version
        )

        if exporters:
            # Snapshot the collectors right before each flush so their
            # periodic data is included in the export.
            self._scheduler = scheduler.Scheduler(
                recorder=r, exporters=exporters, before_flush=self._collectors_snapshot
            )
Ejemplo n.º 9
0
 def __attrs_post_init__(self):
     """Attach one flush scheduler per recorder when exporters are configured."""
     # No exporters means nothing can be flushed, so no schedulers are created.
     if self.exporters:
         for rec in self.recorders:
             self.schedulers.append(
                 scheduler.Scheduler(recorder=rec,
                                     exporters=self.exporters))
Ejemplo n.º 10
0
def test_exporter_failure():
    """Flushing must complete without raising even when an exporter fails."""
    rec = recorder.Recorder()
    failing = _FailExporter()
    sched = scheduler.Scheduler(rec, [failing])
    rec.push_events([event.Event()] * 10)
    sched.flush()