Example #1
class BaseExporter(object):
    def __init__(self, **options):
        options = Options(**options)
        self.export_interval = options.export_interval
        self.max_batch_size = options.max_batch_size
        # TODO: queue should be moved to tracer
        # too much refactor work, leave to the next PR
        self._queue = Queue(capacity=options.queue_capacity)
        # TODO: worker should not be created in the base exporter
        self._worker = Worker(self._queue, self)
        self._worker.start()
        atexit.register(self._worker.stop, options.grace_period)

    # Ideally we don't want to have `emit`.
    # Exporter will have one public method, `export`, a blocking method
    # that runs inside worker threads.
    def emit(self, batch, event=None):
        raise NotImplementedError  # pragma: NO COVER

    # TODO: we shouldn't have this at the beginning
    # Tracer should own the queue, exporter shouldn't even know if the
    # source is a queue or not.
    # Tracer puts span_data into the queue.
    # Worker gets span_data from the src (here, the queue) and feeds it
    # into the dst (the exporter).
    # Exporter defines the MTU (max_batch_size) and export_interval.
    # There can be one worker for each queue, or multiple workers for each
    # queue, or shared workers among queues (e.g. queue for traces, queue
    # for logs).
    def export(self, items):
        self._queue.puts(items, block=False)  # pragma: NO COVER

class BaseLogHandler(logging.Handler):
    def __init__(self):
        super(BaseLogHandler, self).__init__()
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()

    def close(self):
        self._worker.stop()

    def createLock(self):
        # Disable the handler-level I/O lock: emit() only enqueues the
        # record, and the queue is safe for concurrent use.
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def _export(self, batch, event=None):
        try:
            return self.export(batch)
        finally:
            if event:
                event.set()

    def export(self, batch):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
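
The architecture the TODO comments describe (the queue as source, the exporter as sink, and a worker shuttling batches between them) is easier to follow with the worker loop written out. Below is a minimal sketch of such a Worker, assuming the Queue.gets batching and QueueEvent semantics exercised by the tests in the later examples; it is illustrative, not the library's verbatim implementation.

import threading

class QueueEvent(threading.Event):
    """Sentinel placed on the queue; doubles as a completion signal."""
    def __init__(self, name):
        super(QueueEvent, self).__init__()
        self.name = name

class Worker(threading.Thread):
    daemon = True

    def __init__(self, src, dst):
        # src is the queue; dst is the exporter/handler, which exposes
        # max_batch_size, export_interval and a batch callback
        # (_export here; the BaseExporter variant above calls it emit).
        super(Worker, self).__init__()
        self._src = src
        self._dst = dst

    def run(self):
        while True:
            # Collect up to max_batch_size items, waiting at most
            # export_interval seconds; an event ends the batch early.
            batch = self._src.gets(self._dst.max_batch_size,
                                   self._dst.export_interval)
            if batch and isinstance(batch[-1], QueueEvent):
                self._dst._export(batch[:-1], event=batch[-1])
                if batch[-1].name == 'exit':  # assumed stop sentinel
                    break
                continue
            self._dst._export(batch)

    def stop(self, grace_period=None):
        # Ask the loop to drain and exit, then wait for confirmation;
        # this is the path atexit.register(self._worker.stop, ...) uses.
        event = QueueEvent('exit')
        self._src.put(event, block=True, timeout=grace_period)
        event.wait(timeout=grace_period)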
Example #3
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()
        atexit.register(self.close, self.options.grace_period)
        # start statsbeat on exporter instantiation
        if not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"):
            statsbeat_metrics.collect_statsbeat_metrics(self.options)
        # For redirects
        self._consecutive_redirects = 0  # To prevent circular redirects

class BaseLogHandler(logging.Handler):
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self):
        if self.storage:
            self.storage.close()
        if self._worker:
            self._worker.stop()

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
Example #5
    def __init__(self, **options):
        options = Options(**options)
        self.export_interval = options.export_interval
        self.max_batch_size = options.max_batch_size
        # TODO: queue should be moved to tracer
        # too much refactor work, leave to the next PR
        self._queue = Queue(capacity=options.queue_capacity)
        # TODO: worker should not be created in the base exporter
        self._worker = Worker(self._queue, self)
        self._worker.start()
        atexit.register(self._worker.stop, options.grace_period)
Example #6
    def test_gets(self):
        queue = Queue(capacity=10)
        queue.puts((1, 2, 3))
        result = queue.gets(count=5, timeout=TIMEOUT)
        self.assertEqual(result, (1, 2, 3))

        queue.puts((1, 2, 3, 4, 5))
        result = queue.gets(count=3, timeout=TIMEOUT)
        self.assertEqual(result, (1, 2, 3))
        result = queue.gets(count=3, timeout=TIMEOUT)
        self.assertEqual(result, (4, 5))

    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = LocalFileStorage(
            path=self.options.storage_path,
            max_size=self.options.storage_max_size,
            maintenance_period=self.options.storage_maintenance_period,
            retention_period=self.options.storage_retention_period,
        )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()
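
test_gets above pins down the batching contract: gets(count, timeout) spends at most timeout seconds trying to fill a batch of count items and returns a shorter tuple once the queue drains. Below is a self-contained sketch consistent with that behaviour (and with test_puts_timeout in Example #12), built on the standard library queue module; all implementation details here are assumptions, not the library source.

import queue
import time

class Queue(object):
    """Sketch of the tested Queue behaviour; internals assumed."""

    def __init__(self, capacity):
        self._queue = queue.Queue(maxsize=capacity)

    def is_empty(self):
        return self._queue.empty()

    def put(self, item, block=True, timeout=None):
        try:
            self._queue.put(item, block=block, timeout=timeout)
        except queue.Full:
            pass  # assumed: items are silently dropped on overflow

    def puts(self, items, block=True, timeout=None):
        # `timeout` is a total budget for the whole batch; see
        # test_puts_timeout in Example #12, which over-fills a
        # capacity-10 queue without raising.
        deadline = timeout and (time.time() + timeout)
        for item in items:
            remaining = deadline and max(deadline - time.time(), 0)
            self.put(item, block=block, timeout=remaining)

    def gets(self, count, timeout):
        # Spend at most `timeout` seconds in total trying to fill a
        # batch of `count` items; return whatever was collected.
        deadline = time.time() + timeout
        result = []
        while len(result) < count:
            try:
                remaining = max(deadline - time.time(), 0)
                result.append(self._queue.get(block=True, timeout=remaining))
            except queue.Empty:
                break
        return tuple(result)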
Example #8
    def test_flush_timeout(self):
        queue = Queue(capacity=10)
        self.assertEqual(queue.flush(timeout=TIMEOUT), 0)
        queue.put('test', timeout=TIMEOUT)
        self.assertIsNone(queue.flush(timeout=TIMEOUT))
        queue.puts(range(100), timeout=TIMEOUT)
        self.assertIsNone(queue.flush(timeout=TIMEOUT))

        def proc():
            for item in queue.gets(count=1, timeout=TIMEOUT):
                if isinstance(item, QueueEvent):
                    item.set()

        task = PeriodicTask(TIMEOUT / 10, proc)
        task.start()
        try:
            self.assertIsNotNone(queue.flush())
        finally:
            task.cancel()
            task.join()
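
test_flush_timeout encodes the flush contract: 0 when the queue is empty, None when nothing consumes the flush event within the timeout, and a non-None value (plausibly the elapsed time) once a consumer calls event.set(). Here is a sketch of flush under those assumptions, written as a method of the Queue sketch from Example #6 and reusing the QueueEvent class from the Worker sketch in Example #1:

    def flush(self, timeout=None):
        if self.is_empty():
            return 0  # nothing to flush
        start = time.time()
        event = QueueEvent('flush')
        try:
            # Enqueue a sentinel; whoever drains the queue must set it.
            self._queue.put(event, block=True, timeout=timeout)
        except queue.Full:
            return None
        remaining = timeout and max(timeout - (time.time() - start), 0)
        if event.wait(remaining):
            return time.time() - start  # seconds taken to flush
        return None  # no consumer reached the sentinel in time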
Example #9
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = LocalFileStorage(
            path=self.options.storage_path,
            max_size=self.options.storage_max_size,
            maintenance_period=self.options.storage_maintenance_period,
            retention_period=self.options.storage_retention_period,
            source=self.__class__.__name__,
        )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()
        heartbeat_metrics.enable_heartbeat_metrics(
            self.options.connection_string, self.options.instrumentation_key)
Example #10
    def test_gets_event(self):
        queue = Queue(capacity=10)
        event = QueueEvent('test')
        queue.puts((event, 1, 2, 3, event))
        result = queue.gets(count=5, timeout=TIMEOUT)
        self.assertEqual(result, (event, ))
        result = queue.gets(count=5, timeout=TIMEOUT)
        self.assertEqual(result, (1, 2, 3, event))

        task = PeriodicTask(TIMEOUT / 10, lambda: queue.put(1))
        task.start()
        try:
            result = queue.gets(count=5, timeout=TIMEOUT)
            self.assertEqual(result, (1, 1, 1, 1, 1))
        finally:
            task.cancel()
            task.join()

    def __init__(self):
        super(BaseLogHandler, self).__init__()
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()
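
test_gets_event shows that a batch never crosses an event boundary: an event is returned as the last element of its batch, or alone if it sits at the head, which is how flush and exit requests interrupt normal batching. The gets sketch from Example #6 needs one extra condition to match:

    def gets(self, count, timeout):
        deadline = time.time() + timeout
        result = []
        while len(result) < count:
            try:
                remaining = max(deadline - time.time(), 0)
                result.append(self._queue.get(block=True, timeout=remaining))
            except queue.Empty:
                break
            if isinstance(result[-1], QueueEvent):
                break  # never batch past an event sentinel
        return tuple(result)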
Example #12
    def test_puts_timeout(self):
        queue = Queue(capacity=10)
        queue.puts(range(100), timeout=TIMEOUT)

class BaseLogHandler(logging.Handler):
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()
        atexit.register(self.close, self.options.grace_period)
        # start statsbeat on exporter instantiation
        if not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"):
            statsbeat_metrics.collect_statsbeat_metrics(self.options)
        # For redirects
        self._consecutive_redirects = 0  # To prevent circular redirects

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self, timeout=None):
        if self.storage:
            self.storage.close()
        if self._worker:
            self._worker.stop(timeout)

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        if self._queue.is_empty():
            return

        # We must check the worker thread is alive, because otherwise flush
        # is useless. Also, it would deadlock if no timeout is given, and the
        # queue isn't empty.
        # This is a very possible scenario during process termination, when
        # atexit first calls handler.close() and then logging.shutdown(),
        # that in turn calls handler.flush() without arguments.
        if not self._worker.is_alive():
            logger.warning(
                "Can't flush %s, worker thread is dead. "
                "Any pending messages will be lost.", self)
            return

        self._queue.flush(timeout=timeout)
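
The exit sequence that comment warns about is the normal interpreter shutdown path, not an edge case. A small, hypothetical illustration of the ordering (atexit handlers run last-in, first-out, and the logging module registers logging.shutdown when it is first imported):

import atexit
import logging  # importing logging registers logging.shutdown() via atexit

def close_handler():
    # Stands in for handler.close(): stops and joins the worker thread.
    print('step 1: handler.close()')

# Registered after logging was imported, so this runs BEFORE
# logging.shutdown(), which then calls flush() on every handler with
# no timeout -- the exact sequence the is_alive() guard protects against.
atexit.register(close_handler)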
Example #14
class BaseLogHandler(logging.Handler):

    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)

        logger.debug("Setting up AzureLogHandler")
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = LocalFileStorage(
            path=self.options.storage_path,
            max_size=self.options.storage_max_size,
            maintenance_period=self.options.storage_maintenance_period,
            retention_period=self.options.storage_retention_period,
            source=self.__class__.__name__,
        )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()

        # TODO: Make enable/disable heartbeat configurable. Disabling it for now.
        # heartbeat_metrics.enable_heartbeat_metrics(
        #    self.options.connection_string, self.options.instrumentation_key)

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                if result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self):
        self.storage.close()
        self._worker.stop()

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
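
None of the variants above is usable directly, since the export hook raises NotImplementedError. As a closing illustration, here is a minimal, hypothetical subclass of the simplest variant (the Example #1 BaseLogHandler, which only requires export(batch)); the class and logger names are made up for the demo:

import logging

class PrintLogHandler(BaseLogHandler):
    # Hypothetical subclass: "exports" a batch by printing each record.
    def export(self, batch):
        for record in batch:
            print(self.format(record))

logger = logging.getLogger('demo')
logger.addHandler(PrintLogHandler())
logger.warning('hello')      # enqueued; the worker exports asynchronously
logger.handlers[0].flush()   # block until the batch has been exported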