class BaseLogHandler(logging.Handler):
    def __init__(self):
        super(BaseLogHandler, self).__init__()
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()

    def close(self):
        self._worker.stop()

    def createLock(self):
        # The queue is the synchronization point; skip the handler-level lock.
        self.lock = None

    def emit(self, record):
        # Non-blocking put so logging call sites never stall on export.
        self._queue.put(record, block=False)

    def _export(self, batch, event=None):
        try:
            return self.export(batch)
        finally:
            if event:
                event.set()

    def export(self, batch):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
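The shape of this first version is the core of everything that follows: emit() only enqueues, and a background worker drains the queue in batches. For orientation, here is a self-contained, stdlib-only sketch of that pattern; SimpleWorker, PrintHandler, and the batch/interval parameters are hypothetical stand-ins for the library's Queue and Worker, not its actual implementation.

# Hypothetical, stdlib-only sketch of the queue-plus-worker handler pattern.
# Names and batching behavior are illustrative assumptions, not the library's.
import logging
import queue
import threading


class SimpleWorker(threading.Thread):
    def __init__(self, src, handler, batch_size=100, interval=1.0):
        super().__init__(daemon=True)
        self._src = src
        self._handler = handler
        self._batch_size = batch_size
        self._interval = interval
        self._stopping = threading.Event()

    def run(self):
        while not self._stopping.is_set():
            batch = []
            try:
                # Wait up to `interval` for the first record, then drain
                # whatever else is immediately available, up to `batch_size`.
                batch.append(self._src.get(timeout=self._interval))
                while len(batch) < self._batch_size:
                    batch.append(self._src.get_nowait())
            except queue.Empty:
                pass
            if batch:
                self._handler.export(batch)

    def stop(self):
        self._stopping.set()
        self.join()


class PrintHandler(logging.Handler):
    """Minimal concrete handler: emit() never blocks the caller."""
    def __init__(self):
        super().__init__()
        self._queue = queue.Queue(maxsize=8192)
        self._worker = SimpleWorker(self._queue, self)
        self._worker.start()

    def emit(self, record):
        try:
            self._queue.put_nowait(record)  # drop on overflow, never block
        except queue.Full:
            pass

    def export(self, batch):
        for record in batch:
            print(self.format(record))

    def close(self):
        self._worker.stop()
        super().close()

Note that, like the handler above before flush() support matured, anything still queued when close() returns is simply lost; that is the gap the later versions address.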
class BaseLogHandler(logging.Handler):
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self):
        if self.storage:
            self.storage.close()
        if self._worker:
            self._worker.stop()

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
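This revision validates the instrumentation key, makes the queue capacity and local storage configurable, and attaches a SamplingFilter so sampling happens before records are ever queued. The library's SamplingFilter is not shown here; the following is a hypothetical rate-based logging.Filter illustrating the idea (the real filter may sample deterministically, for example by trace id, rather than with random()).

# Hypothetical sketch of a rate-based sampling filter; illustrative only.
import logging
import random


class RateSamplingFilter(logging.Filter):
    def __init__(self, probability=1.0):
        super().__init__()
        if not 0 <= probability <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self._probability = probability

    def filter(self, record):
        # Returning False drops the record before it reaches emit(),
        # so sampled-out records never consume queue capacity.
        return random.random() < self._probability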
def test_flush_timeout(self):
    queue = Queue(capacity=10)
    # An empty queue flushes immediately.
    self.assertEqual(queue.flush(timeout=TIMEOUT), 0)
    queue.put('test', timeout=TIMEOUT)
    # With no consumer draining the queue, flush times out and returns None.
    self.assertIsNone(queue.flush(timeout=TIMEOUT))
    queue.puts(range(100), timeout=TIMEOUT)
    self.assertIsNone(queue.flush(timeout=TIMEOUT))

    def proc():
        for item in queue.gets(count=1, timeout=TIMEOUT):
            if isinstance(item, QueueEvent):
                item.set()

    # Once a consumer drains items and sets the flush event, flush succeeds.
    task = PeriodicTask(TIMEOUT / 10, proc)
    task.start()
    try:
        self.assertIsNotNone(queue.flush())
    finally:
        task.cancel()
        task.join()
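Reading the assertions: flush() returns 0 for an empty queue, None when no consumer reaches the flush sentinel before the timeout, and a truthy value (elapsed time) once a consumer drains the queue and sets the event. Below is a stdlib sketch consistent with those assertions, with hypothetical names (FlushEvent, FlushableQueue); the library's actual Queue may apportion the timeout across steps differently.

# Stdlib sketch of the flush handshake the test exercises; names and exact
# return-value semantics are inferred from the assertions above, not copied
# from the library.
import queue
import threading
import time


class FlushEvent(threading.Event):
    """Sentinel placed on the queue; set by the consumer when reached."""


class FlushableQueue:
    def __init__(self, capacity):
        self._queue = queue.Queue(maxsize=capacity)

    def put(self, item, block=True, timeout=None):
        self._queue.put(item, block, timeout)

    def gets(self, count, timeout):
        # Drain up to `count` items, waiting at most `timeout` for the first.
        items = []
        try:
            items.append(self._queue.get(timeout=timeout))
            while len(items) < count:
                items.append(self._queue.get_nowait())
        except queue.Empty:
            pass
        return items

    def flush(self, timeout=None):
        if self._queue.empty():
            return 0  # nothing to wait for
        start = time.time()
        event = FlushEvent()
        try:
            # Everything enqueued before the sentinel must be consumed
            # before the consumer can reach it and set it.
            self._queue.put(event, block=True, timeout=timeout)
        except queue.Full:
            return None
        if not event.wait(timeout):
            return None  # consumer never reached the sentinel in time
        return time.time() - start  # seconds taken to flush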
class BaseLogHandler(logging.Handler):
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        utils.validate_instrumentation_key(self.options.instrumentation_key)
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = None
        if self.options.enable_local_storage:
            self.storage = LocalFileStorage(
                path=self.options.storage_path,
                max_size=self.options.storage_max_size,
                maintenance_period=self.options.storage_maintenance_period,
                retention_period=self.options.storage_retention_period,
                source=self.__class__.__name__,
            )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=self.options.queue_capacity)
        self._worker = Worker(self._queue, self)
        self._worker.start()
        atexit.register(self.close, self.options.grace_period)
        # start statsbeat on exporter instantiation
        if not os.environ.get("APPLICATIONINSIGHTS_STATSBEAT_DISABLED_ALL"):
            statsbeat_metrics.collect_statsbeat_metrics(self.options)
        # For redirects
        self._consecutive_redirects = 0  # To prevent circular redirects

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                # Only store files if local storage enabled
                if self.storage and result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self, timeout=None):
        if self.storage:
            self.storage.close()
        if self._worker:
            self._worker.stop(timeout)

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        if self._queue.is_empty():
            return
        # We must check the worker thread is alive, because otherwise flush
        # is useless. Also, it would deadlock if no timeout is given, and the
        # queue isn't empty.
        # This is a very possible scenario during process termination, when
        # atexit first calls handler.close() and then logging.shutdown(),
        # that in turn calls handler.flush() without arguments.
        if not self._worker.is_alive():
            logger.warning(
                "Can't flush %s, worker thread is dead. "
                "Any pending messages will be lost.", self)
            return
        self._queue.flush(timeout=timeout)
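The new guard in flush() exists because of shutdown ordering: atexit callbacks run in reverse registration order, so this handler's close() (registered above with the grace period) runs before logging.shutdown() calls flush() on every handler, and waiting on a queue whose worker is already dead would block forever. A small, hypothetical stdlib sketch of that interplay; Drainer and GRACE_PERIOD are illustrative assumptions.

# Illustrative stdlib sketch of the close/flush interplay: close() is
# registered with atexit, and flush() refuses to wait on a dead worker so
# logging.shutdown() cannot deadlock during interpreter exit.
import atexit
import threading


class Drainer(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)
        self._stop = threading.Event()

    def run(self):
        self._stop.wait()  # stand-in for the batch-export loop

    def stop(self, timeout=None):
        self._stop.set()
        self.join(timeout)  # bounded by the grace period


worker = Drainer()
worker.start()


def flush(timeout=None):
    if not worker.is_alive():
        # Flushing now would block forever: there is no consumer left.
        print("worker already stopped; pending messages are lost")
        return
    # ... wait on the queue's flush event here ...


GRACE_PERIOD = 5.0  # assumed value, standing in for options.grace_period
atexit.register(worker.stop, GRACE_PERIOD)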
class BaseLogHandler(logging.Handler):
    def __init__(self, **options):
        super(BaseLogHandler, self).__init__()
        self.options = Options(**options)
        logger.debug("Setting up AzureLogHandler")
        if not 0 <= self.options.logging_sampling_rate <= 1:
            raise ValueError('Sampling must be in the range: [0,1]')
        self.export_interval = self.options.export_interval
        self.max_batch_size = self.options.max_batch_size
        self.storage = LocalFileStorage(
            path=self.options.storage_path,
            max_size=self.options.storage_max_size,
            maintenance_period=self.options.storage_maintenance_period,
            retention_period=self.options.storage_retention_period,
            source=self.__class__.__name__,
        )
        self._telemetry_processors = []
        self.addFilter(SamplingFilter(self.options.logging_sampling_rate))
        self._queue = Queue(capacity=8192)  # TODO: make this configurable
        self._worker = Worker(self._queue, self)
        self._worker.start()
        # TODO: Make enable/disable heartbeat configurable. Disabling it for now.
        # heartbeat_metrics.enable_heartbeat_metrics(
        #     self.options.connection_string, self.options.instrumentation_key)

    def _export(self, batch, event=None):  # pragma: NO COVER
        try:
            if batch:
                envelopes = [self.log_record_to_envelope(x) for x in batch]
                envelopes = self.apply_telemetry_processors(envelopes)
                result = self._transmit(envelopes)
                if result > 0:
                    self.storage.put(envelopes, result)
            if event:
                if isinstance(event, QueueExitEvent):
                    self._transmit_from_storage()  # send files before exit
                return
            if len(batch) < self.options.max_batch_size:
                self._transmit_from_storage()
        finally:
            if event:
                event.set()

    def close(self):
        self.storage.close()
        self._worker.stop()

    def createLock(self):
        self.lock = None

    def emit(self, record):
        self._queue.put(record, block=False)

    def log_record_to_envelope(self, record):
        raise NotImplementedError  # pragma: NO COVER

    def flush(self, timeout=None):
        self._queue.flush(timeout=timeout)
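In this variant local storage is unconditional: _export persists any batch the transport could not deliver (a positive result appears to be used as a retry lease period) and retransmits stored batches whenever a partial batch leaves spare capacity. A minimal sketch of such a disk fallback, assuming JSON-serializable envelopes and a hypothetical TinyFileStorage; the real LocalFileStorage additionally enforces size limits, retention, maintenance, and lease-based retry, all of which this omits.

# Minimal sketch, under stated assumptions, of a persist-and-replay fallback
# for failed telemetry batches. Not the library's LocalFileStorage.
import json
import os
import time


class TinyFileStorage:
    def __init__(self, path):
        self._path = path
        os.makedirs(path, exist_ok=True)

    def put(self, envelopes, lease_period):
        # One file per failed batch; timestamped names keep replay ordered.
        # `lease_period` is accepted for interface parity but unused here.
        name = '{:.6f}.blob'.format(time.time())
        with open(os.path.join(self._path, name), 'w') as f:
            json.dump(envelopes, f)

    def gets(self):
        # Yield stored batches oldest-first for retransmission.
        for name in sorted(os.listdir(self._path)):
            with open(os.path.join(self._path, name)) as f:
                yield name, json.load(f)

    def delete(self, name):
        # Remove a batch once it has been transmitted successfully.
        os.remove(os.path.join(self._path, name))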