Example #1
0
    def test_shutdown(self):
        """Shutting down the batch processor stops the exporter and flushes
        every record that was logged before shutdown, in order."""
        mem_exporter = InMemoryLogExporter()
        processor = BatchLogProcessor(mem_exporter)

        emitter_provider = LogEmitterProvider()
        emitter_provider.add_log_processor(processor)

        log_emitter = emitter_provider.get_log_emitter(__name__)
        test_logger = logging.getLogger("shutdown")
        test_logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        test_logger.warning("warning message: %s", "possible upcoming heatwave")
        test_logger.error("Very high rise in temperatures across the globe")
        test_logger.critical("Temparature hits high 420 C in Hyderabad")

        processor.shutdown()
        # shutdown() must propagate to the exporter.
        self.assertTrue(mem_exporter._stopped)

        emitted = [
            (entry.log_record.body, entry.log_record.severity_text)
            for entry in mem_exporter.get_finished_logs()
        ]
        self.assertEqual(
            [
                ("warning message: possible upcoming heatwave", "WARNING"),
                ("Very high rise in temperatures across the globe", "ERROR"),
                ("Temparature hits high 420 C in Hyderabad", "CRITICAL"),
            ],
            emitted,
        )
Example #2
0
    def test_with_multiple_threads(self):
        """Concurrent loggers plus force_flush must not drop records.

        Worker k logs k critical records (k = 1..69), so the exporter must
        end up with 1 + 2 + ... + 69 = 2415 records in total.
        """
        mem_exporter = InMemoryLogExporter()
        processor = BatchLogProcessor(mem_exporter)

        emitter_provider = LogEmitterProvider()
        emitter_provider.add_log_processor(processor)

        log_emitter = emitter_provider.get_log_emitter(__name__)
        test_logger = logging.getLogger("threads")
        test_logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        def bulk_log_and_flush(num_logs):
            for _ in range(num_logs):
                test_logger.critical("Critical message")
            # Every worker flushes; each flush must succeed.
            self.assertTrue(processor.force_flush())

        with ThreadPoolExecutor(max_workers=69) as pool:
            pending = [
                pool.submit(bulk_log_and_flush, count)
                for count in range(1, 70)
            ]

            pool.shutdown()

        self.assertEqual(len(mem_exporter.get_finished_logs()), 2415)
Example #3
0
    def test_log_processor_too_many_logs(self):
        """A burst of 1000 records is fully exported after force_flush."""
        mem_exporter = InMemoryLogExporter()
        processor = BatchLogProcessor(mem_exporter)

        emitter_provider = LogEmitterProvider()
        emitter_provider.add_log_processor(processor)

        log_emitter = emitter_provider.get_log_emitter(__name__)
        test_logger = logging.getLogger("many_logs")
        test_logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        for log_no in range(1000):
            test_logger.critical("Log no: %s", log_no)

        self.assertTrue(processor.force_flush())
        # (local renamed from the original's misspelled `finised_logs`)
        finished_logs = mem_exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1000)
Example #4
0
    def test_force_flush(self):
        """force_flush delivers a single pending record with the expected
        body and severity."""
        mem_exporter = InMemoryLogExporter()
        processor = BatchLogProcessor(mem_exporter)

        emitter_provider = LogEmitterProvider()
        emitter_provider.add_log_processor(processor)

        log_emitter = emitter_provider.get_log_emitter(__name__)
        test_logger = logging.getLogger("force_flush")
        test_logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        test_logger.critical("Earth is burning")
        processor.force_flush()

        exported = mem_exporter.get_finished_logs()
        self.assertEqual(len(exported), 1)
        record = exported[0].log_record
        self.assertEqual(record.body, "Earth is burning")
        # logging.CRITICAL maps to OTel severity FATAL.
        self.assertEqual(record.severity_number, SeverityNumber.FATAL)
Example #5
0
    def test_batch_log_processor_fork(self):
        """Check the batch processor still exports in a fork()ed child.

        BatchLogProcessor runs a background worker thread, and threads do not
        survive fork(); this test presumably verifies the processor restarts
        its worker in the child process — TODO confirm against the SDK's
        at-fork handling.
        """
        # pylint: disable=invalid-name
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(
            exporter,
            max_export_batch_size=64,
            schedule_delay_millis=10,
        )
        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("test-fork")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        # Sanity check in the parent before forking: one record round-trips.
        logger.critical("yolo")
        time.sleep(0.5)  # give some time for the exporter to upload

        self.assertTrue(log_processor.force_flush())
        self.assertEqual(len(exporter.get_finished_logs()), 1)
        exporter.clear()

        # Must be called before creating the Process below; raises
        # RuntimeError if a start method was already set elsewhere.
        multiprocessing.set_start_method("fork")

        def child(conn):
            def _target():
                logger.critical("Critical message child")

            # Helper from the test base class (not shown here): runs
            # _target on 100 threads — presumably joining them all.
            self.run_with_many_threads(_target, 100)

            time.sleep(0.5)

            # Report success/failure to the parent over the pipe; the
            # child's exporter copy must have received all 100 records.
            logs = exporter.get_finished_logs()
            conn.send(len(logs) == 100)
            conn.close()

        parent_conn, child_conn = multiprocessing.Pipe()
        p = multiprocessing.Process(target=child, args=(child_conn,))
        p.start()
        self.assertTrue(parent_conn.recv())
        p.join()

        log_processor.shutdown()
Example #6
0
    def test_emit_call_log_record(self):
        """One handled log record results in exactly one processor.emit call."""
        mem_exporter = InMemoryLogExporter()
        # Wrap the real processor so call counts can be inspected while the
        # genuine behavior is preserved.
        spied_processor = Mock(wraps=BatchLogProcessor(mem_exporter))
        emitter_provider = LogEmitterProvider()
        emitter_provider.add_log_processor(spied_processor)

        log_emitter = emitter_provider.get_log_emitter(__name__)
        test_logger = logging.getLogger("emit_call")
        test_logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        test_logger.error("error")
        self.assertEqual(spied_processor.emit.call_count, 1)
Example #7
0
def _init_logging(
    exporters: Dict[str, Type[LogExporter]],
    auto_instrumentation_version: Optional[str] = None,
):
    """Install a global LogEmitterProvider and attach an OTLPHandler to the
    root logger.

    Args:
        exporters: mapping of exporter names to LogExporter classes; each
            class is instantiated and wired behind a BatchLogProcessor.
        auto_instrumentation_version: when set, recorded on the resource as
            the telemetry auto-instrumentation version attribute.
    """
    # Resource.create also reads OTEL_RESOURCE_ATTRIBUTES from the
    # environment (service_name etc.), defaulting to "unknown_service".
    auto_resource = {}
    if auto_instrumentation_version:
        auto_resource[
            ResourceAttributes.TELEMETRY_AUTO_VERSION
        ] = auto_instrumentation_version

    provider = LogEmitterProvider(resource=Resource.create(auto_resource))
    set_log_emitter_provider(provider)

    # Only the classes are used; the mapping keys are ignored.
    for exporter_class in exporters.values():
        provider.add_log_processor(BatchLogProcessor(exporter_class()))

    root_handler = OTLPHandler(
        level=logging.NOTSET,
        log_emitter=provider.get_log_emitter(__name__),
    )
    logging.getLogger().addHandler(root_handler)
Example #8
0
import logging
# FIX: `os` is read below (os.environ) but was never imported — the script
# crashed with NameError before reaching the exporter setup.
import os

from opentelemetry.sdk._logs import (
    LogEmitterProvider,
    OTLPHandler,
    set_log_emitter_provider,
)
from opentelemetry.sdk._logs.export import BatchLogProcessor

from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter

# Register the provider globally so OTLPHandler() picks it up by default.
log_emitter_provider = LogEmitterProvider()
set_log_emitter_provider(log_emitter_provider)

# Raises KeyError if the connection string is not configured — fail fast.
exporter = AzureMonitorLogExporter.from_connection_string(
    os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"])

log_emitter_provider.add_log_processor(BatchLogProcessor(exporter))
handler = OTLPHandler()

# Attach OTel handler to namespaced logger
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.NOTSET)

try:
    val = 1 / 0
    print(val)
except ZeroDivisionError:
    # logger.exception logs at ERROR severity and includes the traceback.
    logger.exception("Error: Division by zero")