Example #1
    def test_with_multiple_threads(self):
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(exporter)

        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("threads")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        def bulk_log_and_flush(num_logs):
            for _ in range(num_logs):
                logger.critical("Critical message")
            self.assertTrue(log_processor.force_flush())

        with ThreadPoolExecutor(max_workers=69) as executor:
            futures = []
            for idx in range(69):
                future = executor.submit(bulk_log_and_flush, idx + 1)
                futures.append(future)

            executor.shutdown()

        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 2415)
Example #2
    def test_simple_log_processor_custom_level(self):
        exporter = InMemoryLogExporter()
        log_emitter_provider = LogEmitterProvider()
        log_emitter = log_emitter_provider.get_log_emitter(__name__)

        log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))

        logger = logging.getLogger("custom_level")
        logger.setLevel(logging.ERROR)
        logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        logger.warning("Warning message")
        logger.debug("Debug message")
        logger.error("Error message")
        logger.critical("Critical message")
        finished_logs = exporter.get_finished_logs()
        # Make sure only logs at level >= logging.ERROR are recorded
        self.assertEqual(len(finished_logs), 2)
        error_log_record = finished_logs[0].log_record
        critical_log_record = finished_logs[1].log_record
        self.assertEqual(error_log_record.body, "Error message")
        self.assertEqual(error_log_record.severity_text, "ERROR")
        self.assertEqual(
            error_log_record.severity_number, SeverityNumber.ERROR
        )
        self.assertEqual(critical_log_record.body, "Critical message")
        self.assertEqual(critical_log_record.severity_text, "CRITICAL")
        self.assertEqual(
            critical_log_record.severity_number, SeverityNumber.FATAL
        )
Example #3
    def test_shutdown(self):
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(exporter)

        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("shutdown")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        logger.warning("warning message: %s", "possible upcoming heatwave")
        logger.error("Very high rise in temperatures across the globe")
        logger.critical("Temparature hits high 420 C in Hyderabad")

        log_processor.shutdown()
        self.assertTrue(exporter._stopped)

        finished_logs = exporter.get_finished_logs()
        expected = [
            ("warning message: possible upcoming heatwave", "WARNING"),
            ("Very high rise in temperatures across the globe", "ERROR"),
            (
                "Temparature hits high 420 C in Hyderabad",
                "CRITICAL",
            ),
        ]
        emitted = [
            (item.log_record.body, item.log_record.severity_text)
            for item in finished_logs
        ]
        self.assertEqual(expected, emitted)
Example #4
    def test_emit_call_log_record(self):
        exporter = InMemoryLogExporter()
        log_processor = Mock(wraps=BatchLogProcessor(exporter))
        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("emit_call")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        logger.error("error")
        self.assertEqual(log_processor.emit.call_count, 1)
Example #5
    def test_log_processor_too_many_logs(self):
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(exporter)

        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("many_logs")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        for log_no in range(1000):
            logger.critical("Log no: %s", log_no)

        self.assertTrue(log_processor.force_flush())
        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1000)
Example #6
    def test_force_flush(self):
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(exporter)

        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("force_flush")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        logger.critical("Earth is burning")
        log_processor.force_flush()
        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1)
        log_record = finished_logs[0].log_record
        self.assertEqual(log_record.body, "Earth is burning")
        self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
Example #7
    def test_simple_log_processor_default_level(self):
        exporter = InMemoryLogExporter()
        log_emitter_provider = LogEmitterProvider()
        log_emitter = log_emitter_provider.get_log_emitter(__name__)

        log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))

        logger = logging.getLogger("default_level")
        logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        logger.warning("Something is wrong")
        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1)
        warning_log_record = finished_logs[0].log_record
        self.assertEqual(warning_log_record.body, "Something is wrong")
        self.assertEqual(warning_log_record.severity_text, "WARNING")
        self.assertEqual(
            warning_log_record.severity_number, SeverityNumber.WARN
        )
Example #8
    def test_batch_log_processor_fork(self):
        # pylint: disable=invalid-name
        exporter = InMemoryLogExporter()
        log_processor = BatchLogProcessor(
            exporter,
            max_export_batch_size=64,
            schedule_delay_millis=10,
        )
        provider = LogEmitterProvider()
        provider.add_log_processor(log_processor)

        emitter = provider.get_log_emitter(__name__)
        logger = logging.getLogger("test-fork")
        logger.addHandler(OTLPHandler(log_emitter=emitter))

        logger.critical("yolo")
        time.sleep(0.5)  # give some time for the exporter to upload

        self.assertTrue(log_processor.force_flush())
        self.assertEqual(len(exporter.get_finished_logs()), 1)
        exporter.clear()

        multiprocessing.set_start_method("fork")

        def child(conn):
            def _target():
                logger.critical("Critical message child")

            self.run_with_many_threads(_target, 100)

            time.sleep(0.5)

            logs = exporter.get_finished_logs()
            conn.send(len(logs) == 100)
            conn.close()

        parent_conn, child_conn = multiprocessing.Pipe()
        p = multiprocessing.Process(target=child, args=(child_conn,))
        p.start()
        self.assertTrue(parent_conn.recv())
        p.join()

        log_processor.shutdown()
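
The run_with_many_threads helper called above is not shown in this snippet. A minimal sketch of what it might look like, assuming only the standard library threading module (the name and signature are taken from the call site; the body is an assumption, not the original helper):

    def run_with_many_threads(self, func, num_threads):
        # Hypothetical helper: run `func` concurrently on `num_threads`
        # threads and wait for all of them to finish.
        threads = [threading.Thread(target=func) for _ in range(num_threads)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()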
Example #9
    def test_log_processor(self):
        provider = LogEmitterProvider()
        log_emitter = provider.get_log_emitter(__name__)
        handler = OTLPHandler(log_emitter=log_emitter)

        logs_list_1 = []
        processor1 = AnotherLogProcessor(Mock(), logs_list_1)
        logs_list_2 = []
        processor2 = AnotherLogProcessor(Mock(), logs_list_2)

        logger = logging.getLogger("test.span.processor")
        logger.addHandler(handler)

        # Test with no processor added
        logger.critical("Odisha, we have another major cyclone")

        self.assertEqual(len(logs_list_1), 0)
        self.assertEqual(len(logs_list_2), 0)

        # Add one processor
        provider.add_log_processor(processor1)
        logger.warning("Brace yourself")
        logger.error("Some error message")

        expected_list_1 = [
            ("Brace yourself", "WARNING"),
            ("Some error message", "ERROR"),
        ]
        self.assertEqual(logs_list_1, expected_list_1)

        # Add another processor
        provider.add_log_processor(processor2)
        logger.critical("Something disastrous")
        expected_list_1.append(("Something disastrous", "CRITICAL"))

        expected_list_2 = [("Something disastrous", "CRITICAL")]

        self.assertEqual(logs_list_1, expected_list_1)
        self.assertEqual(logs_list_2, expected_list_2)
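
The AnotherLogProcessor class used above is not defined in this snippet. A plausible reconstruction, assuming it is a custom LogProcessor that appends (body, severity_text) tuples to the list it is given; the class body below is an assumption, not the original helper:

from opentelemetry.sdk._logs import LogProcessor


class AnotherLogProcessor(LogProcessor):
    # Hypothetical reconstruction: record each emitted log as a
    # (body, severity_text) tuple in the provided list.
    def __init__(self, exporter, logs_list):
        self._exporter = exporter
        self._log_list = logs_list

    def emit(self, log_data):
        log_record = log_data.log_record
        self._log_list.append((log_record.body, log_record.severity_text))

    def shutdown(self):
        self._exporter.shutdown()

    def force_flush(self, timeout_millis=30000):
        return True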
Example #10
def _init_logging(
    exporters: Dict[str, Type[LogExporter]],
    auto_instrumentation_version: Optional[str] = None,
):
    # If the OTEL_RESOURCE_ATTRIBUTES env var is set, the service name is read
    # from it; otherwise it defaults to "unknown_service".
    auto_resource = {}
    # Populate the version attribute when running under auto-instrumentation.
    if auto_instrumentation_version:
        auto_resource[
            ResourceAttributes.TELEMETRY_AUTO_VERSION
        ] = auto_instrumentation_version
    provider = LogEmitterProvider(resource=Resource.create(auto_resource))
    set_log_emitter_provider(provider)

    for _, exporter_class in exporters.items():
        exporter_args = {}
        provider.add_log_processor(
            BatchLogProcessor(exporter_class(**exporter_args)))

    log_emitter = provider.get_log_emitter(__name__)
    handler = OTLPHandler(level=logging.NOTSET, log_emitter=log_emitter)

    logging.getLogger().addHandler(handler)
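
A short usage sketch for the function above. The exporter mapping and the env var value are illustrative assumptions, not part of the original code; ConsoleLogExporter is assumed to be available from the same experimental export module as BatchLogProcessor:

import logging
import os

from opentelemetry.sdk._logs.export import ConsoleLogExporter

# Resource attributes such as service.name are read from this env var
# (see the comment inside _init_logging above).
os.environ.setdefault("OTEL_RESOURCE_ATTRIBUTES", "service.name=my-service")

_init_logging({"console": ConsoleLogExporter})
logging.getLogger(__name__).warning("logging pipeline initialized")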
Example #11
    def test_simple_log_processor_trace_correlation(self):
        exporter = InMemoryLogExporter()
        log_emitter_provider = LogEmitterProvider()
        log_emitter = log_emitter_provider.get_log_emitter("name", "version")

        log_emitter_provider.add_log_processor(SimpleLogProcessor(exporter))

        logger = logging.getLogger("trace_correlation")
        logger.addHandler(OTLPHandler(log_emitter=log_emitter))

        logger.warning("Warning message")
        finished_logs = exporter.get_finished_logs()
        self.assertEqual(len(finished_logs), 1)
        log_record = finished_logs[0].log_record
        self.assertEqual(log_record.body, "Warning message")
        self.assertEqual(log_record.severity_text, "WARNING")
        self.assertEqual(log_record.severity_number, SeverityNumber.WARN)
        self.assertEqual(log_record.trace_id, INVALID_SPAN_CONTEXT.trace_id)
        self.assertEqual(log_record.span_id, INVALID_SPAN_CONTEXT.span_id)
        self.assertEqual(
            log_record.trace_flags, INVALID_SPAN_CONTEXT.trace_flags
        )
        exporter.clear()

        tracer = trace.TracerProvider().get_tracer(__name__)
        with tracer.start_as_current_span("test") as span:
            logger.critical("Critical message within span")

            finished_logs = exporter.get_finished_logs()
            log_record = finished_logs[0].log_record
            self.assertEqual(log_record.body, "Critical message within span")
            self.assertEqual(log_record.severity_text, "CRITICAL")
            self.assertEqual(log_record.severity_number, SeverityNumber.FATAL)
            span_context = span.get_span_context()
            self.assertEqual(log_record.trace_id, span_context.trace_id)
            self.assertEqual(log_record.span_id, span_context.span_id)
            self.assertEqual(log_record.trace_flags, span_context.trace_flags)
Example #12
import logging
import os

from opentelemetry.sdk._logs import (
    LogEmitterProvider,
    OTLPHandler,
    set_log_emitter_provider,
)
from opentelemetry.sdk._logs.export import BatchLogProcessor

from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter

log_emitter_provider = LogEmitterProvider()
set_log_emitter_provider(log_emitter_provider)

exporter = AzureMonitorLogExporter.from_connection_string(
    os.environ["APPLICATIONINSIGHTS_CONNECTION_STRING"])

log_emitter_provider.add_log_processor(BatchLogProcessor(exporter))
handler = OTLPHandler()

# Attach OTel handler to namespaced logger
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.NOTSET)

try:
    val = 1 / 0
    print(val)
except ZeroDivisionError:
    logger.exception("Error: Division by zero")
Example #13
def get_logger(level=logging.NOTSET, log_emitter=None):
    logger = logging.getLogger(__name__)
    handler = OTLPHandler(level=level, log_emitter=log_emitter)
    logger.addHandler(handler)
    return logger
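
A short usage sketch for the helper above, assuming the experimental LogEmitterProvider, SimpleLogProcessor, and ConsoleLogExporter classes shown in the other examples:

from opentelemetry.sdk._logs import LogEmitterProvider
from opentelemetry.sdk._logs.export import ConsoleLogExporter, SimpleLogProcessor

provider = LogEmitterProvider()
provider.add_log_processor(SimpleLogProcessor(ConsoleLogExporter()))

logger = get_logger(
    level=logging.WARNING, log_emitter=provider.get_log_emitter(__name__)
)
logger.error("emitted through the OTLPHandler attached by get_logger")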
Example #14
trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(ConsoleSpanExporter()))

log_emitter_provider = LogEmitterProvider(
    resource=Resource.create(
        {
            "service.name": "shoppingcart",
            "service.instance.id": "instance-12",
        }
    ),
)
set_log_emitter_provider(log_emitter_provider)

exporter = OTLPLogExporter(insecure=True)
log_emitter_provider.add_log_processor(BatchLogProcessor(exporter))
log_emitter = log_emitter_provider.get_log_emitter(__name__, "0.1")
handler = OTLPHandler(level=logging.NOTSET, log_emitter=log_emitter)

# Attach OTLP handler to root logger
logging.getLogger().addHandler(handler)

# Log directly
logging.info("Jackdaws love my big sphinx of quartz.")

# Create different namespaced loggers
logger1 = logging.getLogger("myapp.area1")
logger2 = logging.getLogger("myapp.area2")

logger1.debug("Quick zephyrs blow, vexing daft Jim.")
logger1.info("How quickly daft jumping zebras vex.")
logger2.warning("Jail zesty vixen who grabbed pay from quack.")
logger2.error("The five boxing wizards jump quickly.")
Example #15
trace.get_tracer_provider().add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter()))

log_emitter_provider = LogEmitterProvider(
    resource=Resource.create({
        "service.name": "python-django.heroku",
        "service.instance.id": serviceId,
        "environment": "production"
    }))
set_log_emitter_provider(log_emitter_provider)

exporter = OTLPLogExporter(insecure=True)
log_emitter_provider.add_log_processor(BatchLogProcessor(exporter))
log_emitter = log_emitter_provider.get_log_emitter(__name__, "0.1")
handler = OTLPHandler(level=logging.DEBUG, log_emitter=log_emitter)

# Attach OTLP handler to root logger
logging.getLogger().addHandler(handler)

DjangoInstrumentor().instrument()
SQLite3Instrumentor().instrument()
Psycopg2Instrumentor().instrument()

# Create your views here.


def check_topic_owner(owner, request):
    # Make sure the topic belongs to the current user.
    if owner != request.user:
        raise Http404