def __init__(self, run):
        """Wire up Application Insights tracing, logging and metrics for a run.

        :param run: run object handed to ``get_run_id_and_set_context`` to
            resolve the run id (semantics defined elsewhere in this class).
        """
        print('Initializing the AppInsightsLogger')
        self.env = Env()
        self.run_id = self.get_run_id_and_set_context(run)

        # Prepare integrations and initialize tracer
        config_integration.trace_integrations(['httplib', 'logging'])
        texporter = AzureExporter(
            connection_string=self.env.app_insights_connection_string)
        # Telemetry processors can mutate/filter envelopes before export.
        texporter.add_telemetry_processor(self.callback_function)
        self.tracer = Tracer(exporter=texporter,
                             sampler=ProbabilitySampler(
                                 self.env.trace_sampling_rate))

        # Create AppInsights Handler and set log format
        self.logger = logging.getLogger(__name__)
        # NOTE(review): if env.log_level is not a valid level name, getattr
        # falls back to the *string* "WARNING", which setLevel also accepts.
        self.logger.setLevel(
            getattr(logging, self.env.log_level.upper(), "WARNING"))
        handler = AzureLogHandler(
            connection_string=self.env.app_insights_connection_string,
            logging_sampling_rate=self.env.log_sampling_rate,
        )
        handler.add_telemetry_processor(self.callback_function)
        self.logger.addHandler(handler)

        # initializes metric exporter
        mexporter = metrics_exporter.new_metrics_exporter(
            enable_standard_metrics=self.env.enable_standard_metrics,
            export_interval=self.env.metrics_export_interval,
            connection_string=self.env.app_insights_connection_string,
        )
        mexporter.add_telemetry_processor(self.callback_function)
        stats_module.stats.view_manager.register_exporter(mexporter)
Ejemplo n.º 2
0
def __get_logger() -> Logger:
    """Build a module logger that ships records to Azure Application Insights."""
    # Enable trace/span id injection into log records.
    config_integration.trace_integrations(['logging'])

    azure_handler = AzureLogHandler(
        connection_string=os.getenv('ApplicationInsights'))
    azure_handler.setFormatter(logging.Formatter('%(traceId)s %(message)s'))

    module_logger = logging.getLogger(__name__)
    module_logger.addHandler(azure_handler)
    return module_logger
Ejemplo n.º 3
0
def _create_telemetry_handler() -> "AzureLogHandler":
    """
    Configure, create, and return the telemetry handler

    The created handler is also stored in the module-level
    ``telemetry_handler`` global. Every record gets the default custom
    dimensions attached via a filter, and every envelope is tagged with the
    host name and a location/workstation id read from the ``qc`` config.
    """
    from opencensus.ext.azure.log_exporter import AzureLogHandler
    global telemetry_handler

    # The default_custom_dimensions will appear in the "customDimensions"
    # field in Azure log analytics for every log message alongside any
    # custom dimensions that message may have. All messages additionally come
    # with custom dimensions fileName, level, lineNumber, module, and process
    default_custom_dimensions = {"pythonExecutable": sys.executable}

    class CustomDimensionsFilter(logging.Filter):
        """
        Add application-wide properties to the customDimension field of
        AzureLogHandler records
        """
        def __init__(self, custom_dimensions: Dict[str, str]):
            super().__init__()
            self.custom_dimensions = custom_dimensions

        def filter(self, record: logging.LogRecord) -> bool:
            """
            Add the default custom_dimensions into the current log record

            Record-specific dimensions win over the defaults on key clashes.
            """
            cdim = self.custom_dimensions.copy()
            cdim.update(getattr(record, "custom_dimensions", {}))
            record.custom_dimensions = cdim  # type: ignore[attr-defined]

            return True

    # Transport module of opencensus-ext-azure logs info 'transmission
    # succeeded' which is also exported to azure if AzureLogHandler is
    # in root_logger. The following lines stops that.
    logging.getLogger("opencensus.ext.azure.common.transport").setLevel(
        logging.WARNING)

    loc = qc.config.GUID_components.location
    stat = qc.config.GUID_components.work_station

    def callback_function(envelope: "Envelope") -> bool:
        # Tag each exported envelope with host name and a hex-encoded
        # location/workstation id; returning True keeps the envelope.
        envelope.tags["ai.user.accountId"] = platform.node()
        envelope.tags["ai.user.id"] = f"{loc:02x}-{stat:06x}"
        return True

    telemetry_handler = AzureLogHandler(
        connection_string=f"InstrumentationKey="
        f"{qc.config.telemetry.instrumentation_key}")
    telemetry_handler.add_telemetry_processor(callback_function)
    telemetry_handler.setLevel(logging.INFO)
    telemetry_handler.addFilter(
        CustomDimensionsFilter(default_custom_dimensions))
    telemetry_handler.setFormatter(get_formatter_for_telemetry())

    return telemetry_handler
Ejemplo n.º 4
0
def _register_azure_handler() -> None:
    """Attach an AzureLogHandler to the logger when an instrumentation key is set."""
    key = settings.INSTRUMENTATION_KEY.value
    # Empty string and None both mean "no remote logging configured".
    if not key:
        logger.info("INSTRUMENTATION_KEY not set, logging locally")
        return

    logger.info("INSTRUMENTATION_KEY set, starting to log remotely")
    azure_handler = AzureLogHandler(connection_string=f'InstrumentationKey={key}')
    azure_handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(azure_handler)
Ejemplo n.º 5
0
def setup_logging():
    """Configure the root logger with console and Application Insights handlers."""
    fmt = logging.Formatter(
        '%(asctime)s [%(levelname)-5.5s] [%(name)s] %(message)s', "%H:%M:%S")

    console = logging.StreamHandler()
    console.setFormatter(fmt)
    console.setLevel(logging.INFO)

    azure = AzureLogHandler(connection_string=azure_insights.connection_string)
    azure.setLevel(logging.INFO)

    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(console)
    root.addHandler(azure)
Ejemplo n.º 6
0
class AzureProxyHandler(logging.Handler):
    """Logging handler that forwards records to an AzureLogHandler, injecting
    application-wide context/environment labels into customDimensions."""

    def __init__(self, connection_string, context, environment):
        """
        :param connection_string: Application Insights connection string.
        :param context: Optional context label added to every record (skipped
            when None).
        :param environment: Optional environment label added to every record
            (skipped when None).
        """
        super().__init__()
        self._handler = AzureLogHandler(connection_string=connection_string)
        self.context = context
        self.environment = environment

    def emit(self, record):
        """Enrich the record's custom dimensions and delegate to the Azure handler."""
        # BUG FIX: LogRecord has no 'extra' attribute by default — values
        # passed via logger.*(..., extra={...}) are merged into
        # record.__dict__ — so the old unconditional record.extra.get(...)
        # raised AttributeError for ordinary records. Honor an explicit
        # record.extra dict if present, otherwise read the merged attribute.
        extra = getattr(record, "extra", None)
        if isinstance(extra, dict):
            dims = dict(extra.get("custom_dimensions", {}))
        else:
            dims = dict(getattr(record, "custom_dimensions", {}) or {})
        # Copy (dict(...)) so the caller's dict is never mutated.
        if self.context is not None:
            dims["context"] = self.context
        if self.environment is not None:
            dims["environment"] = self.environment
        record.custom_dimensions = dims
        self._handler.emit(record)
Ejemplo n.º 7
0
def initialize_logging(logging_level: int, correlation_id: str, add_console_handler: bool = False) -> logging.LoggerAdapter:
    """
    Adds the Application Insights handler for the root logger and sets the given logging level.
    Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages.
    Note: This should be called only once, otherwise duplicate log entries could be produced.

    :param logging_level: The logging level to set e.g., logging.WARNING.
    :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights.
    :param add_console_handler: Optional. When True, also attach a formatted console (stream) handler.
    :returns: A newly created logger adapter.
    """
    logger = logging.getLogger()

    # When using sessions and NEXT_AVAILABLE_SESSION we see regular exceptions which are actually expected
    # See https://github.com/Azure/azure-sdk-for-python/issues/9402
    # Other log entries such as 'link detach' also confuse the logs, and are expected.
    # We don't want these making the logs any noisier so we raise the logging level for that logger here
    # To inspect all the loggers, use -> loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
    for logger_name in LOGGERS_FOR_ERRORS_ONLY:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    if add_console_handler:
        console_formatter = logging.Formatter(fmt='%(module)-7s %(name)-7s %(process)-7s %(asctime)s %(levelname)-7s %(message)s')
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(console_formatter)
        logger.addHandler(console_handler)

    try:
        azurelog_formatter = AzureLogFormatter()
        # picks up APPLICATIONINSIGHTS_CONNECTION_STRING automatically
        azurelog_handler = AzureLogHandler()
        azurelog_handler.setFormatter(azurelog_formatter)
        logger.addHandler(azurelog_handler)
    except ValueError as e:
        # AzureLogHandler raises ValueError when no valid connection string is
        # configured; log the failure and continue with local-only logging.
        logger.error(f"Failed to set Application Insights logger handler: {e}")

    # Inject traceId/spanId into log records via the opencensus integration.
    config_integration.trace_integrations(['logging'])
    logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s')
    Tracer(sampler=AlwaysOnSampler())
    logger.setLevel(logging_level)

    extra = None

    if correlation_id:
        # The adapter attaches the correlation id as traceId on every message.
        extra = {'traceId': correlation_id}

    adapter = logging.LoggerAdapter(logger, extra)
    adapter.debug(f"Logger adapter initialized with extra: {extra}")

    return adapter
Ejemplo n.º 8
0
def initialize_logging(logging_level: int, correlation_id: str = None) -> logging.LoggerAdapter:
    """
    Adds the Application Insights handler for the root logger and sets the given logging level.
    Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages.

    :param logging_level: The logging level to set e.g., logging.WARNING.
    :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights.
    :returns: A newly created logger adapter.
    """
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())  # For logging into console

    connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
    try:
        root.addHandler(AzureLogHandler(connection_string=connection_string))
    except ValueError as e:
        # Raised when the connection string is missing/invalid; fall back to
        # console-only logging.
        root.error(f"Failed to set Application Insights logger handler: {e}")

    # Inject traceId/spanId into log records via the opencensus integration.
    config_integration.trace_integrations(['logging'])
    logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s')
    Tracer(sampler=AlwaysOnSampler())
    root.setLevel(logging_level)

    extra = {'traceId': correlation_id} if correlation_id else None

    adapter = logging.LoggerAdapter(root, extra)
    adapter.debug(f"Logger adapter initialized with extra: {extra}")

    return adapter
def prep_service():
    """Wire Flask request tracing and route library loggers to App Insights."""
    trace_exporter = AzureExporter(connection_string=AI_INSTRUMENTATION_KEY)
    trace_exporter.add_telemetry_processor(add_cloud_role_name)

    # Middleware instance only needs to be constructed; no reference kept.
    FlaskMiddleware(app=app,
                    exporter=trace_exporter,
                    sampler=AlwaysOnSampler(),
                    propagator=TraceContextPropagator())

    log_handler = AzureLogHandler(connection_string=AI_INSTRUMENTATION_KEY)
    log_handler.add_telemetry_processor(add_cloud_role_name)

    for log_instance, log_level in logging_instances:
        log_instance.addHandler(log_handler)
        log_instance.setLevel(log_level)
Ejemplo n.º 10
0
def _get_client(environ_key: str) -> Optional[logging.Logger]:
    """Return the "onefuzz" logger wired to App Insights, or None when the
    instrumentation-key environment variable named by *environ_key* is unset."""
    instrumentation_key = os.environ.get(environ_key)
    if instrumentation_key is None:
        return None
    onefuzz_logger = logging.getLogger("onefuzz")
    onefuzz_logger.addHandler(AzureLogHandler(
        connection_string="InstrumentationKey=%s" % instrumentation_key))
    return onefuzz_logger
def create_trace(message=message):
    """Emit *message* as a warning through a logger wired to App Insights.

    NOTE(review): the default binds the module-level ``message`` at function
    definition time — confirm that is intentional.
    """
    trace_logger = logging.getLogger(__name__)
    trace_logger.addHandler(
        AzureLogHandler(connection_string='InstrumentationKey=%s' %
                        (instrument_key)))
    trace_logger.warning(message)
Ejemplo n.º 12
0
async def main():
    """Entry point for the object detection business logic IoT Edge module.

    Configures console and optional Application Insights logging, connects an
    IoTHubModuleClient, registers twin/message handlers, and runs the
    processing loop until it completes.
    """
    global module_client

    logger.setLevel(LOG_LEVEL)

    # ODBL: Object Detection Business Logic
    formatter = logging.Formatter('[ODBL] [%(asctime)-15s] [%(threadName)-12.12s] [%(levelname)s]: %(message)s')

    # Add stdout handler
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    logger.addHandler(stdout_handler)

    # App Insights is optional: only enabled when the key env var is present
    # and non-blank, and any initialization failure is non-fatal.
    appinsights_instrumentationkey = os.environ.get('APPINSIGHTS_INSTRUMENTATIONKEY', None)
    if appinsights_instrumentationkey and not appinsights_instrumentationkey.isspace():
        try:
            azure_log_handler = AzureLogHandler(instrumentation_key=appinsights_instrumentationkey)
            azure_log_handler.setFormatter(formatter)
            logger.addHandler(azure_log_handler)

            logger.info('Application Insights initialized.')
        except Exception:
            logger.exception('Application Insights failed to initialize')

    try:
        # BUG FIX: the old check compared sys.version (a string) against
        # '3.6', which wrongly rejects Python 3.10+ ('3.10...' < '3.6'
        # lexicographically). Compare the version tuple instead.
        if sys.version_info < (3, 6):
            raise Exception('The object detection business logic module requires python 3.6+. Current version of Python: %s' % sys.version)

        logger.info('Starting the object detection business logic module ...')

        module_client = IoTHubModuleClient.create_from_edge_environment(websockets=True)

        await module_client.connect()
        # Register handlers for twin updates and incoming messages.
        module_client.on_twin_desired_properties_patch_received = twin_patch_handler
        module_client.on_message_received = message_handler

        logger.info('The object detection business logic module is now waiting for messages.')

        await continuous_loop()
        await module_client.disconnect()
    except Exception as ex:
        logger.exception('Unexpected error: %s' % ex)
        return
Ejemplo n.º 13
0
def getLogger(
    name: str,
    instrumentation_conn_string: str = AI_CONNECTION_STRING,
    propagate: bool = False,
) -> Logger:
    """Get a new logging instance with a handler to send logs to Application Insights

    Args:
        name([str]): [The name of the logger]
        instrumentation_conn_string([str]): [The AppInsights instrumentation connection string]
        propagate([bool]): [Enable log propagation (default: false)]
    """
    azure_handler = AzureLogHandler(connection_string=instrumentation_conn_string)
    azure_handler.add_telemetry_processor(callback_add_role_name)

    named_logger = logging.getLogger(name)
    named_logger.addHandler(azure_handler)
    named_logger.propagate = propagate
    return named_logger
    def __init__(self, app, sampler, instrumentation_key, cloud_role_name,
                 extra_attrs: Dict[str, str],
                 logging_instances: Iterable[Iterable[Union[logging.Logger,
                                                            int]]]):
        """Set up Azure trace export and log handling for the middleware.

        :param app: the wrapped application.
        :param sampler: trace sampler used by the middleware.
        :param instrumentation_key: App Insights connection string.
        :param cloud_role_name: telemetry processor attached to exporter and
            log handler.
        :param extra_attrs: extra attributes stored for later use.
        :param logging_instances: (logger, level) pairs to route through the
            Azure log handler.
        """
        self.app = app
        self.sampler = sampler
        self.extra_attrs = extra_attrs

        self.exporter = Exporter(connection_string=instrumentation_key)
        self.exporter.add_telemetry_processor(cloud_role_name)

        self.handler = AzureLogHandler(connection_string=instrumentation_key)
        self.handler.add_telemetry_processor(cloud_role_name)

        super(TraceRequestMiddleware, self).__init__(app)

        for log_instance, log_level in logging_instances:
            log_instance.addHandler(self.handler)
            log_instance.setLevel(log_level)
Ejemplo n.º 15
0
def setup_loggers(name: str, level: str):
    """Configure console and optional Application Insights logging.

    :param name: Logger name to configure; when None, falls back to the
        LOGGER_NAME environment variable / the root logger.
    :param level: Root/console log level name (e.g. "info").
    :returns: The configured logger.
    """
    app_insights_key = os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY")
    app_insights_level = get_level(os.environ.get("APPINSIGHTS_LOG_LEVEL", "disabled"))

    formatter = logging.Formatter("[%(levelname)s] %(name)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    # BUG FIX: logging.basicConfig() has no 'formatter' keyword and raises
    # ValueError("Unrecognised argument(s): formatter"); pass the format
    # string and datefmt instead.
    logging.basicConfig(format="[%(levelname)s] %(name)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
    logging.addLevelName(logging.WARNING, "WARN")

    logger_name = name if name is not None else os.environ.get("LOGGER_NAME")

    # NOTE(review): when name is None the LOGGER_NAME fallback in logger_name
    # is ignored and the root logger is used — confirm this is intentional.
    log = logging.getLogger(logger_name) if name is not None else logging.getLogger()

    root_logger = logging.getLogger()
    root_logger.setLevel(get_level(level))
    log.handlers.clear()
    log.propagate = False

    console_log_level = get_level(level if level else os.environ.get("CONSOLE_LOGGER_LEVEL", "info"))
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    console_handler.setLevel(console_log_level)
    log.addHandler(console_handler)

    if app_insights_key:  # and app_insights_level != logging.NOTSET:
        app_insights_handler = AzureLogHandler(connection_string=f"InstrumentationKey={app_insights_key}")
        app_insights_handler.setFormatter(formatter)
        app_insights_handler.setLevel(app_insights_level)

        log.addHandler(app_insights_handler)

    return log
Ejemplo n.º 16
0
def init():
    """Initialize file, console and Application Insights logging for the app."""
    # Load environment variables
    load_dotenv()

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # File handler persisting the log under LOCAL_DIR.
    file_handler = logging.FileHandler(
        os.path.join(os.getenv("LOCAL_DIR"),
                     os.getenv("LOG_NAME") + ".log"))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    # Console handler mirroring the log to stdout.
    console_handler = logging.StreamHandler(stream=sys.stdout)
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(formatter)

    # Azure Application Insights handler (connection string from env).
    azure_handler = AzureLogHandler()
    azure_handler.setLevel(logging.DEBUG)
    azure_handler.setFormatter(formatter)

    logger = logging.getLogger(os.getenv("LOG_NAME"))
    logger.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    logger.addHandler(azure_handler)
def create_trace_dependency(name=name,
                            message=message,
                            message_before=message_before,
                            message_after=message_after):
    """Log messages around a traced span so App Insights records a dependency.

    NOTE(review): the defaults bind module-level globals at definition time —
    confirm that is intentional.
    """
    config_integration.trace_integrations(['logging'])

    dep_logger = logging.getLogger(__name__)

    azure_handler = AzureLogHandler(connection_string='InstrumentationKey=%s' %
                                    (instrument_key))
    azure_handler.setFormatter(
        logging.Formatter('%(traceId)s %(spanId)s %(message)s'))
    dep_logger.addHandler(azure_handler)

    span_tracer = Tracer(
        exporter=AzureExporter(
            connection_string='InstrumentationKey=%s' % (instrument_key)),
        sampler=ProbabilitySampler(1.0))

    dep_logger.warning(message_before)
    with span_tracer.span(name=name):
        dep_logger.warning(message)
    dep_logger.warning(message_after)
Ejemplo n.º 18
0
def getLogger():
    """Return (creating on first use) a per-process logger keyed by PID.

    Relies on the module-level ``logger`` dict and ``connection_string``.
    """
    global logger
    pid = os.getpid()
    try:
        if logger[pid] is not None:
            print("returning existing logger " + str([pid]))
            return logger[pid]
    except KeyError:
        # BUG FIX: this was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit and any unrelated error. Only a
        # missing dict entry should trigger creation of a new logger.
        logger[pid] = logging.getLogger(str(pid))
        print(connection_string)
        handler = AzureLogHandler(connection_string=connection_string)
        logger[pid].addHandler(handler)
        logger[pid].setLevel(logging.INFO)
        print("created logger for pid " + str([pid]))
    return logger[pid]
Ejemplo n.º 19
0
    def __init__(self, env: locust.env.Environment, testplan="", instrumentation_key="", propagate_logs=True):
        """Hook locust request events up to an Application Insights logger.

        :param env: locust environment whose request events are listened to.
        :param testplan: test plan label; defaults to "appinsightstestplan".
        :param instrumentation_key: App Insights key; falls back to the
            APP_INSIGHTS_INSTRUMENTATION_KEY environment variable when empty.
        :param propagate_logs: whether records propagate to ancestor loggers.
        """
        self.env = env
        self.testplan = testplan or "appinsightstestplan"
        self.logger = logging.getLogger(__name__)

        key = instrumentation_key if instrumentation_key != "" else str(os.getenv("APP_INSIGHTS_INSTRUMENTATION_KEY"))
        formated_key = "InstrumentationKey=" + key

        self.logger.addHandler(AzureLogHandler(connection_string=formated_key))
        self.logger.propagate = propagate_logs

        env.events.request.add_listener(self.request)
Ejemplo n.º 20
0
def get_app_insight_logger() -> logging.Logger:
    """get_app_insight_logger.

    Return a logger with AzureLogHandler added; any previously attached
    handlers are discarded so repeated calls do not stack handlers.

    Returns:
        logging.Logger:
    """
    training_logger = logging.getLogger("Backend-Training-App-Insight")
    training_logger.handlers = []
    azure_handler = AzureLogHandler(connection_string=APP_INSIGHT_CONN_STR)
    training_logger.addHandler(azure_handler)
    return training_logger
Ejemplo n.º 21
0
def index():
    """Serve a random car picture (optionally filtered by manufacturer) and log the choice."""
    # setup the logging connection
    # NOTE(review): the key below looks like an unsubstituted CI template
    # placeholder — confirm it is replaced at deploy time.
    view_logger = logging.getLogger(__name__)
    view_logger.addHandler(
        AzureLogHandler(
            connection_string='InstrumentationKey=${{ INSTRUMENTATIONKEY }}'))

    manu = request.args.get('manufacturer')
    url = random.choice(cars[manu]) if manu else random.choice(all_cars)

    # Attach the chosen url as a custom dimension on the log entry.
    view_logger.warning('action', extra={'custom_dimensions': {'url-request': url}})
    return render_template("index.html", url=url)
Ejemplo n.º 22
0
def startLogger():
    """Create a module logger wired to Azure Monitor.

    Prefers the 'InstrumentationKey' environment variable; falls back to the
    hard-coded default key. NOTE(review): secrets should not live in source
    control — move the fallback key out of the code.
    """
    # BUG FIX: the environment variable was read but never used — the handler
    # always received the hard-coded key. Use the env value when available.
    instrumentation_key = os.environ.get(
        'InstrumentationKey', 'fd8638b6-ccd6-41f0-a273-e8b15726c4dd')

    # initialize logger object
    logger = logging.getLogger(__name__)

    # define connection string attribute of logger object
    logger.addHandler(
        AzureLogHandler(
            connection_string='InstrumentationKey=' + instrumentation_key))

    return logger
Ejemplo n.º 23
0
def _setup_azure_logging(logger: logging.Logger, app: Flask,
                         connection_string: str):
    """Attach Azure Monitor logging and Flask request tracing.

    See https://docs.microsoft.com/en-us/azure/azure-monitor/app/opencensus-python

    :param logger: Logging instance that receives the AzureLogHandler.
    :param app: Flask app instance instrumented with tracing middleware.
    :param connection_string: Azure Application Insight connection string.
    """
    # Trace handler for normal logging output, e.g. logger.info("Info message").
    logger.addHandler(AzureLogHandler(connection_string=connection_string))

    # Export every request (sampling rate 1.0) to Application Insights.
    FlaskMiddleware(
        app,
        exporter=AzureExporter(connection_string=connection_string),
        sampler=ProbabilitySampler(rate=1.0),
    )
    def __init__(self):
        """Set up the metrics dict, logging, and (if configured) App Insights export."""
        self.metrics = {}
        self.logger = logging.getLogger(__name__)

        # Normalize the instrumentation key: missing or blank means "not set".
        self.appinsights_key = None
        raw_key = getenv(APPINSIGHTS_INSTRUMENTATIONKEY, None)
        if raw_key and len(raw_key.strip()) > 0:
            self.appinsights_key = raw_key.strip()

        if self.appinsights_key:
            # CONSISTENCY FIX: reuse the validated, stripped key instead of
            # re-reading the environment — the old code could use an
            # un-stripped value and re-read via a hard-coded env-var name
            # (assumed equal to the APPINSIGHTS_INSTRUMENTATIONKEY constant —
            # TODO confirm).
            connection_string = "InstrumentationKey=" + self.appinsights_key
            handler = AzureLogHandler(connection_string=connection_string)
            self.logger.addHandler(handler)
            exporter = metrics_exporter.new_metrics_exporter(
                connection_string=connection_string)
            view_manager.register_exporter(exporter)
Ejemplo n.º 25
0
    def _initialize_logger(self, instrumentation_key, logging_level):
        """Route module logging to stdout and, if a key is given, to App Insights."""
        # Suppress the handler-of-last-resort and internal logging errors:
        # mirroring logs to stdout can raise "ValueError: I/O operation on
        # closed file" on some spark executor nodes, and those errors should
        # not pollute the notebook output.
        logging.lastResort = None
        logging.raiseExceptions = False

        logger.setLevel(logging_level)

        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(logging_level)
        stdout_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        logger.addHandler(stdout_handler)

        if instrumentation_key:
            # Also ship logs to App Insights (more info:
            # https://github.com/balakreshnan/Samples2021/blob/main/Synapseworkspace/opencensuslog.md#azure-synapse-spark-logs-runtime-errors-to-application-insights)
            logger.addHandler(
                AzureLogHandler(connection_string='InstrumentationKey=' +
                                instrumentation_key))
    def _initialize_azure_log_handler(self, component_name, custom_dimensions):
        """Build and return an AzureLogHandler with trace/span-aware formatting.

        Adding 'logging' to trace_integrations (done elsewhere) makes traceId
        and spanId available on records; see
        https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-logging
        """
        logging.basicConfig(
            format="%(asctime)s name=%(name)s level=%(levelname)s "
            "traceId=%(traceId)s spanId=%(spanId)s %(message)s"
        )

        handler = AzureLogHandler(
            connection_string="InstrumentationKey=" + self._get_app_insights_key(),
            export_interval=0.0,
        )
        handler.add_telemetry_processor(self._get_callback(component_name))
        handler.name = self.HANDLER_NAME
        handler.addFilter(CustomDimensionsFilter(custom_dimensions))
        return handler
Ejemplo n.º 27
0
def set_logger():
    """Initialize the module-level logger and record pipeline run metadata."""
    global logger

    # Route records both to the console and to Application Insights.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())
    logger.addHandler(AzureLogHandler())

    # Collect pipeline run identifiers for traceability.
    pipeline_info = {
        "parent_run_id": run.parent.id,
        "step_id": run.id,
        "step_name": run.name,
        "experiment_name": run.experiment.name,
        "run_url": run.parent.get_portal_url(),
    }

    # Log pipeline information
    logger.info(pipeline_info)
Ejemplo n.º 28
0
def init():
    """Prepare logging and per-worker scratch directories for scoring."""
    # APPLICATIONINSIGHTS_CONNECTION_STRING needs to be set for AzureLogHandler.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())
    logger.addHandler(AzureLogHandler())

    output_folder = os.path.join(
        os.environ.get("AZ_BATCHAI_INPUT_AZUREML", ""), "temp/output")
    working_dir = os.environ.get("AZ_BATCHAI_OUTPUT_logs", "")
    ip_addr = os.environ.get("AZ_BATCHAI_WORKER_IP", "")

    # Per-worker log directory: <working_dir>/user/<ip>/<process name>.
    log_dir = os.path.join(working_dir, "user", ip_addr,
                           current_process().name)
    Path(log_dir).mkdir(parents=True, exist_ok=True)

    logger.info(f"{__file__}.output_folder:{output_folder}")
    logger.info("init()")
Ejemplo n.º 29
0
async def lifespan(application: Starlette):
    """Starlette lifespan context: set up telemetry and redis on startup,
    release them on shutdown.

    Everything before ``yield`` runs at application startup; everything after
    runs at shutdown.
    """
    exporter = Exporter(connection_string=Settings.instrumentation_key)
    exporter.add_telemetry_processor(add_cloud_role_name)
    exporter.add_telemetry_processor(add_instance_role_id)
    # Expose the exporter on app state for use elsewhere in the application.
    application.state.azure_exporter = exporter

    handler = AzureLogHandler(connection_string=Settings.instrumentation_key)

    handler.add_telemetry_processor(add_cloud_role_name)
    handler.add_telemetry_processor(add_instance_role_id)

    # Route the configured library loggers through the App Insights handler.
    for log, level in logging_instances:
        log.addHandler(handler)
        log.setLevel(level)

    pool = await redis.instantiate_redis_pool()
    application.state.redis = pool

    yield

    # Shutdown: close redis and flush any buffered telemetry.
    pool.close()
    await pool.wait_closed()
    handler.flush()
def start_telemetry() -> None:
    """
    Start telemetry, capturing all log messages and warnings and sending them
    to our Applications Insights cloud instance
    """

    global telemetry_handler

    instrumentation_key = ccp.telemetry_config['Telemetry'][
        'instrumentation_key']

    root_logger = logging.getLogger(pulsequantum.__name__)
    root_logger.setLevel(logging.DEBUG)

    # Drop the handler installed by any previous call before re-creating it.
    if telemetry_handler is not None:
        telemetry_handler.close()
        root_logger.removeHandler(telemetry_handler)

    # The opencensus transport logs 'transmission succeeded' at INFO, which
    # would itself be exported if AzureLogHandler sits on root_logger; raise
    # its level to keep that noise out of Azure.
    logging.getLogger('opencensus.ext.azure.common.transport').setLevel(
        logging.WARNING)

    def callback_function(envelope: Envelope) -> bool:
        # Tag each envelope with the host name and fixed cloud role; returning
        # True keeps the envelope.
        envelope.tags['ai.user.accountId'] = platform.node()
        envelope.tags['ai.cloud.role'] = "pulsequantum"
        return True

    telemetry_handler = AzureLogHandler(
        connection_string=f'InstrumentationKey={instrumentation_key}')
    telemetry_handler.add_telemetry_processor(callback_function)
    telemetry_handler.setLevel(logging.INFO)
    telemetry_handler.setFormatter(get_formatter_for_telemetry())
    root_logger.addHandler(telemetry_handler)