Пример #1
0
    def _init_trace_logging(self, app):
        """
        Attach Application Insights trace-log handlers to the Flask app
        logger, unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is set in the
        Flask config.

        Args:
            app (flask.Flask): the Flask application for which to initialize
                the extension.
        """
        if app.config.get(CONF_DISABLE_TRACE_LOGGING, False):
            # Trace logging explicitly disabled via configuration.
            return

        if self._key_grantee:
            handler = LoggingHandler(self._key_grantee,
                                     telemetry_channel=self._channel)
            self._trace_log_handler_grantee = handler
            app.logger.addHandler(handler)

        if self._key_ai4e:
            print("Starting trace logging")
            handler = LoggingHandler(self._key_ai4e,
                                     telemetry_channel=self._channel)
            self._trace_log_handler_ai4e = handler
            app.logger.addHandler(handler)
Пример #2
0
def setup_logging(ikey):
    """Configure root logging to send traces to Application Insights.

    Builds a telemetry channel tagged with the package version and a
    per-worker UUID, then installs a ``LoggingHandler`` via
    ``logging.basicConfig``.

    Args:
        ikey: the Application Insights instrumentation key.
    """
    ch = channel.TelemetryChannel()
    ch.context.application.ver = __version__
    ch.context.properties['worker.id'] = str(uuid.uuid4())
    logging.basicConfig(
        handlers=[LoggingHandler(ikey, telemetry_channel=ch)],
        format='%(levelname)s: %(message)s',
        level=logging.DEBUG)
Пример #3
0
    def attach_appinsights(logger, instrumentation_key: str):
        """Attach an Application Insights handler to *logger*.

        Does nothing (beyond a warning) when no instrumentation key is
        supplied.  The handler's telemetry context is scrubbed of
        machine-identifying values before use.
        """
        if not instrumentation_key:
            logger.warning(
                "appinsights instrumentation key is null; not writing to app insights"
            )
            return

        handler = LoggingHandler(instrumentation_key)

        # TODO: extend this collection to jobid, pipelineid or farmid etc.
        handler.client._context.properties['Collection'] = 'ADF_LOGS'

        # Reset identifying fields in the telemetry context.  Due to a bug in
        # the LoggingHandler constructor a context passed in is not honored,
        # so the values are overwritten after the handler is created.  The
        # string 'None' is used rather than None, because None would be
        # replaced by the constructor's default values.
        ctx = handler.client._context

        ctx.device = contracts.Device()
        for field in ('os_version', 'locale', 'id', 'type', 'oem_name', 'model'):
            setattr(ctx.device, field, NONE_STRING)

        ctx.location = contracts.Location()
        ctx.location.ip = NONE_STRING

        ctx.user = contracts.User()
        for field in ('id', 'account_id', 'auth_user_id'):
            setattr(ctx.user, field, NONE_STRING)

        ctx.session = contracts.Session()
        ctx.session.id = NONE_STRING

        ctx.cloud = contracts.Cloud()
        ctx.cloud.role = NONE_STRING

        handler.setLevel(Logger.get_logging_level())

        # Log formatter looks similar to the default python logging statement.
        handler.setFormatter(
            logging.Formatter(
                '[%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(process)d:%(thread)d]: %(message)s'
            ))

        # enable exceptions to app insights
        enable(instrumentation_key)

        logger.addHandler(handler)
        logger.info("Attached app insights handler to logger")
Пример #4
0
def main(event: func.EventHubEvent):
    """Azure Function entry point: persist scale telemetry from an Event Hub.

    The event body is expected to be a JSON array of datapoints, e.g.
    ``{"timestamp": 1564598054, "deviceid": "Node1", "scale": 2,
    "temperature": 1.1, "weight": 10000}``.  Valid datapoints are inserted
    into the historical-data table and merged into the per-scale
    configuration table; invalid ones are logged and skipped.
    """
    handler = LoggingHandler(os.environ['APPINSIGHTS_INSTRUMENTATIONKEY'])
    logging.basicConfig(handlers=[ handler ], format='%(levelname)s: %(message)s', level=logging.DEBUG)

    tc = TelemetryClient(os.environ['APPINSIGHTS_INSTRUMENTATIONKEY'])
    tc.track_event("Incoming event")
    tc.flush()

    logging.info('Function triggered to process a message: %s', event)
    logging.info('  body: %s', event.get_body())
    logging.info('  EnqueuedTimeUtc: %s', event.enqueued_time)
    logging.info('  SequenceNumber: %s', event.sequence_number)
    logging.info('  Offset: %s', event.offset)
    logging.info('  Partition: %s', event.partition_key)
    logging.info('  Metadata: %s', event.iothub_metadata)

    table_service = TableService(connection_string=os.environ['AzureTableConnectionString'])

    for datapoint in json.loads(event.get_body()):
        # Expected data format:
        #   {"timestamp": 1564598054, "deviceid": "Node1", "scale": 2, "temperature": 1.1,"weight": 10000}
        if datapoint is not None and 'deviceid' in datapoint and \
           'timestamp' in datapoint and 'scale' in datapoint and \
           'weight' in datapoint:
            logging.debug('  datapoint: %s', datapoint)
            # deviceid is used as partition key.
            # {timestamp}-{scale} is used as RowKey
            # timestamp and scale number are duplicated as an int columns
            # to keep them searchable.  The rest of the datapoint elements
            # are added as columns as well.
            history = {}
            history['PartitionKey'] = datapoint.pop('deviceid')
            history['RowKey'] = str(datapoint['timestamp']) + '-' + str(datapoint['scale'])
            history.update(datapoint)
            # Lazy %-args (not eager '%' formatting) so the message is only
            # built when the log level is enabled — consistent with the other
            # logging calls in this function.
            logging.debug('history: %s', history)
            table_service.insert_entity(TABLE_NAME_HISTORICAL_DATA, history)
            logging.info('Added historical table data: %s', history)

            # Touch/create the row in the config table for each reported scale with latest weight
            configupdate = {}
            configupdate['PartitionKey'] = history['PartitionKey']
            configupdate['RowKey'] = str(history['scale'])
            configupdate['weight'] = history['weight']
            if 'temperature' in history:
                configupdate['temperature'] = history['temperature']
            logging.info('config update: %s', configupdate)
            logging.info('Writing to table: %s', TABLE_NAME_CONFIGURATION)
            table_service.insert_or_merge_entity(TABLE_NAME_CONFIGURATION, configupdate)
            logging.info('Updated configuration table entry: %s', configupdate)
        else:
            logging.info('  Invalid datapoint: %s', datapoint)
Пример #5
0
    def InitializeAppInsights(self):
        """Create a per-thread child logger wired to Application Insights.

        A child of ``rootLogger`` is created per thread so each thread can
        set its own SessionId without collision during concurrent execution;
        by default records still propagate to the parent rootLogger.
        """
        cur_thread = threading.current_thread()
        log_format = logging.Formatter(
            "%(asctime)s [%(threadName)-12.12s] [%(levelname)-7.7s]: %(message)s")
        app_insights_key = os.environ['APPINSIGHTS_KEY']
        # NOTE(review): this writes the instrumentation key to the local log —
        # confirm that is acceptable for this deployment.
        self.rootLogger.info("AppInsights key: '" + app_insights_key + "'")   # log locally

        self.telemetryLogger = self.rootLogger.getChild(
            'AppInsights.{0}'.format(cur_thread))
        telemetry_handler = LoggingHandler(app_insights_key)
        telemetry_handler.setFormatter(log_format)
        telemetry_handler.client.context.application.id = "DiskInspect-Service"
        telemetry_handler.client.context.application.ver = self.containerVersion
        telemetry_handler.client.context.properties['releaseName'] = self.releaseName
        self.telemetryLogger.addHandler(telemetry_handler)

        self.telemetryClient = telemetry_handler.client
Пример #6
0
    def __init__(self, bright_host_ip: str, metrics: Iterable[str], instrumentation_key: str):
        """Connect to the Bright cluster and set up a dedicated
        Application Insights emitter logger plus a mutex for shared state.
        """
        self.__set_bright_host_ip(bright_host_ip)
        self.__set_metrics(metrics)
        self.__set_instrumentation_key(instrumentation_key)

        self.__set_bright_cluster(self.__create_bright_cluster())

        # Handler that ships log records to Application Insights.
        handler = LoggingHandler(instrumentation_key)
        handler.setLevel(logging.DEBUG)
        self.__set_emit_handler(handler)

        # Dedicated named logger used only for emitting telemetry.
        emit_logger = logging.getLogger('application-insights')
        emit_logger.setLevel(logging.DEBUG)
        emit_logger.addHandler(handler)
        self.__set_emitter(emit_logger)

        self.__set_mutex(threading.Lock())
Пример #7
0
def setup_logger():
    """Configure the root logger with a stdout handler and, when
    ``AZURE_USEAPPINSIGHT`` is truthy, an Application Insights handler.
    """
    logging.setLoggerClass(CustomLogger)
    _logger = logging.getLogger()
    _logger.setLevel(logging.DEBUG)

    # Stdout handler
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(logging.Formatter(format_str))
    _logger.addHandler(stream_handler)

    # Application insight handler
    if utils.convert_to_boolean(os.environ["AZURE_USEAPPINSIGHT"]):
        from applicationinsights import TelemetryClient
        from applicationinsights.logging import LoggingHandler

        ai_handler = LoggingHandler(os.environ["AZURE_INSTRUMENTATION_KEY"])
        ai_handler.setLevel(logging.DEBUG)
        ai_handler.setFormatter(logging.Formatter(ai_str))
        _logger.addHandler(ai_handler)

        tc = TelemetryClient(os.environ["AZURE_INSTRUMENTATION_KEY"])
        tc.channel.queue.max_queue_length = 2
Пример #8
0
# Application Insights instrumentation key (placeholder — supply a real key).
instrumentation_key = '<Instrumentation key>'

# Enable unhandled exception logging
enable(instrumentation_key)

# Telemetry client for explicit event tracking, tagged with app version
# and a device id identifying this notebook.
tc = TelemetryClient(instrumentation_key)
tc.context.application.ver = '0.0.1'
tc.context.device.id = 'Sample notebook'

# Channel used by the logging handler; every trace it sends carries the
# application version and the Spark application id/name as properties.
telemetry_channel = channel.TelemetryChannel()
telemetry_channel.context.application.ver = '1.0.0'
telemetry_channel.context.properties['application_name'] = 'sample_notebook'
telemetry_channel.context.properties['application_id'] = sc.applicationId

handler = LoggingHandler(instrumentation_key,
                         telemetry_channel=telemetry_channel)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
# Named logger at INFO level; the handler accepts DEBUG but the logger
# itself only passes INFO and above.
logger = logging.getLogger('simple_logger')
logger.setLevel(logging.INFO)
logger.addHandler(handler)

logger.info('Starting sample app ... ')


def getReadConfig():
    readConfig = {
        "Endpoint": "https://nomier-test-sql.documents.azure.com:443/",
        "Masterkey": "<MK>",
        "Database": "partitionIdTestDB",
        "Collection": "sourcecollection",
Пример #9
0
sys.settrace(trace)
"""

"""Setup Azure Application Insights
 ____ _  _ ____ ____  ___ ____ __ _ ____ 
(  __| \/ |  __|  _ \/ __|  __|  ( (_  _)
 ) _)/ \/ \) _) )   ( (_ \) _)/    / )(  
(____)_)(_(____|__\_)\___(____)_)__)(__) 
 """
# set up channel with context
telemetry_channel = channel.TelemetryChannel()
telemetry_channel.context.application.ver = '0.0.0.0'
#telemetry_channel.context.properties['my_property'] = 'my_value'

# set up logging to Application Insights.
# NOTE(review): the instrumentation key is hard-coded — consider moving it
# to configuration or an environment variable.
az_handler = LoggingHandler('1bd7b388-4afd-4b58-8b2f-060ac172d00d', 
                            telemetry_channel=telemetry_channel)
az_handler.setLevel(logging.DEBUG)
# Prefix each record with a 16-hex-char id derived from the current time,
# so all records from one run can be grouped together.
hash = hashlib.sha1()
hash.update(str(time.time()).encode("utf-8", "strict"))
az_handler.setFormatter(
    logging.Formatter(f'{hash.hexdigest()[:16]} ||'
                       '%(name)s - %(levelname)s: %(message)s')
)

# Mirror all records to a local file as well.
f_handler = logging.FileHandler('output.log')
f_handler.setLevel(logging.DEBUG)
f_handler.setFormatter(
    logging.Formatter('%(name)s - %(levelname)s: %(message)s')
)

# Console handler (configured further below, outside this excerpt).
c_handler = logging.StreamHandler()
Пример #10
0
}

# Sysout Logging Setup: JSON-formatted records to stdout at INFO level.
logger = logging.getLogger("monitofi")
logger.setLevel(logging.INFO)
syshandler = logging.StreamHandler(sys.stdout)
syshandler.setLevel(logging.INFO)
formatter = json_log_formatter.JSONFormatter()
syshandler.setFormatter(formatter)
logger.addHandler(syshandler)

# Only wire up Application Insights when a real key is configured.
if IKEY != "REPLACE_ME":
    # Logging unhandled exceptions with Appinsights
    enable(IKEY)
    # Applications Insights Logging Setup (same JSON formatter as stdout)
    handler = LoggingHandler(IKEY)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

# InfluxDB client used for metric storage; the database is created up front.
iclient = InfluxDBClient(INFLUXDB_SERVER, INFLUXDB_PORT, INFLUXDB_USERNAME,
                         INFLUXDB_PASSWORD, INFLUXDB_DATABASE)
iclient.create_database(INFLUXDB_DATABASE)


def flattening(nested, prefix, ignore_list):
    """Return *nested* flattened into a single-level dict of fields.

    Thin wrapper around ``flatten`` that allocates the result dict and
    seeds the recursion.
    """
    flattened = {}
    flatten(True, nested, flattened, prefix, ignore_list)
    return flattened
Пример #11
0
# set up logging: enable unhandled-exception reporting for this key
enable('<YOUR INSTRUMENTATION KEY GOES HERE>')

# log something (this will be sent to the Application Insights service as a trace)
logging.info('This is a message')

# logging shutdown will cause a flush of all un-sent telemetry items
logging.shutdown()
Basic logging configuration (second option)

import logging
from applicationinsights.logging import LoggingHandler

# set up logging
handler = LoggingHandler('<YOUR INSTRUMENTATION KEY GOES HERE>')
logging.basicConfig(handlers=[ handler ], format='%(levelname)s: %(message)s', level=logging.DEBUG)

# log something (this will be sent to the Application Insights service as a trace)
logging.debug('This is a message')

try:
    raise Exception('Some exception')
except Exception:
    # Catch Exception rather than using a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are not swallowed; this still sends the exception to
    # the Application Insights service.
    logging.exception('Code went boom!')

# logging shutdown will cause a flush of all un-sent telemetry items
# alternatively flush manually via handler.flush()
logging.shutdown()
Advanced logging configuration