def intercept_excepthook(type, value, traceback):
    client = TelemetryClient('temp_key', telemetry_channel)
    for instrumentation_key in enabled_instrumentation_keys:
        client.context.instrumentation_key = instrumentation_key
        client.track_exception(type, value, traceback)
    client.flush()
    original_excepthook(type, value, traceback)
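This handler is meant to be installed as sys.excepthook so every uncaught exception is reported to each enabled instrumentation key before the default hook runs. A minimal wiring sketch, assuming the surrounding module supplies telemetry_channel and the key list (both hypothetical names here):

import sys

original_excepthook = sys.excepthook  # keep the default hook so it still runs last
telemetry_channel = None  # or a configured channel.TelemetryChannel
enabled_instrumentation_keys = ['00000000-0000-0000-0000-000000000000']  # placeholder key
sys.excepthook = intercept_excepthook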
Example #2
def upload(data_to_save):
    from applicationinsights import TelemetryClient
    from applicationinsights.exceptions import enable

    client = TelemetryClient(INSTRUMENTATION_KEY)
    enable(INSTRUMENTATION_KEY)

    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('{}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for record in data_to_save:
        client.track_event(record['name'], record['properties'])

    client.flush()

    if in_diagnostic_mode():
        json.dump(data_to_save, sys.stdout, indent=2, sort_keys=True)
        sys.stdout.write('\nTelemetry upload completes\n')
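As a usage note: upload expects data_to_save to be a string containing a JSON-ish list of records (single quotes are normalized to double quotes before parsing), each record carrying a name and a properties dict. A hypothetical call:

upload("[{'name': 'cli/command', 'properties': {'result': 'success'}}]")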
    def test_track_dependency_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient(channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.instrumentation_key = '99999999-9999-9999-9999-999999999999'
        client.context.device = None
        client.track_dependency('test', 'COMMAND_PLACEHOLDER', 'HTTP', 'localhost', 13, True, 200, { 'foo': 'bar' }, { 'x': 42 }, 'ID_PLACEHOLDER')
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.RemoteDependency", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "RemoteDependencyData", "baseData": {"ver": 2, "name": "test", "id": "ID_PLACEHOLDER", "resultCode": "200", "duration": "00:00:00.013", "success": true, "data": "COMMAND_PLACEHOLDER", "target": "localhost", "type": "HTTP", "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
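The tests in this listing rely on a MockTelemetrySender helper that is not shown. A minimal sketch consistent with how the tests use it, a sender whose send() stashes the last envelope handed over by the synchronous queue in self.data, might be:

class MockTelemetrySender(channel.SenderBase):
    def __init__(self):
        # The endpoint is never contacted; SenderBase just wants a URI.
        super(MockTelemetrySender, self).__init__('http://tempuri.org')
        self.data = None

    def send(self, data_to_send):
        # SynchronousQueue.flush() passes a list of envelopes; keep the last one.
        self.data = data_to_send[0]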
    def test_track_exception_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.context.device = None
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception(*sys.exc_info(), properties={}, measurements={ 'x': 42 })
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "handledAt": "UserCode", "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryClient.py", "line": 0}]}], "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.assertEqual(expected, actual)
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception()
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "handledAt": "UserCode", "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryClient.py", "line": 0}]}]}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.assertEqual(expected, actual)
class Telemetry:
    def __init__(self):
        try:
            self.telemetry = TelemetryClient(IKEY)
            if os.path.exists("telemetry.config"):
                with open("telemetry.config", "r") as config_file:
                    self.enable_telemetry = config_file.read() == "1"
            else:
                self.enable_telemetry = self._query_yes_no(PROMPT_TEXT)
                with open("telemetry.config", "w") as config_file:
                    config_file.write("1" if self.enable_telemetry else "0")
                if self.enable_telemetry:
                    self.telemetry.track_event("yes", {"device": DEVICE, "language": LANGUAGE})
                else:
                    self.telemetry.context.location.ip = "0.0.0.0"
                    self.telemetry.track_event("no", {"device": DEVICE, "language": LANGUAGE})
            self.telemetry.flush()
        except:
            pass

    def send_telemetry_data(self, iot_hub_name, event, message):
        try:
            if self.enable_telemetry:
                hash_mac = self._get_mac_hash()
                hash_iot_hub_name = hashlib.sha256(iot_hub_name.encode("utf-8")).hexdigest()
                # NOTE: platform.dist() was deprecated in Python 3.5 and removed in 3.8;
                # newer code would need the third-party 'distro' package here.
                self.telemetry.track_event(event, {"iothub": hash_iot_hub_name, "message": message,
                                                   "language": LANGUAGE, "device": DEVICE, "mac": hash_mac,
                                                   "osType": platform.system(), "osPlatform": platform.dist()[0],
                                                   "osRelease": platform.dist()[1]})
                self.telemetry.flush()
        except:
            pass

    def _get_mac_hash(self):
        mac = ":".join(re.findall("..", "%012x" % uuid.getnode()))
        return hashlib.sha256(mac.encode("utf-8")).hexdigest()

    def _query_yes_no(self, question):
        global input
        default = "y"
        valid = {"y": True, "n": False}
        prompt = " [Y/n] "
        while True:
            sys.stdout.write(question + prompt)
            try:
                input = raw_input
            except NameError:
                pass
            choice = input().lower()
            if default is not None and choice == "":
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'y' or 'n' ")
    def test_track_event_processor_filtered(self):
        def process(data, context):
            return False # Filter the event

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_event('test', { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        self.assertEqual(None, sender.data)
    def test_track_pageview_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.context.device = None
        client.track_pageview('test', 'http://tempuri.org', 13, { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.PageView", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "PageViewData", "baseData": {"ver": 2, "url": "http://tempuri.org", "name": "test", "duration": 13, "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_metric_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.context.device = None
        client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
Example #9
def _send_feedback(score, response_what_changes, response_do_well, email_address):
    from applicationinsights import TelemetryClient
    tc = TelemetryClient(INSTRUMENTATION_KEY)
    tc.context.application.ver = core_version
    version_components, version_python = _get_version_info()
    tc.track_event(
        EVENT_NAME,
        {'response_what_changes': response_what_changes,
         'response_do_well': response_do_well,
         'response_email_address': email_address,
         'version_components': version_components,
         'version_python': version_python},
        {'response_net_promoter_score': score})
    tc.flush()
    def test_track_exception_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception(*sys.exc_info(), properties={}, measurements={ 'x': 42 })
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception()
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.assertEqual(expected, actual)
    def test_track_request_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient(channel.TelemetryChannel(context=None, queue=queue))
        client.context.instrumentation_key = '99999999-9999-9999-9999-999999999999'
        client.context.device = None
        client.track_request('test', 'http://tempuri.org', True, 'START_TIME', 13, '42', 'OPTIONS', { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Request", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "RequestData", "baseData": {"ver": 2, "id": "ID_PLACEHOLDER", "name": "test", "startTime": "START_TIME", "duration": "00:00:00.013", "responseCode": "42", "success": true, "httpMethod": "OPTIONS", "url": "http://tempuri.org", "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.data.base_data.id = 'ID_PLACEHOLDER'
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_event_with_common_processor_two_clients(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        key = '99999999-9999-9999-9999-999999999999'
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)

        chan = channel.TelemetryChannel(queue=queue)
        chan.context.properties['foo'] = 'bar'

        client1 = TelemetryClient(key, chan)
        client1.add_telemetry_processor(process)
        client1.context.device = None
        client1.context.properties['x'] = 42

        client2 = TelemetryClient(key, chan)
        client2.add_telemetry_processor(process)
        client2.context.device = None
        client2.context.properties['x'] = 84

        client1.track_event('test 1')
        client1.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 1", "properties": {"NEW_PROP": "MYPROP", "foo": "bar", "x": 42}}}}'
        self.maxDiff = None
        self.assertEqual(expected, actual)

        client2.track_event('test 2')
        client2.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 2", "properties": {"NEW_PROP": "MYPROP", "foo": "bar", "x": 84}}}}'
        self.assertEqual(expected, actual)
    def test_track_event_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.context.device = None
        client.track_event('test', { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test", "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
Example #14
def upload(data_to_save):
    from applicationinsights import TelemetryClient
    from applicationinsights.exceptions import enable

    client = TelemetryClient(INSTRUMENTATION_KEY)
    enable(INSTRUMENTATION_KEY)

    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')
        sys.stdout.write('Got data {}\n'.format(json.dumps(json.loads(data_to_save), indent=2)))

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('{}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for record in data_to_save:
        name = record['name']
        raw_properties = record['properties']
        properties = {}
        measurements = {}
        for k in raw_properties:
            v = raw_properties[k]
            if isinstance(v, six.string_types):
                properties[k] = v
            else:
                measurements[k] = v
        client.track_event(record['name'], properties, measurements)

        if in_diagnostic_mode():
            sys.stdout.write('\nTrack Event: {}\nProperties: {}\nMeasurements: {}'.format(
                name, json.dumps(properties, indent=2), json.dumps(measurements, indent=2)))

    client.flush()

    if in_diagnostic_mode():
        sys.stdout.write('\nTelemetry upload completes\n')
Example #15
def upload(data_to_save):
    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')
        sys.stdout.write('Got data {}\n'.format(json.dumps(json.loads(data_to_save), indent=2)))

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('ERROR: {}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for instrumentation_key in data_to_save:
        client = TelemetryClient(instrumentation_key=instrumentation_key,
                                 telemetry_channel=TelemetryChannel(queue=SynchronousQueue(LimitedRetrySender())))
        enable(instrumentation_key)

        for record in data_to_save[instrumentation_key]:
            name = record['name']
            raw_properties = record['properties']
            properties = {}
            measurements = {}
            for k, v in raw_properties.items():
                if isinstance(v, six.string_types):
                    properties[k] = v
                else:
                    measurements[k] = v
            client.track_event(record['name'], properties, measurements)

            if in_diagnostic_mode():
                sys.stdout.write(
                    '\nTrack Event: {}\nProperties: {}\nMeasurements: {}'.format(name, json.dumps(properties, indent=2),
                                                                                 json.dumps(measurements, indent=2)))

        client.flush()

    if in_diagnostic_mode():
        sys.stdout.write('\nTelemetry upload completes\n')
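Unlike the earlier variant, this upload expects the decoded payload to be a dict keyed by instrumentation key, each mapping to a list of records; string-valued properties become custom properties and numeric ones become measurements. A hypothetical payload:

upload("{'00000000-0000-0000-0000-000000000000': [{'name': 'cli/command', 'properties': {'result': 'success', 'duration': 42}}]}")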
    def test_track_metric_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_pageview_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_pageview('test', 'http://tempuri.org', 13, { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.PageView", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "PageViewData", "baseData": {"ver": 2, "url": "http://tempuri.org", "name": "test", "duration": 13, "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_event_modifes_options(self):
        def process(data, context):
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.context.properties['foo'] = 'bar'
        client.track_event('test')
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test", "properties": {"foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
Example #19
def custom_event():
    tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
    tc.track_event('Test event', {'foo': 'bar'}, {'baz': 42})
    tc.flush()
    messagebox.showinfo("updates", "sent")
Example #20
class RaftUtils():
    def __init__(self):
        work_directory = os.environ['RAFT_WORK_DIRECTORY']
        with open(os.path.join(work_directory, 'task-config.json'),
                  'r') as task_config:
            self.config = json.load(task_config)

        connection_str = os.environ['RAFT_SB_OUT_SAS']
        self.topic_client = TopicClient.from_connection_string(connection_str)

        self.telemetry_client = TelemetryClient(
            instrumentation_key=os.environ['RAFT_APP_INSIGHTS_KEY'])

        self.job_id = os.environ['RAFT_JOB_ID']
        self.container_name = os.environ['RAFT_CONTAINER_NAME']

        self.telemetry_properties = {
            "jobId": self.job_id,
            "taskIndex": os.environ['RAFT_TASK_INDEX'],
            "containerName": self.container_name
        }

    def report_status(self, state, details):
        m = {
            'eventType': 'JobStatus',
            'message': {
                'tool': 'ZAP',
                'jobId': self.job_id,
                'agentName': self.container_name,
                'details': details,
                'utcEventTime': time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.gmtime()),
                'state': state
            }
        }
        msg = Message(str.encode(json.dumps(m)))
        self.topic_client.send(msg)

    def report_status_created(self, details=None):
        self.report_status('Created', details)

    def report_status_running(self, details=None):
        self.report_status('Running', details)

    def report_status_error(self, details=None):
        self.report_status('Error', details)

    def report_status_completed(self, details=None):
        self.report_status('Completed', details)

    def log_trace(self, trace):
        self.telemetry_client.track_trace(trace,
                                          properties=self.telemetry_properties)

    def log_exception(self):
        self.telemetry_client.track_exception(
            properties=self.telemetry_properties)

    def flush(self):
        self.telemetry_client.flush()

    def get_swagger_target(self):
        swagger = self.config.get("swaggerLocation")
        if swagger and swagger.get("url"):
            return swagger["url"]
        if swagger and swagger.get("filePath"):
            return swagger["filePath"]
        return None
Example #21
def trace_item():
    tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
    tc.track_trace('Test trace', {'foo': 'bar'})  # send a trace message with custom properties
    tc.flush()
Example #22
class RaftUtils():
    def __init__(self, tool_name):
        from applicationinsights import TelemetryClient
        from azure.servicebus import ServiceBusClient, ServiceBusMessage
        self.config = task_config()

        connection_str = os.environ['RAFT_SB_OUT_SAS']

        self.sb_client = ServiceBusClient.from_connection_string(
            connection_str)
        self.topic_client = self.sb_client.get_topic_sender(
            self.sb_client._entity_name)

        self.telemetry_client = TelemetryClient(
            instrumentation_key=os.environ['RAFT_APP_INSIGHTS_KEY'])

        self.job_id = os.environ['RAFT_JOB_ID']
        self.container_name = os.environ['RAFT_CONTAINER_NAME']
        self.tool_name = tool_name

        self.telemetry_properties = {
            "jobId": self.job_id,
            "taskIndex": os.environ['RAFT_TASK_INDEX'],
            "containerName": self.container_name
        }

        self.newSbMessage = ServiceBusMessage

    def report_bug(self, bugDetails):
        m = {
            'eventType': 'BugFound',
            'message': {
                'tool': self.tool_name,
                'jobId': self.job_id,
                'agentName': self.container_name,
                'bugDetails': bugDetails
            }
        }
        msg = self.newSbMessage(str.encode(json.dumps(m)))
        self.topic_client.send_messages([msg])

    def report_status(self, state, details):
        m = {
            'eventType': 'JobStatus',
            'message': {
                'tool': self.tool_name,
                'jobId': self.job_id,
                'agentName': self.container_name,
                'details': details,
                'utcEventTime': time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.gmtime()),
                'state': state
            }
        }
        msg = self.newSbMessage(str.encode(json.dumps(m)))
        self.topic_client.send_messages([msg])

    def report_status_created(self, details=None):
        self.report_status('Created', details)

    def report_status_running(self, details=None):
        self.report_status('Running', details)

    def report_status_error(self, details=None):
        self.report_status('Error', details)

    def report_status_completed(self, details=None):
        self.report_status('Completed', details)

    def log_trace(self, trace):
        self.telemetry_client.track_trace(trace,
                                          properties=self.telemetry_properties)

    def log_exception(self):
        self.telemetry_client.track_exception(
            properties=self.telemetry_properties)

    def flush(self):
        self.telemetry_client.flush()
        self.sb_client.close()
Example #23
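This Databricks notebook fragment assumes a TelemetryClient named tc was created in an earlier cell, for example:

# COMMAND ----------

# Hypothetical earlier cell: create the client used by the metrics cell below.
from applicationinsights import TelemetryClient
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')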
t_sensordata_malformed_sdf.write.mode("append").insertInto("malformed.sensor")


# COMMAND ----------

# MAGIC %md
# MAGIC ### Metrics

# COMMAND ----------

parkingbay_count = t_parkingbay_sdf.count()
sensordata_count = t_sensordata_sdf.count()
parkingbay_malformed_count = t_parkingbay_malformed_sdf.count()
sensordata_malformed_count = t_sensordata_malformed_sdf.count()

tc.track_event('Standardize : Completed load', 
               properties={'parkingbay_filepath': parkingbay_filepath, 
                           'sensors_filepath': sensors_filepath,
                           'load_id': load_id 
                          },
               measurements={'parkingbay_count': parkingbay_count,
                             'sensordata_count': sensordata_count,
                             'parkingbay_malformed_count': parkingbay_malformed_count,
                             'sensordata_malformed_count': sensordata_malformed_count
                            })
tc.flush()

# COMMAND ----------

dbutils.notebook.exit("success")
Example #24
def ocr(body, response, language: "The language(s) to use for OCR" = "eng"):

    logger.info(os.environ)

    azureAccountName = os.environ['SCANOCR_STORAGE_ACCOUNT_NAME']
    azureAccountKey = os.environ['SCANOCR_STORAGE_ACCOUNT_KEY']
    appInsightsTelemetryKey = os.environ['SCANOCR_APP_INSIGHTS_TELEMETRY_KEY']

    fileSetId = body["file_set_id"]
    zipFileUrl = body["zip_file_url"]
    fileName = body["file_name"]

    # initiate app insights
    tc = TelemetryClient(appInsightsTelemetryKey)
    tc.context.operation.id = str(uuid.uuid4())
    tc.track_event('ocr', {
        'zip_file_url': zipFileUrl,
        'file_set_id': fileSetId
    })
    tc.flush()

    # download zip, extract
    zipRequestBody = requests.get(zipFileUrl)
    z = zipfile.ZipFile(io.BytesIO(zipRequestBody.content))
    tempDir = '/tmp/' + fileSetId + '/' + fileName
    if (os.path.isdir(tempDir)):
        shutil.rmtree(tempDir)
    z.extractall(tempDir)

    # grab all PNG images from zip extract results
    image_files = []
    for root, dirs, files in os.walk(tempDir):
        for file in files:
            if file.endswith(".png"):
                image_files.append(os.path.join(root, file))

    # log file count to app insights
    tc.track_event('ocr_zip_extracted', {'file_count': len(image_files)})
    tc.flush()

    with open(tempDir + '/output.pdf', 'w+') as output:

        # convert PNGs to (non-OCR) PDF
        pdf_bytes = img2pdf.convert(image_files)
        with open(tempDir + '/input.pdf', "wb") as input_pdf:
            input_pdf.write(pdf_bytes)

        # log progress to app insights
        tc.track_event('ocr_pdf_created')
        tc.flush()

        # launch OCR process
        proc = subprocess.Popen("ocrmypdf --jobs 4 --output-type pdf " +
                                tempDir + "/input.pdf " + tempDir +
                                "/output.pdf",
                                stdin=subprocess.PIPE,
                                shell=True)

    # wait for OCR completion
    code = proc.wait()

    # log OCR completion to app insights
    tc.track_event('ocr_output_pdf_complete')
    tc.flush()

    # upload resulting PDF to Azure
    blob_service = BlockBlobService(account_name=azureAccountName,
                                    account_key=azureAccountKey)
    blob_service.create_blob_from_path(
        'images/' + fileSetId,
        fileName + ".pdf",
        tempDir + '/output.pdf',
        content_settings=ContentSettings(content_type='application/pdf'))

    # log upload completion to app insights
    tc.track_event('ocr_uploaded_to_azure')
    tc.flush()

    # obtain download signature from Azure
    sas_url = blob_service.generate_blob_shared_access_signature(
        'images/' + fileSetId, fileName + ".pdf", BlobPermissions.READ,
        datetime.utcnow() + timedelta(hours=12))

    download_url = 'https://' + azureAccountName + '.blob.core.windows.net/' + 'images/' + fileSetId + '/' + fileName + ".pdf" + '?' + sas_url

    # return results
    return {
        'filename': zipFileUrl,
        'files': z.namelist(),
        'download_url': download_url
    }
Example #25
def main(msg: func.QueueMessage) -> None:
    """
    Main function, triggered by Azure Storage Queue, parsed queue content
    :param msg: func.QueueMessage
    :return: None
    """
    logging.info('Python queue trigger function processed a queue item: %s',
                 msg.get_body().decode('utf-8'))
    # Get blob file content
    file_path = json.loads(msg.get_body().decode('utf-8'))['data']['url']

    # Parsing rows to get blob location and failure reason
    temp_blob_client = BlobClient.from_blob_url(blob_url=file_path)
    local_file_name = temp_blob_client.blob_name
    container_name = temp_blob_client.container_name

    get_config_values()

    log_file_str = download_logfile(container_name=container_name,
                                    local_file_name=local_file_name)

    for line in log_file_str.splitlines():
        log_file_json = json.loads(line)
        path = log_file_json["path"]
        reason = log_file_json["reason"]

        tc = TelemetryClient(APP_INSIGHT_KEY)
        tc.context.application.ver = '1.0'
        tc.context.properties[
            "PROCESS_PROGRAM"] = "XDR_SDL_INGESTION_ERR_HANDLER_V01A"
        tc.context.properties["PROCESS_START"] = time.time()

        try:
            patharray = path.replace('abfss://', '').split('/')
            container = patharray[0].split('@')[0]
            patharray.remove(patharray[0])
            filepath = ''
            for item in patharray:
                filepath += '/' + item
            filepath = filepath[1:]
        except:  # pylint: disable=bare-except
            logging.error(
                "Retry blob Databricks split error handling log file format error, FilePath: %s",
                file_path)
            tc.track_event(RETRY_ERROR_EVENT_NAME, {'FILE_PATH': file_path},
                           {RETRY_ERROR_EVENT_NAME + '_COUNT': 0})
            tc.flush()
            continue

        retry_times = get_blob_retry_times(filepath)

        # check retry is necessary or not
        if not check_retry_necessary(file_path=file_path):
            new_blob_file_path = get_new_blob_move_file_path(filepath, True)
            retry_blob_databireck_split(
                connect_str=CONNECT_STR,
                source_container=container,
                target_container=RetryFail_Container_Name,
                source_path=filepath,
                target_path=new_blob_file_path)
            logging.error(
                "Retry blob Databricks split hit the no need to retry, blob_path: %s, failure reason: %s",
                filepath, reason)
            tc.track_event(RETRY_END_IN_FAIL_EVENT_NAME,
                           {'FILE_PATH': filepath},
                           {RETRY_END_IN_FAIL_EVENT_NAME + '_COUNT': 0})
            tc.flush()
            continue

        if retry_times >= MAX_INGEST_RETRIES_TIMES:
            new_blob_file_path = get_new_blob_move_file_path(filepath)
            retry_blob_databireck_split(
                connect_str=CONNECT_STR,
                source_container=container,
                target_container=RetryFail_Container_Name,
                source_path=filepath,
                target_path=new_blob_file_path)
            logging.error(
                "Retry blob Databricks split hit the retries limit %s, blob_path: %s, failure reason: %s",
                MAX_INGEST_RETRIES_TIMES, filepath, reason)
            tc.track_event(RETRY_END_IN_FAIL_EVENT_NAME,
                           {'FILE_PATH': filepath},
                           {RETRY_END_IN_FAIL_EVENT_NAME + '_COUNT': 1})
            tc.flush()
            continue

        new_blob_file_path = get_new_blob_move_file_path(filepath)
        retry_blob_databireck_split(connect_str=CONNECT_STR,
                                    source_container=container,
                                    target_container=container,
                                    source_path=filepath,
                                    target_path=new_blob_file_path)

        logging.info(
            "Retry the Databricks split, blob_path: %s, failure reason: %s",
            path, reason)
        tc.track_event(RETRY_EVENT_NAME, {'FILE_PATH': file_path},
                       {RETRY_EVENT_NAME + '_COUNT': 1})
        tc.flush()
        retry_times += 1
        logging.info(
            "Databricks error handler execution succeed, blob path: %s, trial count: %s",
            path, retry_times)
Example #26
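The Singleton metaclass referenced below is not included in this listing; the conventional implementation it appears to assume is:

class Singleton(type):
    """Metaclass that hands back one shared instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]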
class Telemetry(metaclass=Singleton):
    """Singleton class that handles telemetry sending to AppInsights."""
    def __init__(self, toggle):
        """Initialize Telemetry instance."""
        self._toggle = toggle
        if self._toggle:
            self._telemetry_client = TelemetryClient(APP_INSIGHTS_KEY)
            self._telemetry_channel = self._setup_telemetry_channel()
            print("Telemetry enabled.")
        else:
            self._telemetry_client = None
            self._telemetry_channel = None
            print("Telemetry disabled.")

    def track_event(self, name, properties=None, measurements=None):
        """Track a telemetry event."""
        try:
            self._telemetry_client.track_event(name, properties, measurements)
        except AttributeError:
            print(f"Telemetry Disabled: Event Name: {name}")
            print(f"properties: {properties}")
            print(f"measurements: {measurements}")

    def track_metric(self,
                     name,
                     value,
                     type=None,
                     count=None,
                     min=None,
                     max=None,
                     std_dev=None,
                     properties=None):
        """Track a telemetry metric."""
        try:
            self._telemetry_client.track_metric(name, value, type, count, min,
                                                max, std_dev, properties)
        except AttributeError:
            print(f"Telemetry Disabled: Metric Name: {name}")
            print(f"value: {value}")
            if type:
                print(f"type: {type}")
            if count:
                print(f"count: {count}")
            if min:
                print(f"min: {min}")
            if max:
                print(f"max: {max}")
            if std_dev:
                print(f"std_dev: {std_dev}")
            if properties:
                print(f"properties: {properties}")

    def flush(self):
        """Flush the telemetry client info to AppInsights."""
        try:
            self._telemetry_client.flush()
        except AttributeError:
            pass

    def _setup_telemetry_channel(self):
        """Create telemetry_channel object.

        Instantiates a telemetry channel that collects unhandled exceptions.

        Return:
            telemetry_channel

        """
        from applicationinsights.exceptions import enable
        from applicationinsights import channel

        # set up channel with context
        telemetry_channel = channel.TelemetryChannel()
        telemetry_channel.context.application.ver = get_version()
        # set up exception capture
        telemetry_channel.context.properties['capture'] = 'exceptions'
        enable(APP_INSIGHTS_KEY, telemetry_channel=telemetry_channel)

        return telemetry_channel
Example #27
class AppInsights(object):
    """ This class represents a plugin that enables request telemetry,
     logging for a Bottle application. The telemetry
    will be sent to Application Insights service using the supplied
    instrumentation key.

    The following Bottle config variables can be used to configure the extension:

    - Set ``APPINSIGHTS_INSTRUMENTATIONKEY`` to a string to provide the
      instrumentation key to send telemetry to application insights.
      Alternatively, this value can also be provided via an environment variable
      of the same name.

    - Set ``APPINSIGHTS_ENDPOINT_URI`` to a string to customize the telemetry
      endpoint to which Application Insights will send the telemetry.

    - Set ``APPINSIGHTS_DISABLE_REQUEST_LOGGING`` to ``False`` to disable
      logging of Bottle requests to Application Insights.

    .. code:: python

            from bottle import run, Bottle
            from applicationinsights.bottle.plugin import AppInsights

            app = Bottle()
            app.config["APPINSIGHTS_INSTRUMENTATIONKEY"] = "<YOUR INSTRUMENTATION KEY GOES HERE>"
            app.install(AppInsights())

            @app.route('/hello')
            def hello():
                return "Hello World!"

            if __name__ == '__main__':
                run(app, host='localhost', port=8080)
    """

    name = "appinsights"
    api = 2

    def __init__(self):
        """
        Initialize a new instance of the extension.

        """
        self._key = None
        self._endpoint_uri = None
        self._channel = None
        self._tc = None

    def setup(self, app):
        """
        Initializes the plugin for the provided Bottle application.

        Args:
            app (bottle.Bottle). the Bottle application for which to initialize the extension.
        """
        self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)

        if not self._key:
            return

        self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
        sender = AsynchronousSender(self._endpoint_uri)

        queue = AsynchronousQueue(sender)
        self._channel = TelemetryChannel(None, queue)
        self._tc = TelemetryClient(self._key, self._channel)

        self.context.cloud.role_instance = platform.node()

    def close(self):
        self.flush()

    @property
    def context(self):
        """
        Accesses the telemetry context.

        Returns:
            (applicationinsights.channel.TelemetryContext). The Application Insights telemetry context.
        """
        return self._channel.context

    def apply(self, callback, route):
        """
        Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
        is set in the Bottle config.
        """
        if route.app.config.get(CONF_DISABLE_REQUEST_LOGGING,
                                False) or self._tc is None:
            return callback

        def wrapper(*args, **kwargs):
            start_time = current_milli_time()
            result = callback(*args, **kwargs)
            try:
                duration = current_milli_time() - start_time

                self._tc.track_request(route.method + " " + route.rule,
                                       route.rule,
                                       bottle.response.status_code < 400,
                                       start_time, duration,
                                       bottle.response.status_code,
                                       route.method)
            finally:
                return result

        return wrapper

    def flush(self):
        """Flushes the queued up telemetry to the service.
        """
        if self._tc:
            self._tc.flush()
Example #28
class ApplicationInsightsTelemetryClient(BotTelemetryClient):
    def __init__(self, instrumentation_key: str):
        self._instrumentation_key = instrumentation_key
        self._client = TelemetryClient(self._instrumentation_key)

        # Telemetry Processor
        def telemetry_processor(data, context):
            post_data = IntegrationPostData().activity_json
            # Override session and user id
            from_prop = post_data.get('from')
            user_id = from_prop['id'] if from_prop is not None else None
            channel_id = post_data.get('channelId')
            conversation = post_data.get('conversation')
            conversation_id = conversation['id'] if conversation is not None and 'id' in conversation else None
            context.user.id = channel_id + user_id
            context.session.id = conversation_id

            # Additional bot-specific properties
            if 'activityId' in post_data:
                data.properties["activityId"] = post_data['activityId']
            if 'channelId' in post_data:
                data.properties["channelId"] = post_data['channelId']
            if 'activityType' in post_data:
                data.properties["activityType"] = post_data['activityType']

        self._client.add_telemetry_processor(telemetry_processor)

    def track_pageview(self,
                       name: str,
                       url: str,
                       duration: int = 0,
                       properties: Dict[str, object] = None,
                       measurements: Dict[str, object] = None) -> None:
        """
        Send information about the page viewed in the application (a web page for instance).
        :param name: the name of the page that was viewed.
        :param url: the URL of the page that was viewed.
        :param duration: the duration of the page view in milliseconds. (defaults to: 0)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_pageview(name, url, duration, properties,
                                    measurements)

    def track_exception(self,
                        type_exception: type = None,
                        value: Exception = None,
                        tb: traceback = None,
                        properties: Dict[str, object] = None,
                        measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single exception that occurred in the application.
        :param type_exception: the type of the exception that was thrown.
        :param value: the exception that the client wants to send.
        :param tb: the traceback information as returned by :func:`sys.exc_info`.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_exception(type_exception, value, tb, properties,
                                     measurements)

    def track_event(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single event that has occurred in the context of the application.
        :param name: the data to associate to this event.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_event(name, properties, measurements)

    def track_metric(self,
                     name: str,
                     value: float,
                     type: TelemetryDataPointType = None,
                     count: int = None,
                     min: float = None,
                     max: float = None,
                     std_dev: float = None,
                     properties: Dict[str, object] = None) -> NotImplemented:
        """
        Send information about a single metric data point that was captured for the application.
        :param name: The name of the metric that was captured.
        :param value: The value of the metric that was captured.
        :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`)
        :param count: the number of metrics that were aggregated into this data point. (defaults to: None)
        :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        """
        self._client.track_metric(name, value, type, count, min, max, std_dev,
                                  properties)

    def track_trace(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    severity=None):
        """
        Sends a single trace statement.
        :param name: the trace statement.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
        """
        self._client.track_trace(name, properties, severity)

    def track_request(self,
                      name: str,
                      url: str,
                      success: bool,
                      start_time: str = None,
                      duration: int = None,
                      response_code: str = None,
                      http_method: str = None,
                      properties: Dict[str, object] = None,
                      measurements: Dict[str, object] = None,
                      request_id: str = None):
        """
        Sends a single request that was captured for the application.
        :param name: The name for this request. All requests with the same name will be grouped together.
        :param url: The actual URL for this request (to show in individual request instances).
        :param success: True if the request ended in success, False otherwise.
        :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
        :param duration: the number of milliseconds that this request lasted. (defaults to: None)
        :param response_code: the response code that this request returned. (defaults to: None)
        :param http_method: the HTTP method that triggered this request. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_request(name, url, success, start_time, duration,
                                   response_code, http_method, properties,
                                   measurements, request_id)

    def track_dependency(self,
                         name: str,
                         data: str,
                         type: str = None,
                         target: str = None,
                         duration: int = None,
                         success: bool = None,
                         result_code: str = None,
                         properties: Dict[str, object] = None,
                         measurements: Dict[str, object] = None,
                         dependency_id: str = None):
        """
        Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)
        :param target: the target site of a dependency call. Examples are server name, host address. (default to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param dependency_id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_dependency(name, data, type, target, duration,
                                      success, result_code, properties,
                                      measurements, dependency_id)

    def flush(self):
        """Flushes data in the queue. Data in the queue will be sent either immediately irrespective of what sender is
        being used.
        """
        self._client.flush()
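# A minimal usage sketch of the wrapper above. Hedged: the listing starts
# mid-class, so the class name and constructor arguments here are
# illustrative, not the original source's:
#
# client = ApplicationInsightsTelemetryClient(...)  # hypothetical ctor
# client.track_event('OrderPlaced', properties={'sku': 'X1'})
# client.track_metric('queue_depth', 17.0)
# try:
#     1 / 0
# except ZeroDivisionError:
#     import sys
#     client.track_exception(*sys.exc_info())  # (type, value, traceback)
# client.flush()  # nothing is guaranteed sent until the queue is flushed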
Example #29
def send_event():
    tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
    tc.track_event('Test event')
    tc.flush()
    messagebox.showinfo("updates", "sent")
Example #30
class NodeStatsCollector:
    """
    Node Stats Manager class
    """
    def __init__(self,
                 pool_id,
                 node_id,
                 refresh_interval=_DEFAULT_STATS_UPDATE_INTERVAL,
                 app_insights_key=None):
        self.pool_id = pool_id
        self.node_id = node_id
        self.telemetry_client = None
        self.first_collect = True
        self.refresh_interval = refresh_interval

        self.disk = IOThroughputAggregator()
        self.network = IOThroughputAggregator()

        if app_insights_key or 'APP_INSIGHTS_INSTRUMENTATION_KEY' in os.environ or 'APP_INSIGHTS_KEY' in os.environ:
            key = (app_insights_key
                   or os.environ.get('APP_INSIGHTS_INSTRUMENTATION_KEY')
                   or os.environ.get('APP_INSIGHTS_KEY'))

            logger.info(
                "Detected instrumentation key. Will upload stats to app insights"
            )
            self.telemetry_client = TelemetryClient(key)
            context = self.telemetry_client.context
            context.application.id = 'AzureBatchInsights'
            context.application.ver = VERSION
            context.device.model = "BatchNode"
            context.device.role_name = self.pool_id
            context.device.role_instance = self.node_id
        else:
            logger.info(
                "No instrumentation key detected. Cannot upload to app insights. "
                "Make sure the APP_INSIGHTS_INSTRUMENTATION_KEY environment variable is set."
            )

    def init(self):
        """
            Initialize the monitoring
        """
        # start cpu utilization monitoring, first value is ignored
        psutil.cpu_percent(interval=None, percpu=True)

    def _get_network_usage(self):
        netio = psutil.net_io_counters()
        return self.network.aggregate(netio.bytes_recv, netio.bytes_sent)

    def _get_disk_usage(self):
        diskio = psutil.disk_io_counters()
        return self.disk.aggregate(diskio.read_bytes, diskio.write_bytes)

    def _sample_stats(self):
        # get system-wide counters
        mem = psutil.virtual_memory()
        disk_stats = self._get_disk_usage()
        net_stats = self._get_network_usage()

        swap_total, _, swap_avail, _, _, _ = psutil.swap_memory()

        stats = NodeStats(
            cpu_count=psutil.cpu_count(),
            cpu_percent=psutil.cpu_percent(interval=None, percpu=True),
            num_pids=len(psutil.pids()),

            # Memory
            mem_total=mem.total,
            mem_avail=mem.available,
            swap_total=swap_total,
            swap_avail=swap_avail,

            # Disk IO
            disk=disk_stats,

            # Net transfer
            net=net_stats,
        )
        del mem
        return stats

    def _collect_stats(self):
        """
            Collect the stats and then send to app insights
        """
        # collect stats
        stats = self._sample_stats()

        if self.first_collect:
            self.first_collect = False
            return

        if stats is None:
            logger.error("Could not sample node stats")
            return

        if self.telemetry_client:
            self._send_stats(stats)
        else:
            self._log_stats(stats)

    def _send_stats(self, stats):
        """
            Retrieve the current stats and send to app insights
        """
        process = psutil.Process(os.getpid())

        logger.debug("Uploading stats. Mem of this script: %d vs total: %d",
                     process.memory_info().rss, stats.mem_avail)
        client = self.telemetry_client

        for cpu_n in range(0, stats.cpu_count):
            client.track_metric("Cpu usage",
                                stats.cpu_percent[cpu_n],
                                properties={"Cpu #": cpu_n})

        client.track_metric("Memory used", stats.mem_used)
        client.track_metric("Memory available", stats.mem_avail)
        client.track_metric("Disk read", stats.disk.read_bps)
        client.track_metric("Disk write", stats.disk.write_bps)
        client.track_metric("Network read", stats.net.read_bps)
        client.track_metric("Network write", stats.net.write_bps)
        self.telemetry_client.flush()

    def _log_stats(self, stats):
        logger.info(
            "========================= Stats =========================")
        logger.info("Cpu percent:            %d%% %s", avg(stats.cpu_percent),
                    stats.cpu_percent)
        logger.info("Memory used:       %sB / %sB", pretty_nb(stats.mem_used),
                    pretty_nb(stats.mem_total))
        logger.info("Swap used:         %sB / %sB",
                    pretty_nb(stats.swap_avail), pretty_nb(stats.swap_total))
        logger.info("Net read:               %sBs",
                    pretty_nb(stats.net.read_bps))
        logger.info("Net write:              %sBs",
                    pretty_nb(stats.net.write_bps))
        logger.info("Disk read:               %sBs",
                    pretty_nb(stats.disk.read_bps))
        logger.info("Disk write:              %sBs",
                    pretty_nb(stats.disk.write_bps))
        logger.info("-------------------------------------")
        logger.info("")

    def run(self):
        """
            Start collecting information of the system.
        """
        logger.debug("Start collecting stats for pool=%s node=%s",
                     self.pool_id, self.node_id)
        while True:
            self._collect_stats()
            time.sleep(self.refresh_interval)
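# Minimal launch sketch for the collector above (assumption: on an Azure
# Batch node the pool and node ids are exposed via the AZ_BATCH_POOL_ID /
# AZ_BATCH_NODE_ID environment variables; the fallback names are
# illustrative):
#
# if __name__ == '__main__':
#     collector = NodeStatsCollector(
#         pool_id=os.environ.get('AZ_BATCH_POOL_ID', 'local-pool'),
#         node_id=os.environ.get('AZ_BATCH_NODE_ID', 'local-node'))
#     collector.init()  # primes psutil.cpu_percent so the first sample is valid
#     collector.run()   # blocks, sampling every refresh_interval seconds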
Example #31
File: telemetry.py Project: Bartman0/ppm
class Telemetry:
    def __init__(self):
        try:
            self.telemetry = TelemetryClient(IKEY)
            if os.path.exists("telemetry.config"):
                with open("telemetry.config", "r") as config_file:
                    self.enable_telemetry = config_file.read() == "1"
            else:
                self.enable_telemetry = self._query_yes_no(PROMPT_TEXT)
                with open("telemetry.config", "w") as config_file:
                    if self.enable_telemetry:
                        config_file.write("1")
                        self.telemetry.track_event("yes", {
                            "device": DEVICE,
                            "language": LANGUAGE
                        })
                    else:
                        config_file.write("0")
                        self.telemetry.context.location.ip = "0.0.0.0"
                        self.telemetry.track_event("no", {
                            "device": DEVICE,
                            "language": LANGUAGE
                        })
            self.telemetry.flush()
        except Exception:
            pass

    def send_telemetry_data(self, iot_hub_name, event, message):
        try:
            if self.enable_telemetry:
                hash_mac = self._get_mac_hash()
                hash_iot_hub_name = hashlib.sha256(
                    iot_hub_name.encode("utf-8")).hexdigest()
                self.telemetry.track_event(
                    event, {
                        "iothub": hash_iot_hub_name,
                        "message": message,
                        "language": LANGUAGE,
                        "device": DEVICE,
                        "mac": hash_mac,
                        "osType": platform.system(),
                        "osPlatform": platform.dist()[0],
                        "osRelease": platform.dist()[1]
                    })
                self.telemetry.flush()
        except Exception:
            pass

    def _get_mac_hash(self):
        mac = ":".join(re.findall("..", "%012x" % uuid.getnode()))
        return hashlib.sha256(mac.encode("utf-8")).hexdigest()

    def _query_yes_no(self, question):
        default = "y"
        valid = {"y": True, "n": False}
        prompt = " [Y/n] "
        try:
            input_func = raw_input  # Python 2
        except NameError:
            input_func = input  # Python 3
        while True:
            sys.stdout.write(question + prompt)
            choice = input_func().lower()
            if default is not None and choice == "":
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'y' or 'n' ")
Example #32
def index():

    if request.method == 'GET':

        # Get current values, tracing each read in its own span
        with tracer.span(name=button1):
            vote1 = r.get(button1).decode('utf-8')
        with tracer.span(name=button2):
            vote2 = r.get(button2).decode('utf-8')

        # Return index with values
        return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)

    elif request.method == 'POST':

        if request.form['vote'] == 'reset':

            vote1 = r.get(button1).decode('utf-8')
            vote2 = r.get(button2).decode('utf-8')
            # Empty table and return results
            r.set(button1,0)

            r.set(button2,0)

            try:
                if int(vote1) > 0:
                    properties = {'custom_dimensions': {'Cats Vote': vote1}}
                    # Log the cat vote with its custom dimensions
                    with tracer.span(name=vote1) as span:
                        logger.warning('cat vote', extra=properties)

                if int(vote2) > 0:
                    properties = {'custom_dimensions': {'Dogs Vote': vote2}}
                    # Log the dog vote with its custom dimensions
                    with tracer.span(name=vote2) as span:
                        logger.warning('dog vote', extra=properties)
            except ValueError as e:
                logger.error("Error - not integers {0}".format(e))

            return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)

        else:

            # Insert vote result into DB
            vote = request.form['vote']
            with tracer.span(name=vote) as span:
                r.incr(vote, 1)

            # Get current values
            vote1 = r.get(button1).decode('utf-8')
            vote2 = r.get(button2).decode('utf-8')

            tc = TelemetryClient(app.config['INSTRUMENTATION_KEY'])
            if vote == button1: #button1 = Cats
                tc.track_event('Cats vote')
            else: 
                tc.track_event('Dogs vote')

            tc.flush()

            # Return results
            return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)
Example #33
test_id = os.environ.get("TEST_ID", str(uuid.uuid4()))

instrumentation_key = os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY")
telemetry_client = None
if instrumentation_key:
    telemetry_client = TelemetryClient(instrumentation_key)

print("Test run for '{}' started.".format(test_id))

queries_total = int(os.environ.get("QUERIES_TOTAL", -1))
queries_executed = 0

while queries_executed < queries_total or queries_total < 0:
    raw_query = get_query()
    print("\nTest '{}' executing #{}:\n{}\n".format(test_id, queries_executed,
                                                    raw_query))

    t = timeit.Timer(functools.partial(cursor.execute, raw_query))
    query_time = t.timeit(number=1)

    print("Query took: {:.2f} seconds".format(query_time))
    queries_executed += 1

    if telemetry_client:
        telemetry_client.track_metric("query_time",
                                      query_time,
                                      properties={"test_id": test_id})
        telemetry_client.flush()

print("Test run for '{}' ended.".format(test_id))
Example #34
class NodeStatsCollector:
    """
    Node Stats Manager class
    """
    def __init__(self,
                 pool_id,
                 node_id,
                 refresh_interval=_DEFAULT_STATS_UPDATE_INTERVAL,
                 app_insights_key=None):
        self.pool_id = pool_id
        self.node_id = node_id
        self.telemetry_client = None
        self.first_collect = True
        self.refresh_interval = refresh_interval

        self.disk = IOThroughputAggregator()
        self.network = IOThroughputAggregator()

        if app_insights_key or 'APP_INSIGHTS_INSTRUMENTATION_KEY' in os.environ or 'APP_INSIGHTS_KEY' in os.environ:
            key = (app_insights_key
                   or os.environ.get('APP_INSIGHTS_INSTRUMENTATION_KEY')
                   or os.environ.get('APP_INSIGHTS_KEY'))

            logger.info(
                "Detected instrumentation key. Will upload stats to app insights"
            )
            self.telemetry_client = TelemetryClient(key)
            context = self.telemetry_client.context
            context.application.id = 'AzureBatchInsights'
            context.application.ver = VERSION
            context.device.model = "BatchNode"
            context.device.role_name = self.pool_id
            context.device.role_instance = self.node_id
        else:
            logger.info(
                "No instrumentation key detected. Cannot upload to app insights. "
                "Make sure the APP_INSIGHTS_INSTRUMENTATION_KEY environment variable is set."
            )

    def init(self):
        """
            Initialize the monitoring
        """
        # start cpu utilization monitoring, first value is ignored
        psutil.cpu_percent(interval=None, percpu=True)

    def _get_network_usage(self):
        netio = psutil.net_io_counters()
        return self.network.aggregate(netio.bytes_recv, netio.bytes_sent)

    def _get_disk_io(self):
        diskio = psutil.disk_io_counters()
        return self.disk.aggregate(diskio.read_bytes, diskio.write_bytes)

    def _get_disk_usage(self):
        disk_usage = dict()
        try:
            disk_usage[_OS_DISK] = psutil.disk_usage(_OS_DISK)
            disk_usage[_USER_DISK] = psutil.disk_usage(_USER_DISK)
        except Exception as e:
            logger.error(
                'Could not retrieve user disk stats for {0}: {1}'.format(
                    _USER_DISK, e))
        return disk_usage

    def _sample_stats(self):
        # get system-wide counters
        mem = psutil.virtual_memory()
        disk_stats = self._get_disk_io()
        disk_usage = self._get_disk_usage()
        net_stats = self._get_network_usage()

        swap_total, _, swap_avail, _, _, _ = psutil.swap_memory()

        # Tuple (proc name, CPU %)
        process_list = list(((proc.info['name'], proc.cpu_percent(interval=1))
                             for proc in psutil.process_iter(attrs=['name'])
                             if proc.info["name"] in PROCESSES_TO_WATCH))

        stats = NodeStats(
            cpu_count=psutil.cpu_count(),
            cpu_percent=psutil.cpu_percent(interval=None, percpu=True),
            num_pids=len(psutil.pids()),

            # Memory
            mem_total=mem.total,
            mem_avail=mem.available,
            swap_total=swap_total,
            swap_avail=swap_avail,

            # Disk IO
            disk_io=disk_stats,

            # Disk usage
            disk_usage=disk_usage,

            # Net transfer
            net=net_stats,

            # Active rendering processes with CPU
            process_list=process_list)
        del mem
        return stats

    def _collect_stats(self):
        """
            Collect the stats and then send to app insights
        """
        # collect stats
        stats = self._sample_stats()

        if self.first_collect:
            self.first_collect = False
            return

        if stats is None:
            logger.error("Could not sample node stats")
            return

        if self.telemetry_client:
            self._send_stats(stats)
        else:
            self._log_stats(stats)

    def _send_stats(self, stats):
        """
            Retrieve the current stats and send to app insights
        """
        process = psutil.Process(os.getpid())

        logger.debug("Uploading stats. Mem of this script: %d vs total: %d",
                     process.memory_info().rss, stats.mem_avail)
        client = self.telemetry_client

        for cpu_n in range(0, stats.cpu_count):
            client.track_metric("Cpu usage",
                                stats.cpu_percent[cpu_n],
                                properties={"Cpu #": cpu_n})

        for name, disk_usage in stats.disk_usage.items():
            client.track_metric("Disk usage",
                                disk_usage.used,
                                properties={"Disk": name})
            client.track_metric("Disk free",
                                disk_usage.free,
                                properties={"Disk": name})

        if stats.process_list:
            for process_name, cpu in stats.process_list:
                props = {
                    "Process": process_name,
                    "PoolName": self.pool_id,
                    "ComputeNode": self.node_id
                }
                client.track_metric("ActiveProcess", cpu, properties=props)

        client.track_metric("Memory used", stats.mem_used)
        client.track_metric("Memory available", stats.mem_avail)
        client.track_metric("Disk read", stats.disk_io.read_bps)
        client.track_metric("Disk write", stats.disk_io.write_bps)
        client.track_metric("Network read", stats.net.read_bps)
        client.track_metric("Network write", stats.net.write_bps)
        self.telemetry_client.flush()

    def _log_stats(self, stats):
        logger.info(
            "========================= Stats =========================")
        logger.info("Cpu percent:            %d%% %s", avg(stats.cpu_percent),
                    stats.cpu_percent)
        logger.info("Memory used:       %sB / %sB", pretty_nb(stats.mem_used),
                    pretty_nb(stats.mem_total))
        logger.info("Swap used:         %sB / %sB",
                    pretty_nb(stats.swap_avail), pretty_nb(stats.swap_total))
        logger.info("Net read:               %sBs",
                    pretty_nb(stats.net.read_bps))
        logger.info("Net write:              %sBs",
                    pretty_nb(stats.net.write_bps))
        logger.info("Disk read:               %sBs",
                    pretty_nb(stats.disk_io.read_bps))
        logger.info("Disk write:              %sBs",
                    pretty_nb(stats.disk_io.write_bps))
        logger.info("Disk usage:")
        for name, disk_usage in stats.disk_usage.items():
            logger.info("  - %s: %i/%i (%i%%)", name, disk_usage.used,
                        disk_usage.total, disk_usage.percent)

        if stats.process_list:
            for process_name, cpu in stats.process_list:
                logger.info("ActiveProcess: %s (CPU %d%%)", process_name, cpu)

        logger.info("-------------------------------------")
        logger.info("")

    def run(self):
        """
            Start collecting information of the system.
        """
        logger.debug("Start collecting stats for pool=%s node=%s",
                     self.pool_id, self.node_id)
        while True:
            self._collect_stats()
            time.sleep(self.refresh_interval)
Example #35
def main():
    command = 'az login --service-principal -u ' + sys.argv[
        1] + ' -p ' + sys.argv[2] + ' --tenant ' + sys.argv[3]
    logger1.info("[INFO]: Logging in {}".format(command))
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    #y = json.loads(proc_stdout)
    logger1.info("[INFO]: output of az login {}".format(proc_stdout))
    command = 'az resource show -g ' + sys.argv[
        7] + ' --resource-type microsoft.insights/components -n ' + sys.argv[
            6] + ' --query properties.InstrumentationKey -o tsv'
    logger1.info("[INFO]: Show resources {}".format(command))
    #inst_key = subprocess.check_output(shlex.split(command)).rstrip()
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    # communicate() returns bytes on Python 3; decode before using it as a key
    inst_key = process.communicate()[0].decode('utf-8').strip()
    logger1.info("[INFO]: output of az resource show {}".format(inst_key))

    logger1.info("[INFO]: publishing metrics {}".format(metric_list))
    tc = TelemetryClient(inst_key)

    # Publish each metric twice (flushing after each point) and pause between
    # metrics, preserving the original timing
    metric_names = [
        'DataPlaneCPUUtilizationPct',
        'panGPGatewayUtilizationPct',
        'panGPGWUtilizationActiveTunnels',
        'DataPlanePacketBufferUtilization',
        'panSessionActive',
        'panSessionSslProxyUtilization',
        'panSessionUtilization',
    ]
    for metric_name in metric_names:
        tc.track_metric(metric_name, 0)
        tc.flush()
        tc.track_metric(metric_name, 0)
        tc.flush()
        time.sleep(10)
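# Invocation sketch, with the argument positions inferred from the sys.argv
# indexing above (argv[4] and argv[5] are not referenced here; the script
# name is hypothetical):
#
#   python publish_pan_metrics.py <sp_app_id> <sp_password> <tenant> _ _ \
#       <app_insights_name> <resource_group>
#
# if __name__ == '__main__':
#     main()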
Example #36
def main(msg: func.QueueMessage) -> None:
    """
    Main function, triggered by Azure Storage Queue, parsed queue content and
    try to download the databricks output metadata file to get each succefully processed file location.
    Then enqueue to ingest queue for ingestion to ADX on later Azure function.
    If the file is checkpoint compact file, the code will shrink the file size.
    :param msg: func.QueueMessage
    :return: None
    """
    code_start_time = time.time()
    logging.info('Python queue trigger function processed a queue item: %s',
                 msg.get_body().decode('utf-8'))
    # modify the log level of azure sdk requests
    logging.getLogger('azure').setLevel(logging.WARNING)
    init_config_values()

    tc = TelemetryClient(APPINSIGHTS_INSTRUMENTATIONKEY)
    tc.context.application.ver = '1.0'
    tc.context.properties["PROCESS_PROGRAM"] = PROCESS_PROGRAM_NAME
    tc.context.properties["PROCESS_START"] = time.time()

    # 1. Get trigger file content (rename event)
    content_json = json.loads(msg.get_body().decode('utf-8'))

    logging.info("meta-data event content: {}".format(msg.get_body().decode('utf-8')))
    file_url = content_json['data']['destinationUrl']
    logging.info(f"file_url: {file_url}")
    event_time = content_json['eventTime']

    # 2. Download metadata blob content
    logging.info(f"{HEADER} Download blob file from {file_url}")
    temp_blob_client = BlobClient.from_blob_url(blob_url=file_url, logging_enable=False)
    blob_path = temp_blob_client.blob_name
    container_name = temp_blob_client.container_name

    try:
        metadata_file_content = get_blob_content(container_name, blob_path)
    except Exception:
        logging.exception(f"Failed to download blob from url {file_url}")
        raise

    # 3. Parse split output file from the metadata
    queue_msg_list = generate_metadata_queue_messages(event_time, metadata_file_content)
    logging.info(
        f"{HEADER} Generate metadata queue_messages from {file_url}, {len(queue_msg_list)} messages")

    # 4. Loop to enqueue msg to ADX ingest queue
    queue_client_list = []
    for q_url in ADX_INGEST_QUEUE_URL_LIST:
        queue_client = get_queue_client(q_url)
        queue_client_list.append(queue_client)

    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()
    tasks = gen_metadata_msg_enqueue_tasks(queue_msg_list, queue_client_list, tc)
    loop.run_until_complete(gather_with_concurrency(CONCURRENT_ENQUEUE_TASKS, tasks))
    close_queue_clients(queue_client_list, loop)
    loop.close()

    logging.info(f"{HEADER} Done queuing up messages to Ingestion queue")

    if file_url.endswith(".compact"): # reduce compact file size
        update_blob_content(container_name,
                            blob_path,
                            get_shrinked_checkpoint_content(
                                metadata_file_content, MAX_COMPACT_FILE_RECORDS))
        logging.info(f"{HEADER} Reduced checkpoint files {file_url}, max lines is {MAX_COMPACT_FILE_RECORDS}")

    code_duration = time.time() - code_start_time
    tc.track_event(METADATA_HANDLE_EVENT_NAME,
                   {'FILE_URL': file_url},
                   {METADATA_HANDLE_EVENT_NAME + '_DURATION_SEC': code_duration})
    tc.flush()
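# Sketch of the `gather_with_concurrency` helper the function relies on; this
# is a common asyncio pattern and may differ from the project's actual
# implementation:
#
# async def gather_with_concurrency(n, tasks):
#     semaphore = asyncio.Semaphore(n)
#
#     async def sem_task(task):
#         async with semaphore:  # at most n tasks run concurrently
#             return await task
#
#     return await asyncio.gather(*(sem_task(t) for t in tasks))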
Example #37
class AI4EAppInsights(object):
    def __init__(self):
        self.grantee_key = None
        raw_key = getenv(CONF_KEY_GRANTEE, None)
        if (raw_key and len(raw_key.strip()) > 0):
            self.grantee_key = raw_key.strip()

        if (self.grantee_key):
            self.sender = AsynchronousSender()
            self.r_queue = AsynchronousQueue(self.sender)
            self.r_context = AI4ETelemetryContext()
            self.r_channel = TelemetryChannel(self.r_context, self.r_queue)

            self.appinsights_grantee_client = TelemetryClient(
                getenv(CONF_KEY_GRANTEE), self.r_channel)
            self.appinsights_ai4e_client = None

            if (getenv(CONF_KEY_AI4E)):
                self.appinsights_ai4e_client = TelemetryClient(
                    getenv(CONF_KEY_AI4E), self.r_channel)

    def _log(self, message, sev, taskId=None, additionalProperties=None):
        if (self.grantee_key):
            if (taskId):
                if (additionalProperties is None):
                    additionalProperties = {'task_id': taskId}
                else:
                    additionalProperties['task_id'] = taskId

            self.appinsights_grantee_client.track_trace(
                message, severity=sev, properties=additionalProperties)
            self.appinsights_grantee_client.flush()

            if (self.appinsights_ai4e_client):
                self.appinsights_ai4e_client.track_trace(
                    message, severity=sev, properties=additionalProperties)
                self.appinsights_ai4e_client.flush()

    def log_debug(self, message, taskId=None, additionalProperties=None):
        self._log(message, "DEBUG", taskId, additionalProperties)

    def log_info(self, message, taskId=None, additionalProperties=None):
        self._log(message, "INFO", taskId, additionalProperties)

    def log_error(self, message, taskId=None, additionalProperties=None):
        self._log(message, "ERROR", taskId, additionalProperties)

    def log_warn(self, message, taskId=None, additionalProperties=None):
        self._log(message, "WARNING", taskId, additionalProperties)

    def log_exception(self, message, taskId=None, additionalProperties=None):
        self._log(message, "CRITICAL", taskId, additionalProperties)

    def track_metric(self, metric_name, metric_value):
        if (self.grantee_key):
            print("Tracking metric:" + metric_name + ", Value: " +
                  str(metric_value))
            self.appinsights_grantee_client.track_metric(
                metric_name, metric_value)
            self.appinsights_grantee_client.flush()

            if (self.appinsights_ai4e_client):
                self.appinsights_ai4e_client.track_metric(
                    metric_name, metric_value)
                self.appinsights_ai4e_client.flush()
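# Usage sketch (assumes the CONF_KEY_GRANTEE / CONF_KEY_AI4E environment
# variables hold instrumentation keys; the values shown are illustrative):
#
# log = AI4EAppInsights()
# log.log_info('Scoring started', taskId='task-123')
# log.track_metric('inference_seconds', 0.42)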
Example #38
def process(filesrootfolder, forceinsert):

    # Create a process id to identify this run
    process_id = time.time()

    tc = TelemetryClient('')  # note: the real instrumentation key appears to have been redacted

    tc.context.application.ver = '1.0'
    tc.context.properties["PROCESS_PROGRAM"] = "BATCH_CSV_V001a"
    tc.context.properties["PROCESS_START"] = time.time()
    tc.context.properties["DATA_FOLDER"] = filesrootfolder
    tc.context.properties["PROCESS_ID"] = process_id

    tc.track_trace('START RUN BATCH INGEST CSV DATA from folder ' +
                   filesrootfolder)
    tc.track_event('BATHCH_INGEST_CSV_START', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {})
    tc.flush()
    #print (vm_uuid,deploy_uuid,config_uuid)

    # Prepare COSMOS Link

    url = COSMOS_URL
    #key = os.environ['ACCOUNT_KEY']
    key = COSMOS_KEY
    client = cosmos_client.CosmosClient(url, {'masterKey': key})
    database_id = COSMOS_DATABASE
    container_id = COSMOS_CONTAINER

    database_link = 'dbs/' + database_id
    collection_link = database_link + '/colls/' + container_id

    doc_id = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_Metric'
    doc_link = collection_link + '/docs/' + doc_id

    options = {}
    options['enableCrossPartitionQuery'] = True
    options['maxItemCount'] = 5
    options['partitionKey'] = vm_uuid

    proc_log_doc = None
    try:
        proc_log_doc = client.ReadItem(doc_link, options)
    except Exception:
        print("New process metric doc")

    if (proc_log_doc is not None):
        print("Found existing metric doc")

        # Stop processing if this data has already been processed
        if str(forceinsert).lower() != 'true':
            return 400, doc_id + " has already been processed"

    else:  # New process log
        proc_log_doc = {}
        proc_log_doc["PROCESSES"] = []
        proc_log_doc["DOC_TYPE"] = "PROCESS_METRIC"
        proc_log_doc["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
        proc_log_doc['id'] = doc_id

    # DATA_FOLDER is a string, so it belongs in properties, not measurements
    tc.track_event('BATHCH_INGEST_METRIC_CSV', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    })
    tc.flush()
    proc_log_this = {}
    proc_log_this["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
    proc_log_this["PROCESS_START"] = time.time()
    proc_log_this["DATA_FOLDER"] = filesrootfolder
    proc_log_this[
        'id'] = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_' + str(
            process_id)

    error_files, merged_files, source_files = merge_rename_core_columns_CSV(
        vm_uuid, deploy_uuid, config_uuid, 'defualt_metrics_csv_001A', 0,
        SOURCE_CSV_CONTAINER, filesrootfolder, FILE_OUTPUT_FOLDER, process_id)

    # ToDo  ...
    proc_log_this["PROCESS_ID"] = process_id
    proc_log_this["ERROR_SOURCE_FILES_COUNT"] = len(error_files)
    proc_log_this["SOURCE_FILES_COUNT"] = len(source_files)

    tc.track_metric('BATHCH_INGEST_CSV_ERROR_SOURCE_FILES_COUNT',
                    len(error_files))
    tc.track_metric('BATHCH_INGEST_CSV_ERROR_SOURCE_SOURCE_FILES_COUNT',
                    len(source_files))
    tc.flush()

    # print(str(len(error_files)),'  ',str(len(merged_files)))

    proc_log_this["PROCESS_END"] = time.time()
    proc_log_this["STATUS"] = "OK"

    proc_log_this["STATUS_MESSAGE"] = (
        "It takes %s seconds to ingest  CSV file from Blob Storage") % (
            proc_log_this["PROCESS_END"] - proc_log_this["PROCESS_START"])

    proc_log_doc["PROCESSES"].append(proc_log_this)
    proc_log_doc['LATEST_UPDATE_TIMESTAMP'] = time.time()

    # Update Process Log
    client.UpsertItem(collection_link, proc_log_doc, options)

    tc.track_trace('END RUN BATCH INGEST METRIC CSV DATA from folder ' +
                   filesrootfolder)

    tc.track_event('BATHCH_INGEST_METRIC_CSV_END', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {
        'DEFECT_FILES_COUNT': len(error_files),
        'MERGED_FILES_COUNT': len(merged_files),
        'SOURCE_FILES_COUNT': len(source_files)
    })
    tc.flush()
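# Invocation sketch (assumes the COSMOS_* constants and the vm_uuid,
# deploy_uuid and config_uuid globals used above are configured; the folder
# name is illustrative):
#
# result = process('metrics/2020-06', forceinsert='false')
# if result:
#     status, message = result  # (400, ...) when the folder was already processed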
Example #39
def merge_rename_core_columns_CSV(vm_uuid, deploy_uuid, config_uuid,
                                  schema_ver, inject_ver, container_name,
                                  filesrootfolder, fileoutputfolder,
                                  process_id):
    #block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,  sas_token=SOURCE_CSV_BLOB_TOKEN)
    block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,
                                          account_key=SOURCE_CSV_BLOB_KEY)
    tc = TelemetryClient('')  # note: the real instrumentation key appears to have been redacted
    print("Start merge CSV ", vm_uuid, ' ', deploy_uuid, ' ', config_uuid)

    blobs = []
    marker = None
    while True:
        # pass the continuation marker, otherwise the same first page is
        # listed forever when the container has more than one page of blobs
        batch = block_blob_service.list_blobs(container_name,
                                              prefix=filesrootfolder,
                                              marker=marker)
        blobs.extend(batch)
        if not batch.next_marker:
            break
        marker = batch.next_marker
    blobpaths = []
    for blob in blobs:
        blobpaths.append(blob.name)

    matchers = ['.csv']
    matching = [s for s in blobpaths if any(xs in s for xs in matchers)]

    mergelog = {}
    mergelog["vm_uuid"] = vm_uuid

    mergelog["process_type"] = "MERGE_METRIC_CSV"
    mergelog["DOC_TYPE"] = "MERGE_METRIC_FILES_LOG"
    mergelog["file_folder"] = filesrootfolder
    mergelog["process_time"] = time.time()
    mergelog["files"] = []
    mergelog["defect_files"] = []

    a_mergelog = copy.deepcopy(mergelog)

    dfagg = pd.DataFrame(columns=[])

    mixagg = AGGREGATION_FILES_NUM
    aggcount = 0
    aggcount_total = 0
    aggoutcount = 0
    aggsize = 0

    error_files = []
    merged_files = []
    total_rows = 0
    alldfs = []
    outfilenamebase = fileoutputfolder + filesrootfolder + "_aggr_"
    t1 = time.time()
    #print (outfilenamebase)
    source_col = ['']
    target_col = ['']

    tc.track_trace('Prepare to process ' + str(len(matching)) +
                   '  Metric CSV files ')
    tc.flush()

    for fname in matching:
        #print(aggcount)

        head, tail = os.path.split(fname)

        aggcount += 1
        aggcount_total += 1

        blobstring = block_blob_service.get_blob_to_text(
            container_name, fname).content
        aggsize += len(blobstring)

        #print('Prepare to merge '+str(aggcount_total)+' / '+str(len(matching)) +' Memeory '+str(aggsize)+' File Name: '+tail)
        #tc.track_trace('Prepare to merge '+tail)
        #tc.flush()

        try:  # Read CSV and try processing

            dfone = pd.read_csv(StringIO(blobstring))

            dfAll_cols = dfone.columns
            #colname0=dfAll_cols
            dfAll_newcols = []

            pc_name = re.search(r'(\\{2}.*\\)(.*\\)', dfAll_cols[1]).group(1)
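            # Worked example (assuming perfmon-style CSV headers): for a
            # column such as '\\MYVM\Processor(_Total)\% Processor Time' the
            # regex captures pc_name = '\\MYVM\'; stripping that prefix and
            # the punctuation below leaves 'Processor_TotalProcessorTime'.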

            for col in dfAll_cols:
                dfAll_newcols.append(
                    col.replace(pc_name, '').replace('`', '').replace(
                        '\\', '').replace(' ', '').replace('/', '').replace(
                            '.', '').replace('-', '').replace('%', '').replace(
                                '(', '').replace(')', ''))

            dfAll_newcols[0] = "Universal_datetime"

            # Rename all columns
            dfone.columns = dfAll_newcols

            alldfs.append(dfone)
            a_mergelog['files'].append(tail)

            #if (aggcount>=mixagg) or (aggcount_total==len(matching)):
            if (aggsize > MAX_FILESIZE) or (aggcount_total == len(matching)):
                if (aggcount_total == len(matching)):
                    print("Processing Final File")
                    tc.track_trace('Processing Final File')
                    tc.flush()

                alldfs.append(pd.DataFrame(columns=source_col))
                dfagg = pd.concat(alldfs, ignore_index=True)
                dfagg_out = dfagg[source_col]
                dfagg_out.columns = target_col
                dfagg_out['schema_ver'] = schema_ver
                dfagg_out['inject_ver'] = inject_ver
                output = dfagg_out.to_csv(index=False, encoding="utf-8")
                outfile = outfilenamebase + str(aggoutcount) + ".csv"
                block_blob_service.create_blob_from_text(
                    container_name, outfile, output)
                print(
                    "Output aggregated file to " + container_name,
                    outfile + " Data Shape " + str(dfagg.shape) + ' uuid: ' +
                    str(vm_uuid) + str(deploy_uuid) + str(config_uuid))
                total_rows += dfagg_out.shape[0]

                merged_files.append(outfile)

                a_mergelog['output_file'] = outfile
                a_mergelog['merged_files_num'] = len(a_mergelog['files'])
                a_mergelog['defect_files_num'] = len(
                    a_mergelog['defect_files'])

                # Insert Process Log to COSMOS DB
                insert_json_cosmos(a_mergelog)
                a_mergelog = copy.deepcopy(mergelog)
                t2 = time.time()

                print(("It takes %s seconds to merge " + str(aggcount) +
                       " CSV Metrics") % (t2 - t1))
                aggoutcount += 1
                aggcount = 0
                aggsize = 0
                alldfs = []
                t1 = time.time()
                file_size = block_blob_service.get_blob_properties(
                    container_name, outfile).properties.content_length
                print(outfile + "  File Size " + str(file_size))

                # Ingest to ADX
                ingest_to_ADX(outfile, file_size)
        except Exception as e:
            print('Error While process ' + fname)
            error_class = e.__class__.__name__
            detail = e.args[0]
            cl, exc, tb = sys.exc_info()
            lastCallStack = traceback.extract_tb(tb)[-1]
            fileName = lastCallStack[0]
            lineNum = lastCallStack[1]
            funcName = lastCallStack[2]
            errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(
                fileName, lineNum, funcName, error_class, detail)

            print("Unexpected error:", sys.exc_info()[0])
            traceback.print_exc()

            msg = errMsg + traceback.format_exc()

            tc = TelemetryClient('')  # note: key appears to have been redacted
            tc.context.application.ver = '1.0'
            tc.context.properties["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
            tc.context.properties["DATA_FOLDER"] = filesrootfolder
            tc.track_trace(msg)

            tc.flush()
            # print("Unexpected error:", sys.exc_info()[0])
            a_mergelog["defect_files"].append(tail)
            error_files.append(fname)  # Add No-Well Formed JSON to error file
    print('Total Rows ' + str(totoal_rows))

    tc.track_trace('Processed rows: ' + str(total_rows))
    tc.track_metric('BATHCH_INGEST_METRIC_CSV_TOTAL_ROWS', total_rows)
    tc.flush()
    return error_files, merged_files, matching
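# The function above leans on two helpers defined elsewhere in the project;
# hypothetical sketches of their shape (not the actual implementations):
#
# def insert_json_cosmos(doc):
#     client.UpsertItem(collection_link, doc, options)  # as in Example #38
#
# def ingest_to_ADX(blob_path, file_size):
#     ...  # queue the merged blob for Azure Data Explorer ingestion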
Example #40
class AppInsights(object):
    """ This class represents a Flask extension that enables request telemetry,
    trace logging and exception logging for a Flask application. The telemetry
    will be sent to Application Insights service using the supplied
    instrumentation key.

    The following Flask config variables can be used to configure the extension:

    - Set ``APPINSIGHTS_INSTRUMENTATIONKEY`` to a string to provide the
      instrumentation key to send telemetry to application insights.
      Alternatively, this value can also be provided via an environment variable
      of the same name.

    - Set ``APPINSIGHTS_ENDPOINT_URI`` to a string to customize the telemetry
      endpoint to which Application Insights will send the telemetry.

    - Set ``APPINSIGHTS_DISABLE_REQUEST_LOGGING`` to ``True`` to disable
      logging of Flask requests to Application Insights.

    - Set ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` to ``True`` to disable logging
      of all log traces to Application Insights.

    - Set ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING`` to ``True`` to disable
      logging of all exceptions to Application Insights.

    .. code:: python

            from flask import Flask
            from ai4e_app_insights import AppInsights

            # instantiate the Flask application
            app = Flask(__name__)
            app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = '<YOUR INSTRUMENTATION KEY GOES HERE>'

            # log requests, traces and exceptions to the Application Insights service
            appinsights = AppInsights(app)

            # define a simple route
            @app.route('/')
            def hello_world():
                return 'Hello World!'

            # run the application
            if __name__ == '__main__':
                app.run()
    """
    def __init__(self, app=None, context=None):
        """
        Initialize a new instance of the extension.

        Args:
            app (flask.Flask): the Flask application for which to initialize the extension.
            context: the telemetry context to use; if None, an AI4ETelemetryContext is created.
        """
        socket.setdefaulttimeout(30)
        self._key_grantee = None
        self._key_ai4e = None
        self._endpoint_uri = None
        self._channel = None
        self._requests_middleware = None
        self._trace_log_handler_grantee = None
        self._trace_log_handler_ai4e = None
        self._exception_telemetry_client_grantee = None
        self._exception_telemetry_client_ai4e = None

        if app:
            self.init_app(app, context)

    def init_app(self, app, context):
        """
        Initializes the extension for the provided Flask application.

        Args:
            app (flask.Flask): the Flask application for which to initialize the extension.
            context: the telemetry context to use; if None, an AI4ETelemetryContext is created.
        """
        print("Starting application insights module.")
        self._key_grantee = app.config.get(CONF_KEY_GRANTEE) or getenv(
            CONF_KEY_GRANTEE)
        self._key_ai4e = app.config.get(CONF_KEY_AI4E) or getenv(CONF_KEY_AI4E)

        if (self._key_grantee and len(self._key_grantee.strip()) > 0):
            self._key_grantee = self._key_grantee.strip()
        else:
            self._key_grantee = None

        if (self._key_ai4e and len(self._key_ai4e.strip()) > 0):
            self._key_ai4e = self._key_ai4e.strip()
        else:
            self._key_ai4e = None

        if self._key_grantee:
            print("Grantee application insights key set.")

        if self._key_ai4e:
            print("AI4E application insights key set: " + str(self._key_ai4e))

        if not self._key_grantee and not self._key_ai4e:
            return

        self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)

        if self._endpoint_uri:
            sender = AsynchronousSender(self._endpoint_uri)
        else:
            sender = AsynchronousSender()

        queue = AsynchronousQueue(sender)

        if not context:
            context = AI4ETelemetryContext()

        self._channel = TelemetryChannel(context, queue)

        self._init_request_logging(app)
        self._init_trace_logging(app)
        self._init_exception_logging(app)

    def _init_request_logging(self, app):
        """
        Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
        is set in the Flask config.

        Args:
            app (flask.Flask): the Flask application for which to initialize the extension.
        """
        enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)

        if not enabled:
            return

        # If in the AI4E backend, only send uwsgi traces to AI4E
        wsgi_key = self._key_ai4e
        if not wsgi_key:
            wsgi_key = self._key_grantee

        self._requests_middleware = WSGIApplication(
            wsgi_key, app.wsgi_app, telemetry_channel=self._channel)

        app.wsgi_app = self._requests_middleware

    def _init_trace_logging(self, app):
        """
        Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
        set in the Flask config.

        Args:
            app (flask.Flask): the Flask application for which to initialize the extension.
        """
        enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)

        if not enabled:
            return

        if self._key_grantee:
            self._trace_log_handler_grantee = LoggingHandler(
                self._key_grantee, telemetry_channel=self._channel)

            app.logger.addHandler(self._trace_log_handler_grantee)

        if self._key_ai4e:
            print("Starting trace logging")
            self._trace_log_handler_ai4e = LoggingHandler(
                self._key_ai4e, telemetry_channel=self._channel)

            app.logger.addHandler(self._trace_log_handler_ai4e)

    def _init_exception_logging(self, app):
        """
        Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
        is set in the Flask config.

        Args:
            app (flask.Flask): the Flask application for which to initialize the extension.
        """
        enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)

        if not enabled:
            return

        if self._key_grantee:
            self._exception_telemetry_client_grantee = TelemetryClient(
                self._key_grantee, telemetry_channel=self._channel)

        if self._key_ai4e:
            self._exception_telemetry_client_ai4e = TelemetryClient(
                self._key_ai4e, telemetry_channel=self._channel)

        @app.errorhandler(Exception)
        def exception_handler(exception):
            try:
                raise exception
            except Exception:
                if self._exception_telemetry_client_grantee:
                    self._exception_telemetry_client_grantee.track_exception()

                if self._exception_telemetry_client_ai4e:
                    self._exception_telemetry_client_ai4e.track_exception()
            finally:
                raise exception

    def flush(self):
        """Flushes the queued up telemetry to the service.
        """
        print("trying all flush")
        if self._requests_middleware:
            self._requests_middleware.flush()

        if self._trace_log_handler_grantee:
            self._trace_log_handler_grantee.flush()

        if self._trace_log_handler_ai4e:
            print("Trying trace flush...")
            self._trace_log_handler_ai4e.flush()
            print("Trace flush finsihed.")

        if self._exception_telemetry_client_grantee:
            self._exception_telemetry_client_grantee.flush()

        if self._exception_telemetry_client_ai4e:
            self._exception_telemetry_client_ai4e.flush()