def intercept_excepthook(type, value, traceback):
    client = TelemetryClient('temp_key', telemetry_channel)
    for instrumentation_key in enabled_instrumentation_keys:
        client.context.instrumentation_key = instrumentation_key
        client.track_exception(type, value, traceback)
    client.flush()
    original_excepthook(type, value, traceback)
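This hook assumes module-level state (original_excepthook, enabled_instrumentation_keys, telemetry_channel) set up elsewhere. A minimal sketch of how it might be installed, assuming that setup:

import sys

original_excepthook = sys.excepthook   # keep the default handler so it can be chained
sys.excepthook = intercept_excepthook  # route unhandled exceptions through App Insights first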
Example #2
def upload(data_to_save):
    from applicationinsights import TelemetryClient
    from applicationinsights.exceptions import enable

    client = TelemetryClient(INSTRUMENTATION_KEY)
    enable(INSTRUMENTATION_KEY)

    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('{}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for record in data_to_save:
        client.track_event(record['name'], record['properties'])

    client.flush()

    if in_diagnostic_mode():
        json.dump(data_to_save, sys.stdout, indent=2, sort_keys=True)
        sys.stdout.write('\nTelemetry upload completes\n')
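The snippet assumes an in_diagnostic_mode() helper that is not shown. A minimal stand-in, assuming diagnostics are toggled by an environment variable (the variable name here is hypothetical):

import os

def in_diagnostic_mode():
    # Hypothetical helper: any non-empty TELEMETRY_DIAGNOSTICS value enables verbose output.
    return bool(os.environ.get('TELEMETRY_DIAGNOSTICS'))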
class Telemetry:
    def __init__(self):
        try:
            self.telemetry = TelemetryClient(IKEY)
            if os.path.exists("telemetry.config"):
                config_file = open("telemetry.config", "r")
                if config_file.read() == "1":
                    self.enable_telemetry = True
                else:
                    self.enable_telemetry = False
            else:
                self.enable_telemetry = self._query_yes_no(PROMPT_TEXT)
                config_file = open("telemetry.config", "w")
                if self.enable_telemetry:
                    config_file.write("1")
                    self.telemetry.track_event("yes", {"device": DEVICE, "language": LANGUAGE})
                else:
                    config_file.write("0")
                    self.telemetry.context.location.ip = "0.0.0.0"
                    self.telemetry.track_event("no", {"device": DEVICE, "language": LANGUAGE})
            self.telemetry.flush()
        except:
            pass

    def send_telemetry_data(self, iot_hub_name, event, message):
        try:
            if self.enable_telemetry:
                hash_mac = self._get_mac_hash()
                hash_iot_hub_name = hashlib.sha256(iot_hub_name.encode("utf-8")).hexdigest()
                self.telemetry.track_event(event, {"iothub": hash_iot_hub_name, "message": message,
                                            "language": LANGUAGE, "device": DEVICE, "mac": hash_mac,
                                            "osType": platform.system(), "osPlatform": platform.dist()[0],
                                            "osRelease": platform.dist()[1]})
                self.telemetry.flush()
        except:
            pass

    def _get_mac_hash(self):
        mac = ":".join(re.findall("..", "%012x" % uuid.getnode()))
        return hashlib.sha256(mac.encode("utf-8")).hexdigest()

    def _query_yes_no(self, question):
        global input
        default = "y"
        valid = {"y": True, "n": False}
        prompt = " [Y/n] "
        while True:
            sys.stdout.write(question + prompt)
            try:
                input = raw_input
            except NameError:
                pass
            choice = input().lower()
            if default is not None and choice == "":
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'y' or 'n' ")
 def test_track_exception_works_as_expected(self):
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
     client.context.device = None
     try:
         raise Exception("blah")
     except Exception as e:
         client.track_exception(*sys.exc_info(), properties={}, measurements={ 'x': 42 })
         client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "handledAt": "UserCode", "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryClient.py", "line": 0}]}], "measurements": {"x": 42}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     for item in sender.data.data.base_data.exceptions:
         for frame in item.parsed_stack:
             frame.file_name = os.path.basename(frame.file_name)
             frame.line = 0
     actual = json.dumps(sender.data.write())
     self.assertEqual(expected, actual)
     try:
         raise Exception("blah")
     except Exception as e:
         client.track_exception()
         client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "handledAt": "UserCode", "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryClient.py", "line": 0}]}]}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     for item in sender.data.data.base_data.exceptions:
         for frame in item.parsed_stack:
             frame.file_name = os.path.basename(frame.file_name)
             frame.line = 0
     actual = json.dumps(sender.data.write())
     self.assertEqual(expected, actual)
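These tests rely on a MockTelemetrySender that is not shown. A minimal stand-in, assuming the queue only needs an object exposing send_buffer_size and send() (the SDK's real test mock may differ):

class MockTelemetrySender(object):
    def __init__(self):
        self.send_buffer_size = 1  # flush one envelope at a time
        self.data = None           # last envelope handed to send()

    def start(self):
        pass  # SynchronousQueue flushes inline, so nothing to start

    def send(self, data_to_send):
        self.data = data_to_send[0]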
 def test_track_metric_works_as_expected(self):
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
     client.context.device = None
     client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
     client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"foo": "bar"}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     actual = json.dumps(sender.data.write())
     self.maxDiff = None
     self.assertEqual(expected, actual)
 def test_track_pageview_works_as_expected(self):
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
     client.context.device = None
     client.track_pageview('test', 'http://tempuri.org', 13, { 'foo': 'bar' }, { 'x': 42 })
     client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.PageView", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "PageViewData", "baseData": {"ver": 2, "url": "http://tempuri.org", "name": "test", "duration": 13, "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     actual = json.dumps(sender.data.write())
     self.maxDiff = None
     self.assertEqual(expected, actual)
Example #7
def _send_feedback(score, response_what_changes, response_do_well, email_address):
    from applicationinsights import TelemetryClient
    tc = TelemetryClient(INSTRUMENTATION_KEY)
    tc.context.application.ver = core_version
    version_components, version_python = _get_version_info()
    tc.track_event(
        EVENT_NAME,
        {'response_what_changes': response_what_changes,
         'response_do_well': response_do_well,
         'response_email_address': email_address,
         'version_components': version_components,
         'version_python': version_python},
        {'response_net_promoter_score': score})
    tc.flush()
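A hypothetical invocation of the function above (all argument values are illustrative only):

_send_feedback(
    score=9,
    response_what_changes='faster startup',
    response_do_well='clear error messages',
    email_address='user@example.com')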
 def test_track_request_works_as_expected(self):
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient(channel.TelemetryChannel(context=None, queue=queue))
     client.context.instrumentation_key = '99999999-9999-9999-9999-999999999999'
     client.context.device = None
     client.track_request('test', 'http://tempuri.org', True, 'START_TIME', 13, '42', 'OPTIONS', { 'foo': 'bar' }, { 'x': 42 })
     client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Request", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "RequestData", "baseData": {"ver": 2, "id": "ID_PLACEHOLDER", "name": "test", "startTime": "START_TIME", "duration": "00:00:00.013", "responseCode": "42", "success": true, "httpMethod": "OPTIONS", "url": "http://tempuri.org", "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     sender.data.data.base_data.id = 'ID_PLACEHOLDER'
     actual = json.dumps(sender.data.write())
     self.maxDiff = None
     self.assertEqual(expected, actual)
 def test_track_event_works_as_expected(self):
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
     client.context.device = None
     client.track_event('test', { 'foo': 'bar' }, { 'x': 42 })
     client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test", "properties": {"foo": "bar"}, "measurements": {"x": 42}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
     sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
     sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
     sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
     actual = json.dumps(sender.data.write())
     self.maxDiff = None
     self.assertEqual(expected, actual)
    def test_track_dependency_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient(channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.instrumentation_key = '99999999-9999-9999-9999-999999999999'
        client.context.device = None
        client.track_dependency('test', 'COMMAND_PLACEHOLDER', 'HTTP', 'localhost', 13, True, 200, { 'foo': 'bar' }, { 'x': 42 }, 'ID_PLACEHOLDER')
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.RemoteDependency", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "RemoteDependencyData", "baseData": {"ver": 2, "name": "test", "id": "ID_PLACEHOLDER", "resultCode": "200", "duration": "00:00:00.013", "success": true, "data": "COMMAND_PLACEHOLDER", "target": "localhost", "type": "HTTP", "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_event_with_merged_context_properties_works_as_expected(self):
        key = '99999999-9999-9999-9999-999999999999'
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)

        chan = channel.TelemetryChannel(queue=queue)
        chan.context.properties['foo'] = 'bar'

        client1 = TelemetryClient(key, chan)
        client1.context.device = None
        client1.context.properties['x'] = 42

        client2 = TelemetryClient(key, chan)
        client2.context.device = None
        client2.context.properties['x'] = 84

        client1.track_event('test 1')
        client1.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 1", "properties": {"foo": "bar", "x": 42}}}}'
        self.maxDiff = None
        self.assertEqual(expected, actual)

        client2.track_event('test 2')
        client2.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 2", "properties": {"foo": "bar", "x": 84}}}}'
        self.assertEqual(expected, actual)
Example #12
def upload(data_to_save):
    from applicationinsights import TelemetryClient
    from applicationinsights.exceptions import enable

    client = TelemetryClient(INSTRUMENTATION_KEY)
    enable(INSTRUMENTATION_KEY)

    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')
        sys.stdout.write('Got data {}\n'.format(json.dumps(json.loads(data_to_save), indent=2)))

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('{}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for record in data_to_save:
        name = record['name']
        raw_properties = record['properties']
        properties = {}
        measurements = {}
        for k in raw_properties:
            v = raw_properties[k]
            if isinstance(v, six.string_types):
                properties[k] = v
            else:
                measurements[k] = v
        client.track_event(record['name'], properties, measurements)

        if in_diagnostic_mode():
            sys.stdout.write('\nTrack Event: {}\nProperties: {}\nMeasurements: {}'.format(
                name, json.dumps(properties, indent=2), json.dumps(measurements, indent=2)))

    client.flush()

    if in_diagnostic_mode():
        sys.stdout.write('\nTelemetry upload completes\n')
Example #13
def upload(data_to_save):
    if in_diagnostic_mode():
        sys.stdout.write('Telemetry upload begins\n')
        sys.stdout.write('Got data {}\n'.format(json.dumps(json.loads(data_to_save), indent=2)))

    try:
        data_to_save = json.loads(data_to_save.replace("'", '"'))
    except Exception as err:  # pylint: disable=broad-except
        if in_diagnostic_mode():
            sys.stdout.write('ERROR: {}\n'.format(str(err)))
            sys.stdout.write('Raw [{}]\n'.format(data_to_save))

    for instrumentation_key in data_to_save:
        client = TelemetryClient(instrumentation_key=instrumentation_key,
                                 telemetry_channel=TelemetryChannel(queue=SynchronousQueue(LimitedRetrySender())))
        enable(instrumentation_key)

        for record in data_to_save[instrumentation_key]:
            name = record['name']
            raw_properties = record['properties']
            properties = {}
            measurements = {}
            for k, v in raw_properties.items():
                if isinstance(v, six.string_types):
                    properties[k] = v
                else:
                    measurements[k] = v
            client.track_event(record['name'], properties, measurements)

            if in_diagnostic_mode():
                sys.stdout.write(
                    '\nTrack Event: {}\nProperties: {}\nMeasurements: {}'.format(name, json.dumps(properties, indent=2),
                                                                                 json.dumps(measurements, indent=2)))

        client.flush()

    if in_diagnostic_mode():
        sys.stdout.write('\nTelemetry upload completes\n')
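This variant wires in a LimitedRetrySender that is not shown. A sketch, assuming it posts each batch once to the default ingestion endpoint and never retries (the real class may cap retries differently):

import json
try:
    from urllib import request as url_request   # Python 3
except ImportError:
    import urllib2 as url_request               # Python 2

class LimitedRetrySender(object):
    """Sketch: send each batch of envelopes exactly once; drop it on failure."""
    service_endpoint_uri = 'https://dc.services.visualstudio.com/v2/track'

    def __init__(self):
        self.send_buffer_size = 100
        self.queue = None  # assigned by the queue that owns this sender

    def start(self):
        pass  # synchronous use only

    def send(self, data_to_send):
        payload = json.dumps([envelope.write() for envelope in data_to_send]).encode('utf-8')
        request = url_request.Request(self.service_endpoint_uri, payload,
                                      {'Accept': 'application/json',
                                       'Content-Type': 'application/json; charset=utf-8'})
        try:
            url_request.urlopen(request, timeout=10)
        except Exception:  # pylint: disable=broad-except
            pass  # one attempt only; never re-enqueue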
    def test_track_event_processor_filtered(self):
        def process(data, context):
            return False # Filter the event

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_event('test', { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        self.assertEqual(None, sender.data)
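Processors follow the same process(data, context) -> bool contract throughout these tests. A sketch of one that scrubs instead of filters (the 'email' key is hypothetical):

def scrub_processor(data, context):
    properties = getattr(data, 'properties', None)
    if properties:
        properties.pop('email', None)  # drop a sensitive key before sending
    return True  # True keeps the envelope; False would filter it out

# client.add_telemetry_processor(scrub_processor)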
 def __init__(self):
     try:
         self.telemetry = TelemetryClient(IKEY)
         if os.path.exists("telemetry.config"):
             config_file = open("telemetry.config", "r")
             if config_file.read() == "1":
                 self.enable_telemetry = True
             else:
                 self.enable_telemetry = False
         else:
             self.enable_telemetry = self._query_yes_no(PROMPT_TEXT)
             config_file = open("telemetry.config", "w")
             if self.enable_telemetry:
                 config_file.write("1")
                 self.telemetry.track_event("yes", {"device": DEVICE, "language": LANGUAGE})
             else:
                 config_file.write("0")
                 self.telemetry.context.location.ip = "0.0.0.0"
                 self.telemetry.track_event("no", {"device": DEVICE, "language": LANGUAGE})
         self.telemetry.flush()
     except:
         pass
 def test_track_metric_works_as_expected(self):
     def process(data, context):
         data.properties["NEW_PROP"] = "MYPROP"
         context.user.id = "BOTUSER"
         context.session.id = "BOTSESSION"
         return True
     sender = MockTelemetrySender()
     queue = channel.SynchronousQueue(sender)
     client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
     client.add_telemetry_processor(process)
     client.context.device = None
     client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
     client.flush()
     expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}}}}'
     sender.data.time = 'TIME_PLACEHOLDER'
     sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
     sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
     sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
     sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
     sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
     actual = json.dumps(sender.data.write())
     self.maxDiff = None
     self.assertEqual(expected, actual)
    def test_track_event_modifes_options(self):
        def process(data, context):
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.context.properties['foo'] = 'bar'
        client.track_event('test')
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test", "properties": {"foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
    def test_track_pageview_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_pageview('test', 'http://tempuri.org', 13, { 'foo': 'bar' }, { 'x': 42 })
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.PageView", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "PageViewData", "baseData": {"ver": 2, "url": "http://tempuri.org", "name": "test", "duration": 13, "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
Example #19
class NodeStatsCollector:
    """
    Node Stats Manager class
    """
    def __init__(self,
                 pool_id,
                 node_id,
                 refresh_interval=_DEFAULT_STATS_UPDATE_INTERVAL,
                 app_insights_key=None):
        self.pool_id = pool_id
        self.node_id = node_id
        self.telemetry_client = None
        self.first_collect = True
        self.refresh_interval = refresh_interval

        self.disk = IOThroughputAggregator()
        self.network = IOThroughputAggregator()

        if app_insights_key or 'APP_INSIGHTS_INSTRUMENTATION_KEY' in os.environ or 'APP_INSIGHTS_KEY' in os.environ:
            key = (app_insights_key
                   or os.environ.get('APP_INSIGHTS_INSTRUMENTATION_KEY')
                   or os.environ.get('APP_INSIGHTS_KEY'))

            logger.info(
                "Detected instrumentation key. Will upload stats to app insights"
            )
            self.telemetry_client = TelemetryClient(key)
            context = self.telemetry_client.context
            context.application.id = 'AzureBatchInsights'
            context.application.ver = VERSION
            context.device.model = "BatchNode"
            context.device.role_name = self.pool_id
            context.device.role_instance = self.node_id
        else:
            logger.info(
                "No instrumentation key detected. Cannot upload to app insights. "
                "Make sure you have the APP_INSIGHTS_INSTRUMENTATION_KEY "
                "environment variable set up")

    def init(self):
        """
            Initialize the monitoring
        """
        # start cpu utilization monitoring, first value is ignored
        psutil.cpu_percent(interval=None, percpu=True)

    def _get_network_usage(self):
        netio = psutil.net_io_counters()
        return self.network.aggregate(netio.bytes_recv, netio.bytes_sent)

    def _get_disk_usage(self):
        diskio = psutil.disk_io_counters()
        return self.disk.aggregate(diskio.read_bytes, diskio.write_bytes)

    def _sample_stats(self):
        # get system-wide counters
        mem = psutil.virtual_memory()
        disk_stats = self._get_disk_usage()
        net_stats = self._get_network_usage()

        swap_total, _, swap_avail, _, _, _ = psutil.swap_memory()

        stats = NodeStats(
            cpu_count=psutil.cpu_count(),
            cpu_percent=psutil.cpu_percent(interval=None, percpu=True),
            num_pids=len(psutil.pids()),

            # Memory
            mem_total=mem.total,
            mem_avail=mem.available,
            swap_total=swap_total,
            swap_avail=swap_avail,

            # Disk IO
            disk=disk_stats,

            # Net transfer
            net=net_stats,
        )
        del mem
        return stats

    def _collect_stats(self):
        """
            Collect the stats and then send to app insights
        """
        # collect stats
        stats = self._sample_stats()

        if self.first_collect:
            self.first_collect = False
            return

        if stats is None:
            logger.error("Could not sample node stats")
            return

        if self.telemetry_client:
            self._send_stats(stats)
        else:
            self._log_stats(stats)

    def _send_stats(self, stats):
        """
            Retrieve the current stats and send to app insights
        """
        process = psutil.Process(os.getpid())

        logger.debug("Uploading stats. Mem of this script: %d vs total: %d",
                     process.memory_info().rss, stats.mem_avail)
        client = self.telemetry_client

        for cpu_n in range(0, stats.cpu_count):
            client.track_metric("Cpu usage",
                                stats.cpu_percent[cpu_n],
                                properties={"Cpu #": cpu_n})

        client.track_metric("Memory used", stats.mem_used)
        client.track_metric("Memory available", stats.mem_avail)
        client.track_metric("Disk read", stats.disk.read_bps)
        client.track_metric("Disk write", stats.disk.write_bps)
        client.track_metric("Network read", stats.net.read_bps)
        client.track_metric("Network write", stats.net.write_bps)
        self.telemetry_client.flush()

    def _log_stats(self, stats):
        logger.info(
            "========================= Stats =========================")
        logger.info("Cpu percent:            %d%% %s", avg(stats.cpu_percent),
                    stats.cpu_percent)
        logger.info("Memory used:       %sB / %sB", pretty_nb(stats.mem_used),
                    pretty_nb(stats.mem_total))
        logger.info("Swap used:         %sB / %sB",
                    pretty_nb(stats.swap_avail), pretty_nb(stats.swap_total))
        logger.info("Net read:               %sBs",
                    pretty_nb(stats.net.read_bps))
        logger.info("Net write:              %sBs",
                    pretty_nb(stats.net.write_bps))
        logger.info("Disk read:               %sBs",
                    pretty_nb(stats.disk.read_bps))
        logger.info("Disk write:              %sBs",
                    pretty_nb(stats.disk.write_bps))
        logger.info("-------------------------------------")
        logger.info("")

    def run(self):
        """
            Start collecting information of the system.
        """
        logger.debug("Start collecting stats for pool=%s node=%s",
                     self.pool_id, self.node_id)
        while True:
            self._collect_stats()
            time.sleep(self.refresh_interval)
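IOThroughputAggregator is referenced but not shown. A minimal sketch, assuming it converts cumulative byte counters into per-second rates between successive calls:

import time
from collections import namedtuple

IOStats = namedtuple('IOStats', ['read_bps', 'write_bps'])

class IOThroughputAggregator:
    def __init__(self):
        self.last_time = None
        self.last_read = 0
        self.last_write = 0

    def aggregate(self, read_bytes, write_bytes):
        now = time.time()
        if self.last_time is None:
            rates = IOStats(0.0, 0.0)  # first sample has no baseline
        else:
            elapsed = max(now - self.last_time, 1e-6)
            rates = IOStats((read_bytes - self.last_read) / elapsed,
                            (write_bytes - self.last_write) / elapsed)
        self.last_time, self.last_read, self.last_write = now, read_bytes, write_bytes
        return rates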
Example #20
 def _initialize(self):
     if self.tc is not None:
         return
     self.instrumentation_key = AppInsightsHelper.get_instrumentation_key(self.config['url'])
     self.tc = TelemetryClient(self.instrumentation_key)
     self.subscription_id = local_session(self.ctx.policy.session_factory).get_subscription_id()
 def test_add_null_telemetry_processor(self):
     client = TelemetryClient('foo')
     self.assertRaises(TypeError, lambda:client.add_telemetry_processor(None))
Example #22
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

# pylint: disable=line-too-long, import-error, broad-except
import json
from applicationinsights import TelemetryClient
from .repair_utils import _get_current_vmrepair_version

# For test releases and testing
TEST_KEY = 'a6bdff92-33b5-426f-9123-33875d0ae98b'
PROD_KEY = '3e7130f2-759b-41d4-afb8-f1405d1d7ed9'

tc = TelemetryClient(PROD_KEY)
tc.context.application.ver = _get_current_vmrepair_version()


def _track_command_telemetry(logger, command_name, parameters, status, message,
                             error_message, error_stack_trace, duration,
                             subscription_id, result_json):
    try:
        properties = {
            'command_name': command_name,
            'parameters': json.dumps(parameters),
            'command_status': status,
            'message': message,
            'error_message': error_message,
            'error_stack_trace': error_stack_trace,
            'subscription_id': subscription_id,
            'result_json': json.dumps(result_json)
        }
        # assumed completion: the original snippet is truncated here
        tc.track_event(command_name, properties)
        tc.flush()
    except Exception:
        pass
    def test_track_event_with_common_processor_two_clients(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        key = '99999999-9999-9999-9999-999999999999'
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)

        chan = channel.TelemetryChannel(queue=queue)
        chan.context.properties['foo'] = 'bar'

        client1 = TelemetryClient(key, chan)
        client1.add_telemetry_processor(process)
        client1.context.device = None
        client1.context.properties['x'] = 42

        client2 = TelemetryClient(key, chan)
        client2.add_telemetry_processor(process)
        client2.context.device = None
        client2.context.properties['x'] = 84

        client1.track_event('test 1')
        client1.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags[
            'ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 1", "properties": {"NEW_PROP": "MYPROP", "foo": "bar", "x": 42}}}}'
        self.maxDiff = None
        self.assertEqual(expected, actual)

        client2.track_event('test 2')
        client2.flush()
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags[
            'ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Event", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "EventData", "baseData": {"ver": 2, "name": "test 2", "properties": {"NEW_PROP": "MYPROP", "foo": "bar", "x": 84}}}}'
        self.assertEqual(expected, actual)
Example #24
class RaftUtils():
    def __init__(self, tool_name):
        from applicationinsights import TelemetryClient
        from azure.servicebus import ServiceBusClient, ServiceBusMessage
        self.config = task_config()

        connection_str = os.environ['RAFT_SB_OUT_SAS']

        self.sb_client = ServiceBusClient.from_connection_string(
            connection_str)
        self.topic_client = self.sb_client.get_topic_sender(
            self.sb_client._entity_name)

        self.telemetry_client = TelemetryClient(
            instrumentation_key=os.environ['RAFT_APP_INSIGHTS_KEY'])

        self.job_id = os.environ['RAFT_JOB_ID']
        self.container_name = os.environ['RAFT_CONTAINER_NAME']
        self.tool_name = tool_name

        self.telemetry_properties = {
            "jobId": self.job_id,
            "taskIndex": os.environ['RAFT_TASK_INDEX'],
            "containerName": self.container_name
        }

        self.newSbMessage = ServiceBusMessage

    def report_bug(self, bugDetails):
        m = {
            'eventType': 'BugFound',
            'message': {
                'tool': self.tool_name,
                'jobId': self.job_id,
                'agentName': self.container_name,
                'bugDetails': bugDetails
            }
        }
        msg = self.newSbMessage(str.encode(json.dumps(m)))
        self.topic_client.send_messages([msg])

    def report_status(self, state, details):
        m = {
            'eventType': 'JobStatus',
            'message': {
                'tool': self.tool_name,
                'jobId': self.job_id,
                'agentName': self.container_name,
                'details': details,
                'utcEventTime': time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.gmtime()),
                'state': state
            }
        }
        msg = self.newSbMessage(str.encode(json.dumps(m)))
        self.topic_client.send_messages([msg])

    def report_status_created(self, details=None):
        self.report_status('Created', details)

    def report_status_running(self, details=None):
        self.report_status('Running', details)

    def report_status_error(self, details=None):
        self.report_status('Error', details)

    def report_status_completed(self, details=None):
        self.report_status('Completed', details)

    def log_trace(self, trace):
        self.telemetry_client.track_trace(trace,
                                          properties=self.telemetry_properties)

    def log_exception(self):
        self.telemetry_client.track_exception(
            properties=self.telemetry_properties)

    def flush(self):
        self.telemetry_client.flush()
        self.sb_client.close()
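Hypothetical usage of the class above, assuming the RAFT_* environment variables are set by the host:

utils = RaftUtils('my-fuzzer')
utils.report_status_created()
utils.log_trace('agent initialized')
utils.report_status_completed()
utils.flush()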
Example #25
INSIGHTS_CONN_STRING = "InstrumentationKey=61371d7a-1887-468d-ab9c-b62b4731fe33;IngestionEndpoint=https://westus2-2.in.applicationinsights.azure.com/"

# Logging
logger = logging.getLogger(__name__)  # TODO: Setup logger
#handler = AzureLogHandler(connection_string = INSIGHTS_CONN_STRING)
logger.addHandler(AzureEventHandler(connection_string=INSIGHTS_CONN_STRING))

#logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Metrics
exporter = metrics_exporter.new_metrics_exporter(
    enable_standard_metrics=True, connection_string=INSIGHTS_CONN_STRING)

# Tracing
tracer = TelemetryClient(INSIGHTS_CONN_STRING)

app = Flask(__name__)

# Requests
middleware = FlaskMiddleware(
    app,
    exporter=AzureExporter(connection_string=INSIGHTS_CONN_STRING),
    sampler=ProbabilitySampler(1.0),
)

# Load configurations from environment or config file
app.config.from_pyfile('config_file.cfg')

if ("VOTE1VALUE" in os.environ and os.environ['VOTE1VALUE']):
    button1 = os.environ['VOTE1VALUE']
Example #26
    def init_app(self, key, max_queue_length=0):
        if not key or len(key) == 0:
            return

        self._client = TelemetryClient(key)
        self._client.channel.queue.max_queue_length = max_queue_length
######################################################
##                        AUTH                      ##
######################################################
connection_string = os.environ.get("CONNECTION_STRING")
cnxn = pyodbc.connect(connection_string)
cursor = cnxn.cursor()

######################################################
##                       QUERY                      ##
######################################################

test_id = os.environ.get("TEST_ID", str(uuid.uuid4()))

instrumentation_key = os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY")
telemetry_client = None
if instrumentation_key:
    telemetry_client = TelemetryClient(instrumentation_key)

print("Test run for '{}' started.".format(test_id))

queries_total = int(os.environ.get("QUERIES_TOTAL", -1))
queries_executed = 0

while queries_executed < queries_total or queries_total < 0:
    raw_query = get_query()
    print("\nTest '{}' executing #{}:\n{}\n".format(test_id, queries_executed,
                                                    raw_query))

    t = timeit.Timer(functools.partial(cursor.execute, raw_query))
    query_time = t.timeit(number=1)

    print("Query took: {:.2f} seconds".format(query_time))
def send_events(client: TelemetryClient, num_events: int):
    for _ in range(num_events):
        event = generate_event_name()
        properties = generate_event_properties()
        client.track_event(event, properties)
        LOG.info('sent event %s %r', event, properties)
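generate_event_name() and generate_event_properties() are assumed helpers. Hypothetical stand-ins:

import random
import string

def generate_event_name():
    return 'load_test_' + random.choice(('click', 'view', 'purchase'))

def generate_event_properties():
    return {'run_id': ''.join(random.choice(string.ascii_lowercase) for _ in range(6))}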
Example #29
class Telemetry(metaclass=Singleton):
    """Singleton class that handles telemetry sending to AppInsights."""
    def __init__(self, toggle):
        """Initialize Telemetry instance."""
        self._toggle = toggle
        if self._toggle:
            self._telemetry_client = TelemetryClient(APP_INSIGHTS_KEY)
            self._telemetry_channel = self._setup_telemetry_channel()
            print("Telemetry enabled.")
        else:
            self._telemetry_client = None
            self._telemetry_channel = None
            print("Telemetry disabled.")

    def track_event(self, name, properties=None, measurements=None):
        """Track a telemetry event."""
        try:
            self._telemetry_client.track_event(name, properties, measurements)
        except AttributeError:
            print(f"Telemetry Disabled: Event Name: {name}")
            print(f"properties: {properties}")
            print(f"measurements: {measurements}")

    def track_metric(self,
                     name,
                     value,
                     type=None,
                     count=None,
                     min=None,
                     max=None,
                     std_dev=None,
                     properties=None):
        """Track a telemetry metric."""
        try:
            self._telemetry_client.track_metric(name, value, type, count, min,
                                                max, std_dev, properties)
        except AttributeError:
            print(f"Telemetry Disabled: Metric Name: {name}")
            print(f"value: {value}")
            if type:
                print(f"type: {type}")
            if count:
                print(f"count: {count}")
            if min:
                print(f"min: {min}")
            if max:
                print(f"max: {max}")
            if std_dev:
                print(f"std_dev: {std_dev}")
            if properties:
                print(f"properties: {properties}")

    def flush(self):
        """Flush the telemetry client info to AppInsights."""
        try:
            self._telemetry_client.flush()
        except AttributeError:
            pass

    def _setup_telemetry_channel(self):
        """Create telemetry_channel object.

        Instantiates a telemetry channel that collects unhandled exceptions.

        Return:
            telemetry_channel

        """
        from applicationinsights.exceptions import enable
        from applicationinsights import channel

        # set up channel with context
        telemetry_channel = channel.TelemetryChannel()
        telemetry_channel.context.application.ver = get_version()
        # set up exception capture
        telemetry_channel.context.properties['capture'] = 'exceptions'
        enable(APP_INSIGHTS_KEY, telemetry_channel=telemetry_channel)

        return telemetry_channel
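Hypothetical usage of the singleton above (APP_INSIGHTS_KEY is assumed to be defined at module level):

telemetry = Telemetry(toggle=True)
telemetry.track_event('app_started', properties={'version': '1.0'})
telemetry.track_metric('startup_seconds', 1.7)
telemetry.flush()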
Example #30
 def __init__(self, key):
     from applicationinsights import TelemetryClient
     self.tc = TelemetryClient(key)
tckey = None
tc = None
servicename = 'samplelogreg_service'

try:
    debugstr = os.environ['DEBUG']
    if (debugstr == "True"):
        debug = True
    if (debugstr == "False"):
        debug = False
except Exception as e:
    printerr("No debug found.")

try:
    tckey = os.environ['TELEMETRY_CLIENT']
    tc = TelemetryClient(tckey)
    printerr(tckey)
except Exception as e:
    printerr("No appInsights instrumentation key found.")

try:
    environment = os.environ['ENVIRONMENT']
except Exception as e:
    printerr("No indication of environment found.")

try:
    model = joblib.load("models/logregModel.pkl")
except Exception as e:
    raise

 def test_add_null_telemetry_processor(self):
     client = TelemetryClient('foo')
     self.assertRaises(TypeError,
                       lambda: client.add_telemetry_processor(None))
Example #33
def process(filesrootfolder, forceinsert):

    # Use the current timestamp as the identity of this run
    process_id = time.time()

    tc = TelemetryClient('')

    tc.context.application.ver = '1.0'
    tc.context.properties["PROCESS_PROGRAM"] = "BATCH_CSV_V001a"
    tc.context.properties["PROCESS_START"] = time.time()
    tc.context.properties["DATA_FOLDER"] = filesrootfolder
    tc.context.properties["PROCESS_ID"] = process_id

    tc.track_trace('START RUN BATCH INGEST CSV DATA from folder ' +
                   filesrootfolder)
    tc.track_event('BATCH_INGEST_CSV_START', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {})
    tc.flush()
    #print (vm_uuid,deploy_uuid,config_uuid)

    # Prepare COSMOS Link

    url = COSMOS_URL
    #key = os.environ['ACCOUNT_KEY']
    key = COSMOS_KEY
    client = cosmos_client.CosmosClient(url, {'masterKey': key})
    database_id = COSMOS_DATABASE
    container_id = COSMOS_CONTAINER

    database_link = 'dbs/' + database_id
    collection_link = database_link + '/colls/' + container_id

    doc_id = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_Metric'
    doc_link = collection_link + '/docs/' + doc_id

    options = {}
    options['enableCrossPartitionQuery'] = True
    options['maxItemCount'] = 5
    options['partitionKey'] = vm_uuid

    proc_log_doc = None
    try:
        proc_log_doc = client.ReadItem(doc_link, options)
    except:
        print("New Process  Metric Doc")

    if (proc_log_doc is not None):
        print("Find Existing  Metric Doc ")

        if str(forceinsert).lower() != 'true':
            # stop processing if the data has already been processed
            return 400, doc_id + " has already been processed"

    else:  # New process log
        proc_log_doc = {}
        proc_log_doc["PROCESSES"] = []
        proc_log_doc["DOC_TYPE"] = "PROCESS_METRIC"
        proc_log_doc["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
        proc_log_doc['id'] = doc_id

    tc.track_event('BATCH_INGEST_METRIC_CSV', {'PROCESS_ID': process_id},
                   {'DATA_FOLDER': filesrootfolder})
    #+'_'+config_uuid+'_'+deploy_uuid , { 'DATA_FOLDER': telemetriespath }
    tc.flush()
    proc_log_this = {}
    proc_log_this["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
    proc_log_this["PROCESS_START"] = time.time()
    proc_log_this["DATA_FOLDER"] = filesrootfolder
    proc_log_this[
        'id'] = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_' + str(
            process_id)

    error_files, merged_files, source_files = merge_rename_core_columns_CSV(
        vm_uuid, deploy_uuid, config_uuid, 'defualt_metrics_csv_001A', 0,
        SOURCE_CSV_CONTAINER, filesrootfolder, FILE_OUTPUT_FOLDER, process_id)

    # ToDo  ...
    proc_log_this["PROCESS_ID"] = process_id
    proc_log_this["ERROR_SOURCE_FILES_COUNT"] = len(error_files)
    proc_log_this["SOURCE_FILES_COUNT"] = len(source_files)

    tc.track_metric('BATCH_INGEST_CSV_ERROR_SOURCE_FILES_COUNT',
                    len(error_files))
    tc.track_metric('BATCH_INGEST_CSV_SOURCE_FILES_COUNT',
                    len(source_files))
    tc.flush()

    # print(str(len(error_files)),'  ',str(len(merged_files)))

    proc_log_this["PROCESS_END"] = time.time()
    proc_log_this["STATUS"] = "OK"

    proc_log_this["STATUS_MESSAGE"] = (
        "It takes %s seconds to ingest  CSV file from Blob Storage") % (
            proc_log_this["PROCESS_END"] - proc_log_this["PROCESS_START"])

    proc_log_doc["PROCESSES"].append(proc_log_this)
    proc_log_doc['LATEST_UPDATE_TIMESTAMP'] = time.time()

    # Update Process Log
    client.UpsertItem(collection_link, proc_log_doc, options)

    tc.track_trace('END RUN BATCH INGEST METRIC CSV DATA from folder ' +
                   filesrootfolder)

    tc.track_event('BATCH_INGEST_METRIC_CSV_END', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {
        'DEFECT_FILES_COUNT': len(error_files),
        'MERGED_FILES_COUNT': len(merged_files),
        'SOURCE_FILES_COUNT': len(source_files)
    })
    tc.flush()
    def test_track_exception_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient(
            '99999999-9999-9999-9999-999999999999',
            channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception(*sys.exc_info(),
                                   properties={},
                                   measurements={'x': 42})
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags[
            'ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception()
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags[
            'ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.assertEqual(expected, actual)
Example #35
def merge_rename_core_columns_CSV(vm_uuid, deploy_uuid, config_uuid,
                                  schema_ver, inject_ver, container_name,
                                  filesrootfolder, fileoutputfolder,
                                  process_id):
    #block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,  sas_token=SOURCE_CSV_BLOB_TOKEN)
    block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,
                                          account_key=SOURCE_CSV_BLOB_KEY)
    tc = TelemetryClient('')
    print("Start merge CSV ", vm_uuid, ' ', deploy_uuid, ' ', config_uuid)

    blobs = []
    marker = None
    while True:
        batch = block_blob_service.list_blobs(container_name,
                                              prefix=filesrootfolder,
                                              marker=marker)
        blobs.extend(batch)
        if not batch.next_marker:
            break
        marker = batch.next_marker
    i = 0
    blobpaths = []
    for blob in blobs:
        blobpaths.append(blob.name)

    matchers = ['.csv']
    matching = [s for s in blobpaths if any(xs in s for xs in matchers)]

    mergelog = {}
    mergelog["vm_uuid"] = vm_uuid

    mergelog["process_type"] = "MERGE_METRIC_CSV"
    mergelog["DOC_TYPE"] = "MERGE_METRIC_FILES_LOG"
    mergelog["file_folder"] = filesrootfolder
    mergelog["process_time"] = time.time()
    mergelog["files"] = []
    mergelog["defect_files"] = []

    a_mergelog = copy.deepcopy(mergelog)

    dfagg = pd.DataFrame(columns=[])

    mixagg = AGGREGATION_FILES_NUM
    aggcount = 0
    aggcount_total = 0
    aggoutcount = 0
    aggsize = 0

    error_files = []
    merged_files = []
    total_rows = 0
    alldfs = []
    outfilenamebase = fileoutputfolder + filesrootfolder + "_aggr_"
    t1 = time.time()
    #print (outfilenamebase)
    source_col = ['']
    target_col = ['']

    tc.track_trace('Prepare to process ' + str(len(matching)) +
                   ' metric CSV files')
    tc.flush()

    for fname in matching:
        #print(aggcount)

        head, tail = os.path.split(fname)

        aggcount += 1
        aggcount_total += 1

        blobstring = block_blob_service.get_blob_to_text(
            container_name, fname).content
        aggsize += len(blobstring)

        #print('Prepare to merge '+str(aggcount_total)+' / '+str(len(matching))+' Memory '+str(aggsize)+' File Name: '+tail)
        #tc.track_trace('Prepare to merge '+tail)
        #tc.flush()

        try:  # Read CSV and try processing

            dfone = pd.read_csv(StringIO(blobstring))

            dfAll_cols = dfone.columns
            #colname0=dfAll_cols
            dfAll_newcols = []

            pc_name = re.search(r'(\\{2}.*\\)(.*\\)', dfAll_cols[1]).group(1)

            for col in dfAll_cols:
                dfAll_newcols.append(
                    col.replace(pc_name, '').replace('`', '').replace(
                        '\\', '').replace(' ', '').replace('/', '').replace(
                            '.', '').replace('-', '').replace('%', '').replace(
                                '(', '').replace(')', ''))

            dfAll_newcols[0] = "Universal_datetime"

            # Rename all columns
            dfone.columns = dfAll_newcols

            alldfs.append(dfone)
            a_mergelog['files'].append(tail)

            #if (aggcount>=mixagg) or (aggcount_total==len(matching)):
            if (aggsize > MAX_FILESIZE) or (aggcount_total == len(matching)):
                if (aggcount_total == len(matching)):
                    print("Processing Final File")
                    tc.track_trace('Processing Final File')
                    tc.flush()

                alldfs.append(pd.DataFrame(columns=source_col))
                dfagg = pd.concat(alldfs, ignore_index=True)
                dfagg_out = dfagg[source_col]
                dfagg_out.columns = target_col
                dfagg_out['schema_ver'] = schema_ver
                dfagg_out['inject_ver'] = inject_ver
                output = dfagg_out.to_csv(index=False, encoding="utf-8")
                outfile = outfilenamebase + str(aggoutcount) + ".csv"
                block_blob_service.create_blob_from_text(
                    container_name, outfile, output)
                print(
                    "Output aggregated file to " + container_name,
                    outfile + " Data Shape " + str(dfagg.shape) + ' uuid: ' +
                    str(vm_uuid) + str(deploy_uuid) + str(config_uuid))
                total_rows += dfagg_out.shape[0]

                merged_files.append(outfile)

                a_mergelog['output_file'] = outfile
                a_mergelog['merged_files_num'] = len(a_mergelog['files'])
                a_mergelog['defect_files_num'] = len(
                    a_mergelog['defect_files'])

                # Insert Process Log to COSMOS DB
                insert_json_cosmos(a_mergelog)
                a_mergelog = copy.deepcopy(mergelog)
                t2 = time.time()

                print(("It takes %s seconds to merge " + str(aggcount) +
                       " CSV Metrics") % (t2 - t1))
                aggoutcount += 1
                aggcount = 0
                aggsize = 0
                alldfs = []
                t1 = time.time()
                file_size = block_blob_service.get_blob_properties(
                    container_name, outfile).properties.content_length
                print(outfile + "  File Size " + str(file_size))

                # Ingest to ADX
                ingest_to_ADX(outfile, file_size)
        except Exception as e:
            print('Error While process ' + fname)
            error_class = e.__class__.__name__
            detail = e.args[0]
            cl, exc, tb = sys.exc_info()
            lastCallStack = traceback.extract_tb(tb)[-1]
            fileName = lastCallStack[0]
            lineNum = lastCallStack[1]
            funcName = lastCallStack[2]
            errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(
                fileName, lineNum, funcName, error_class, detail)

            print("Unexpected error:", sys.exc_info()[0])
            traceback.print_exc()

            msg = errMsg + traceback.format_exc()

            tc = TelemetryClient('')
            tc.context.application.ver = '1.0'
            tc.context.properties["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
            tc.context.properties["DATA_FOLDER"] = metricspath
            tc.track_trace(msg)

            tc.flush()
            # print("Unexpected error:", sys.exc_info()[0])
            a_mergelog["defect_files"].append(tail)
            error_files.append(fname)  # record the malformed CSV file
    print('Total Rows ' + str(total_rows))

    tc.track_trace('Processed Rows: ' + str(total_rows))
    tc.track_metric('BATCH_INGEST_METRIC_CSV_TOTAL_ROWS', total_rows)
    tc.flush()
    return error_files, merged_files, matching
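
The listing loop above relies on the legacy (pre-12.x) azure-storage SDK's marker-based paging: each list_blobs call returns one page plus a next_marker that must be fed back into the following call, otherwise the loop keeps re-reading the first page. A minimal sketch of the pattern in isolation (container and prefix names are placeholders):

from azure.storage.blob import BlockBlobService

def list_all_blob_names(service, container, prefix):
    """Collect every blob name under a prefix, one page at a time."""
    names = []
    marker = None
    while True:
        page = service.list_blobs(container, prefix=prefix,
                                  num_results=500, marker=marker)
        names.extend(blob.name for blob in page)
        if not page.next_marker:  # an empty marker means this was the last page
            break
        marker = page.next_marker
    return names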
Example #36
0
# Databricks notebook source
dbutils.widgets.text("loadid", "", "Load Id")
loadid = dbutils.widgets.get("loadid")

# COMMAND ----------

from applicationinsights import TelemetryClient
tc = TelemetryClient(dbutils.secrets.get(scope = "storage_scope", key = "appinsights_key"))

# COMMAND ----------

import datetime
import os
from pyspark.sql.functions import col, lit
import ddo_transform.transform as t
import ddo_transform.util as util

load_id = loadid
loaded_on = datetime.datetime.now()
base_path = 'dbfs:/mnt/datalake/data/dw/'

# Read interim cleansed data
parkingbay_sdf = spark.read.table("interim.parking_bay").filter(col('load_id') == lit(load_id))
sensordata_sdf = spark.read.table("interim.sensor").filter(col('load_id') == lit(load_id))

# COMMAND ----------

# MAGIC %md
# MAGIC ### Transform and load Dimension tables

# COMMAND ----------
Example #37
0
def index():

    if request.method == 'GET':

        # Get current values
        # TODO: use tracer object to trace cat vote
        with tracer.span(name=button1):
            vote1 = r.get(button1).decode('utf-8')
        # TODO: use tracer object to trace dog vote
        with tracer.span(name=button2):
            vote2 = r.get(button2).decode('utf-8')

        # Return index with values
        return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)

    elif request.method == 'POST':

        if request.form['vote'] == 'reset':

            vote1 = r.get(button1).decode('utf-8')
            vote2 = r.get(button2).decode('utf-8')
            # Empty table and return results
            r.set(button1,0)

            r.set(button2,0)

            try:
                if int(vote1) > 0:
                    properties = {'custom_dimensions': {'Cats Vote': vote1}}
                    # TODO: use logger object to log cat vote
                    with tracer.span(name=vote1) as span:
                        logger.warning('cat vote', extra=properties)

                if int(vote2) > 0:
                    properties = {'custom_dimensions': {'Dogs Vote': vote2}}
                    # TODO: use logger object to log dog vote
                    with tracer.span(name=vote2) as span:
                        logger.warning('dog vote', extra=properties)
            except ValueError as e:
                logger.error("Error - not integers {0}".format(e))

            return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)

        else:

            # Insert vote result into DB
            vote = request.form['vote']
            with tracer.span(name=vote) as span:
                r.incr(vote, 1)

            # Get current values
            vote1 = r.get(button1).decode('utf-8')
            vote2 = r.get(button2).decode('utf-8')

            tc = TelemetryClient(app.config['INSTRUMENTATION_KEY'])
            if vote == button1: #button1 = Cats
                tc.track_event('Cats vote')
            else: 
                tc.track_event('Dogs vote')

            tc.flush()

            # Return results
            return render_template("index.html", value1=int(vote1), value2=int(vote2), button1=button1, button2=button2, title=title)
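
One design wrinkle in the POST branch above: it builds a fresh TelemetryClient on every vote, recreating the sender and queue each time. A common alternative, sketched below with a placeholder key standing in for app.config['INSTRUMENTATION_KEY'], is a single module-level client shared by all handlers:

from applicationinsights import TelemetryClient

# Created once at import time and reused by every request handler.
tc = TelemetryClient('00000000-0000-0000-0000-000000000000')  # placeholder key

def record_vote(event_name):
    """Track one vote event and push it to the channel."""
    tc.track_event(event_name)
    tc.flush()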
Example #38
0
        'bird3' : { 'small': urls[6], 'medium': urls[7], 'large': urls[8] },
        'bird4' : { 'small': urls[9], 'medium': urls[10], 'large': urls[11] },
        'bird5' : { 'small': urls[12], 'medium': urls[13], 'large': urls[14] },
        'bird6' : { 'small': urls[15], 'medium': urls[16], 'large': urls[17] },
        'caption': caption,
        'elapsed': t1 - t0
    }
    return jsonify({'bird': response}), 201

@app.route('/', methods=['GET'])
def get_bird():
    return 'Version 1'

if __name__ == '__main__':
    t0 = time.time()
    tc = TelemetryClient(os.environ["TELEMETRY"])
    
    # gpu based
    cfg.CUDA = os.environ["GPU"].lower() == 'true'
    tc.track_event('container initializing', {"CUDA": str(cfg.CUDA)})

    # load word dictionaries
    wordtoix, ixtoword = word_index()
    # load models
    text_encoder, netG = models(len(wordtoix))
    # load blob service
    blob_service = BlockBlobService(account_name='attgan', account_key=os.environ["BLOB_KEY"])

    seed = 100
    random.seed(seed)
    np.random.seed(seed)
Example #39
0
    return f"""<head><meta http-equiv="refresh" content="0; url=http://petstore.swagger.io/?url=http://localhost:{flask_port}/api/swagger.json" /></head>"""


if __name__ == '__main__':
    run_flask_in_debug_mode = True

    application_insights_instrumentation_key = None
    try:
        application_insights_instrumentation_key = os.environ[
            ENV_VAR_APPLICATION_INSIGHTS_INSTRUMENTATION_KEY]
    except KeyError:
        logging.warning(
            f'No environment variable "{ENV_VAR_APPLICATION_INSIGHTS_INSTRUMENTATION_KEY}" set'
        )

    if application_insights_instrumentation_key:
        telemetry_client = TelemetryClient(
            application_insights_instrumentation_key)
        telemetry_client.context.device.role_name = APPLICATION_NAME
        application_insights_handler = enable(
            application_insights_instrumentation_key)
        logging.basicConfig(handlers=[application_insights_handler],
                            format='%(levelname)s: %(message)s')

    try:
        flask_port = int(os.environ[ENV_VAR_FLASK_PORT])
    except KeyError:
        logging.warning(
            f'No environment variable "{ENV_VAR_FLASK_PORT}" set; using default port {flask_port}'
        )

    try:
        run_flask_in_debug_mode = int(os.environ[ENV_VAR_FLASK_DEBUG_MODE])
Example #40
0
class NodeStatsCollector:
    """
    Node Stats Manager class
    """
    def __init__(self,
                 pool_id,
                 node_id,
                 refresh_interval=_DEFAULT_STATS_UPDATE_INTERVAL,
                 app_insights_key=None):
        self.pool_id = pool_id
        self.node_id = node_id
        self.telemetry_client = None
        self.first_collect = True
        self.refresh_interval = refresh_interval

        self.disk = IOThroughputAggregator()
        self.network = IOThroughputAggregator()

        if app_insights_key or 'APP_INSIGHTS_INSTRUMENTATION_KEY' in os.environ or 'APP_INSIGHTS_KEY' in os.environ:
            key = (app_insights_key
                   or os.environ.get('APP_INSIGHTS_INSTRUMENTATION_KEY')
                   or os.environ.get('APP_INSIGHTS_KEY'))

            logger.info(
                "Detected instrumentation key. Will upload stats to app insights"
            )
            self.telemetry_client = TelemetryClient(key)
            context = self.telemetry_client.context
            context.application.id = 'AzureBatchInsights'
            context.application.ver = VERSION
            context.device.model = "BatchNode"
            context.device.role_name = self.pool_id
            context.device.role_instance = self.node_id
        else:
            logger.info(
                "No instrumentation key detected. Cannot upload to app insights. "
                "Make sure you have the APP_INSIGHTS_INSTRUMENTATION_KEY environment variable set up"
            )

    def init(self):
        """
            Initialize the monitoring
        """
        # start cpu utilization monitoring, first value is ignored
        psutil.cpu_percent(interval=None, percpu=True)

    def _get_network_usage(self):
        netio = psutil.net_io_counters()
        return self.network.aggregate(netio.bytes_recv, netio.bytes_sent)

    def _get_disk_io(self):
        diskio = psutil.disk_io_counters()
        return self.disk.aggregate(diskio.read_bytes, diskio.write_bytes)

    def _get_disk_usage(self):
        disk_usage = dict()
        try:
            disk_usage[_OS_DISK] = psutil.disk_usage(_OS_DISK)
            disk_usage[_USER_DISK] = psutil.disk_usage(_USER_DISK)
        except Exception as e:
            logger.error(
                'Could not retrieve user disk stats for {0}: {1}'.format(
                    _USER_DISK, e))
        return disk_usage

    def _sample_stats(self):
        # get system-wide counters
        mem = psutil.virtual_memory()
        disk_stats = self._get_disk_io()
        disk_usage = self._get_disk_usage()
        net_stats = self._get_network_usage()

        swap_total, _, swap_avail, _, _, _ = psutil.swap_memory()

        # Tuple (proc name, CPU %)
        process_list = list(((proc.info['name'], proc.cpu_percent(interval=1))
                             for proc in psutil.process_iter(attrs=['name'])
                             if proc.info["name"] in PROCESSES_TO_WATCH))

        stats = NodeStats(
            cpu_count=psutil.cpu_count(),
            cpu_percent=psutil.cpu_percent(interval=None, percpu=True),
            num_pids=len(psutil.pids()),

            # Memory
            mem_total=mem.total,
            mem_avail=mem.available,
            swap_total=swap_total,
            swap_avail=swap_avail,

            # Disk IO
            disk_io=disk_stats,

            # Disk usage
            disk_usage=disk_usage,

            # Net transfer
            net=net_stats,

            # Active rendering processes with CPU
            process_list=process_list)
        del mem
        return stats

    def _collect_stats(self):
        """
            Collect the stats and then send to app insights
        """
        # collect stats
        stats = self._sample_stats()

        if self.first_collect:
            self.first_collect = False
            return

        if stats is None:
            logger.error("Could not sample node stats")
            return

        if self.telemetry_client:
            self._send_stats(stats)
        else:
            self._log_stats(stats)

    def _send_stats(self, stats):
        """
            Retrieve the current stats and send to app insights
        """
        process = psutil.Process(os.getpid())

        logger.debug("Uploading stats. Mem of this script: %d vs total: %d",
                     process.memory_info().rss, stats.mem_avail)
        client = self.telemetry_client

        for cpu_n in range(0, stats.cpu_count):
            client.track_metric("Cpu usage",
                                stats.cpu_percent[cpu_n],
                                properties={"Cpu #": cpu_n})

        for name, disk_usage in stats.disk_usage.items():
            client.track_metric("Disk usage",
                                disk_usage.used,
                                properties={"Disk": name})
            client.track_metric("Disk free",
                                disk_usage.free,
                                properties={"Disk": name})

        if stats.process_list:
            for process_name, cpu in stats.process_list:
                props = {
                    "Process": process_name,
                    "PoolName": self.pool_id,
                    "ComputeNode": self.node_id
                }
                client.track_metric("ActiveProcess", cpu, properties=props)

        client.track_metric("Memory used", stats.mem_used)
        client.track_metric("Memory available", stats.mem_avail)
        client.track_metric("Disk read", stats.disk_io.read_bps)
        client.track_metric("Disk write", stats.disk_io.write_bps)
        client.track_metric("Network read", stats.net.read_bps)
        client.track_metric("Network write", stats.net.write_bps)
        self.telemetry_client.flush()

    def _log_stats(self, stats):
        logger.info(
            "========================= Stats =========================")
        logger.info("Cpu percent:            %d%% %s", avg(stats.cpu_percent),
                    stats.cpu_percent)
        logger.info("Memory used:       %sB / %sB", pretty_nb(stats.mem_used),
                    pretty_nb(stats.mem_total))
        logger.info("Swap used:         %sB / %sB",
                    pretty_nb(stats.swap_avail), pretty_nb(stats.swap_total))
        logger.info("Net read:               %sBs",
                    pretty_nb(stats.net.read_bps))
        logger.info("Net write:              %sBs",
                    pretty_nb(stats.net.write_bps))
        logger.info("Disk read:               %sBs",
                    pretty_nb(stats.disk_io.read_bps))
        logger.info("Disk write:              %sBs",
                    pretty_nb(stats.disk_io.write_bps))
        logger.info("Disk usage:")
        for name, disk_usage in stats.disk_usage.items():
            logger.info("  - %s: %i/%i (%i%%)", name, disk_usage.used,
                        disk_usage.total, disk_usage.percent)

        if stats.process_list:
            for process_name, cpu in stats.process_list:
                logger.info("ActiveProcess: %s (CPU %d%%)", process_name, cpu)

        logger.info("-------------------------------------")
        logger.info("")

    def run(self):
        """
            Start collecting information of the system.
        """
        logger.debug("Start collecting stats for pool=%s node=%s",
                     self.pool_id, self.node_id)
        while True:
            self._collect_stats()
            time.sleep(self.refresh_interval)
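
A minimal way to drive the collector above (pool and node ids are placeholders; the instrumentation key is picked up from the APP_INSIGHTS_INSTRUMENTATION_KEY environment variable, as the constructor shows):

collector = NodeStatsCollector(pool_id="my-pool", node_id="node-0",
                               refresh_interval=10)
collector.init()  # prime psutil's CPU counters; the first reading is discarded
collector.run()   # blocks, sampling and uploading every refresh_interval seconds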
Example #41
0
from django.shortcuts import render
from django.http import HttpRequest
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime
from app.models import UserProfile
from app.forms import UserProfileForm
from app.models import Product
import requests
import json
from applicationinsights import TelemetryClient
import config

tc = TelemetryClient('104f9dca-6034-42a1-a646-7c66230710e7')

def home(request):
    """Renders the home page."""

    tc.track_event('home page reached')
    tc.flush()
    assert isinstance(request, HttpRequest)

    products = Product.objects.order_by('id')

    return render(
        request,
        'app/index.html',
        context_instance = RequestContext(request,
        {
Example #42
0
    def test_track_exception_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception(*sys.exc_info(), properties={}, measurements={ 'x': 42 })
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}, "measurements": {"x": 42}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
        try:
            raise Exception("blah")
        except Exception as e:
            client.track_exception()
            client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Exception", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "ExceptionData", "baseData": {"ver": 2, "exceptions": [{"id": 1, "outerId": 0, "typeName": "Exception", "message": "blah", "hasFullStack": true, "parsedStack": [{"level": 0, "method": "test_track_exception_works_as_expected", "assembly": "Unknown", "fileName": "TestTelemetryProcessor.py", "line": 0}]}], "properties": {"NEW_PROP": "MYPROP"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        for item in sender.data.data.base_data.exceptions:
            for frame in item.parsed_stack:
                frame.file_name = os.path.basename(frame.file_name)
                frame.line = 0
        actual = json.dumps(sender.data.write())
        self.assertEqual(expected, actual)
Example #43
0
# Databricks notebook source
dbutils.widgets.text("infilefolder", "", "In - Folder Path")
infilefolder = dbutils.widgets.get("infilefolder")

dbutils.widgets.text("loadid", "", "Load Id")
loadid = dbutils.widgets.get("loadid")

# COMMAND ----------

from applicationinsights import TelemetryClient
tc = TelemetryClient(dbutils.secrets.get(scope = "storage_scope", key = "applicationInsightsKey"))

# COMMAND ----------

import os
import datetime

# For testing
# infilefolder = 'datalake/data/lnd/2019_03_11_01_38_00/'
load_id = loadid
loaded_on = datetime.datetime.now()
base_path = os.path.join('dbfs:/mnt/datalake/data/lnd/', infilefolder)
parkingbay_filepath = os.path.join(base_path, "MelbParkingBayData.json")
sensors_filepath = os.path.join(base_path, "MelbParkingSensorData.json")


# COMMAND ----------

import ddo_transform.standardize as s

# Retrieve schema
Example #44
0
def ocr(body, response, language: "The language(s) to use for OCR" = "eng"):

    logger.info(os.environ)

    azureAccountName = os.environ['SCANOCR_STORAGE_ACCOUNT_NAME']
    azureAccountKey = os.environ['SCANOCR_STORAGE_ACCOUNT_KEY']
    appInsightsTelemetryKey = os.environ['SCANOCR_APP_INSIGHTS_TELEMETRY_KEY']

    fileSetId = body["file_set_id"]
    zipFileUrl = body["zip_file_url"]
    fileName = body["file_name"]

    # initiate app insights
    tc = TelemetryClient(appInsightsTelemetryKey)
    tc.context.operation.id = str(uuid.uuid4())
    tc.track_event('ocr', {
        'zip_file_url': zipFileUrl,
        'file_set_id': fileSetId
    })
    tc.flush()

    # download zip, extract
    zipRequestBody = requests.get(zipFileUrl)
    z = zipfile.ZipFile(io.BytesIO(zipRequestBody.content))
    tempDir = '/tmp/' + fileSetId + '/' + fileName
    if (os.path.isdir(tempDir)):
        shutil.rmtree(tempDir)
    z.extractall(tempDir)

    # grab all PNG images from zip extract results
    image_files = []
    for root, dirs, files in os.walk(tempDir):
        for file in files:
            if file.endswith(".png"):
                image_files.append(os.path.join(root, file))

    # log file count to app insights
    tc.track_event('ocr_zip_extracted', {'file_count': len(image_files)})
    tc.flush()

    with open(tempDir + '/output.pdf', 'w+') as output:

        # convert PNGs to (non-OCR) PDF
        pdf_bytes = img2pdf.convert(image_files)
        with open(tempDir + '/input.pdf', "wb") as input_pdf:
            input_pdf.write(pdf_bytes)

        # log progress to app insights
        tc.track_event('ocr_pdf_created')
        tc.flush()

        # launch OCR process
        proc = subprocess.Popen("ocrmypdf --jobs 4 --output-type pdf " +
                                tempDir + "/input.pdf " + tempDir +
                                "/output.pdf",
                                stdin=subprocess.PIPE,
                                shell=True)

    # wait for OCR completion
    code = proc.wait()

    # log OCR completion to app insights
    tc.track_event('ocr_output_pdf_complete')
    tc.flush()

    # upload resulting PDF to Azure
    blob_service = BlockBlobService(account_name=azureAccountName,
                                    account_key=azureAccountKey)
    blob_service.create_blob_from_path(
        'images/' + fileSetId,
        fileName + ".pdf",
        tempDir + '/output.pdf',
        content_settings=ContentSettings(content_type='application/pdf'))

    # log upload completion to app insights
    tc.track_event('ocr_uploaded_to_azure')
    tc.flush()

    # obtain download signature from Azure
    sas_token = blob_service.generate_blob_shared_access_signature(
        'images/' + fileSetId, fileName + ".pdf", BlobPermissions.READ,
        datetime.utcnow() + timedelta(hours=12))

    download_url = 'https://' + azureAccountName + '.blob.core.windows.net/' + 'images/' + fileSetId + '/' + fileName + ".pdf" + '?' + sas_token

    # return results
    return {
        'filename': zipFileUrl,
        'files': z.namelist(),
        'download_url': download_url
    }
Example #45
0
from applicationinsights import TelemetryClient

telemetry_client = TelemetryClient('0d21236a-e9fc-447d-910b-359ceda2fac5')


def log_case_not_found(case_citation):
    telemetry_client.track_event('Case Not Found', {'Citation': case_citation})
    telemetry_client.flush()


def log_successful_download(case_citation):
    telemetry_client.track_event('PDF Downloaded', {'Citation': case_citation})
    telemetry_client.flush()


def log_new_session(username, search_count):
    telemetry_client.track_event('Session Opened', {
        'User': username,
        'Search Count': search_count
    })
    telemetry_client.flush()
Example #46
0
import logging

from applicationinsights import TelemetryClient
from opencensus.ext.azure import metrics_exporter
from opencensus.ext.azure.log_exporter import AzureLogHandler
from opencensus.ext.azure.trace_exporter import AzureExporter
from opencensus.ext.flask.flask_middleware import FlaskMiddleware
from opencensus.stats import view as view_module
from opencensus.tags import tag_map as tag_map_module
from opencensus.trace.samplers import ProbabilitySampler
from opencensus.trace.tracer import Tracer

# App Insights
# TODO: Import required libraries for App Insights

# Logging
# keep stdout/stderr logging using StreamHandler
logger = logging.getLogger(__name__)

tc = TelemetryClient('7c5b861a-1af8-4bbd-9488-c03ca464102c')


# TODO: replace the all-zero GUID with your instrumentation key.
logger.addHandler(AzureLogHandler(
    connection_string='InstrumentationKey=7c5b861a-1af8-4bbd-9488-c03ca464102c')
)

# Metrics
exporter = metrics_exporter.new_metrics_exporter(
  enable_standard_metrics=True,
  connection_string='InstrumentationKey=7c5b861a-1af8-4bbd-9488-c03ca464102c')

# Tracing
tracer = Tracer(
    exporter=AzureExporter(
Example #47
0
logger = logging.getLogger(__name__)
logger.addHandler(AzureLogHandler(connection_string=connString))
logger.setLevel(logging.INFO)

# Metrics
# exporter = # TODO: Setup exporter
exporter = metrics_exporter.new_metrics_exporter(enable_standard_metrics=True,
                                                 connection_string=connString)

# Tracing
# tracer = # TODO: Setup tracer
tracer = Tracer(
    exporter=AzureExporter(connection_string=connString),
    sampler=ProbabilitySampler(1.0),
)
telemetryClient = TelemetryClient(instrumentKey)

app = Flask(__name__)

# Requests
# middleware = # TODO: Setup flask middleware
middleware = FlaskMiddleware(
    app,
    exporter=AzureExporter(connection_string=connString),
    sampler=ProbabilitySampler(rate=1.0),
)

# Load configurations from environment or config file
app.config.from_pyfile('config_file.cfg')

if ("VOTE1VALUE" in os.environ and os.environ['VOTE1VALUE']):
Example #48
0
def send_logs(client: TelemetryClient, num_traces: int):
    for _ in range(num_traces):
        trace = generate_log_message()
        severity = generate_log_severity()
        client.track_trace(trace, severity=severity)
        LOG.info('sent trace %s %d', trace, severity)
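
A minimal driver for send_logs (the key is a placeholder; generate_log_message, generate_log_severity and LOG are helpers from the same module, not shown here):

from applicationinsights import TelemetryClient

client = TelemetryClient('00000000-0000-0000-0000-000000000000')  # placeholder key
send_logs(client, num_traces=5)
client.flush()  # push the queued traces to Application Insights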
Example #49
0
class ApplicationInsightsTelemetryClient(BotTelemetryClient):
    def __init__(self, instrumentation_key: str):
        self._instrumentation_key = instrumentation_key
        self._client = TelemetryClient(self._instrumentation_key)

        # Telemetry Processor
        def telemetry_processor(data, context):
            post_data = IntegrationPostData().activity_json
            # Override session and user id
            from_prop = post_data['from'] if 'from' in post_data else None
            user_id = from_prop['id'] if from_prop is not None else None
            channel_id = post_data['channelId'] if 'channelId' in post_data else None
            conversation = post_data['conversation'] if 'conversation' in post_data else None
            conversation_id = conversation['id'] if conversation and 'id' in conversation else None
            # Guard against missing fields so the processor never raises
            context.user.id = (channel_id or "") + (user_id or "")
            context.session.id = conversation_id

            # Additional bot-specific properties
            if 'activityId' in post_data:
                data.properties["activityId"] = post_data['activityId']
            if 'channelId' in post_data:
                data.properties["channelId"] = post_data['channelId']
            if 'activityType' in post_data:
                data.properties["activityType"] = post_data['activityType']

        self._client.add_telemetry_processor(telemetry_processor)

    def track_pageview(self,
                       name: str,
                       url: str,
                       duration: int = 0,
                       properties: Dict[str, object] = None,
                       measurements: Dict[str, object] = None) -> None:
        """
        Send information about the page viewed in the application (a web page for instance).
        :param name: the name of the page that was viewed.
        :param url: the URL of the page that was viewed.
        :param duration: the duration of the page view in milliseconds. (defaults to: 0)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_pageview(name, url, duration, properties,
                                    measurements)

    def track_exception(self,
                        type_exception: type = None,
                        value: Exception = None,
                        tb: traceback = None,
                        properties: Dict[str, object] = None,
                        measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single exception that occurred in the application.
        :param type_exception: the type of the exception that was thrown.
        :param value: the exception that the client wants to send.
        :param tb: the traceback information as returned by :func:`sys.exc_info`.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_exception(type_exception, value, tb, properties,
                                     measurements)

    def track_event(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single event that has occurred in the context of the application.
        :param name: the data to associate to this event.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_event(name, properties, measurements)

    def track_metric(self,
                     name: str,
                     value: float,
                     type: TelemetryDataPointType = None,
                     count: int = None,
                     min: float = None,
                     max: float = None,
                     std_dev: float = None,
                     properties: Dict[str, object] = None) -> NotImplemented:
        """
        Send information about a single metric data point that was captured for the application.
        :param name: The name of the metric that was captured.
        :param value: The value of the metric that was captured.
        :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation`)
        :param count: the number of metrics that were aggregated into this data point. (defaults to: None)
        :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        """
        self._client.track_metric(name, value, type, count, min, max, std_dev,
                                  properties)

    def track_trace(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    severity=None):
        """
        Sends a single trace statement.
        :param name: the trace statement.\n
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)\n
        :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
        """
        self._client.track_trace(name, properties, severity)

    def track_request(self,
                      name: str,
                      url: str,
                      success: bool,
                      start_time: str = None,
                      duration: int = None,
                      response_code: str = None,
                      http_method: str = None,
                      properties: Dict[str, object] = None,
                      measurements: Dict[str, object] = None,
                      request_id: str = None):
        """
        Sends a single request that was captured for the application.
        :param name: The name for this request. All requests with the same name will be grouped together.
        :param url: The actual URL for this request (to show in individual request instances).
        :param success: True if the request ended in success, False otherwise.
        :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
        :param duration: the number of milliseconds that this request lasted. (defaults to: None)
        :param response_code: the response code that this request returned. (defaults to: None)
        :param http_method: the HTTP method that triggered this request. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_request(name, url, success, start_time, duration,
                                   response_code, http_method, properties,
                                   measurements, request_id)

    def track_dependency(self,
                         name: str,
                         data: str,
                         type: str = None,
                         target: str = None,
                         duration: int = None,
                         success: bool = None,
                         result_code: str = None,
                         properties: Dict[str, object] = None,
                         measurements: Dict[str, object] = None,
                         dependency_id: str = None):
        """
        Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)
        :param target: the target site of a dependency call. Examples are server name, host address. (default to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param dependency_id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_dependency(name, data, type, target, duration,
                                      success, result_code, properties,
                                      measurements, dependency_id)

    def flush(self):
        """Flushes data in the queue. Data in the queue will be sent either immediately irrespective of what sender is
        being used.
        """
        self._client.flush()
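
A short usage sketch for the wrapper above (the key is a placeholder; in a real bot it would come from configuration):

bot_telemetry = ApplicationInsightsTelemetryClient(
    '00000000-0000-0000-0000-000000000000')  # placeholder key
bot_telemetry.track_event('BotMessageReceived', properties={'channel': 'emulator'})
bot_telemetry.track_trace('turn handled', severity='INFO')
bot_telemetry.flush()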
Example #50
0
def main(msg: func.QueueMessage) -> None:
    """
    Main function, triggered by Azure Storage Queue, parsed queue content and
    try to download the databricks output metadata file to get each succefully processed file location.
    Then enqueue to ingest queue for ingestion to ADX on later Azure function.
    If the file is checkpoint compact file, the code will shrink the file size.
    :param msg: func.QueueMessage
    :return: None
    """
    code_start_time = time.time()
    logging.info('Python queue trigger function processed a queue item: %s',
                 msg.get_body().decode('utf-8'))
    # modify the log level of azure sdk requests
    logging.getLogger('azure').setLevel(logging.WARNING)
    init_config_values()

    tc = TelemetryClient(APPINSIGHTS_INSTRUMENTATIONKEY)
    tc.context.application.ver = '1.0'
    tc.context.properties["PROCESS_PROGRAM"] = PROCESS_PROGRAM_NAME
    tc.context.properties["PROCESS_START"] = time.time()

    # 1. Get trigger file content (rename event)
    content_json = json.loads(msg.get_body().decode('utf-8'))

    logging.info("meta-data event content: {}".format(msg.get_body().decode('utf-8')))
    file_url = content_json['data']['destinationUrl']
    logging.info(f"file_url: {file_url}")
    event_time = content_json['eventTime']

    # 2. Download metadata blob content
    logging.info(f"{HEADER} Download blob file from {file_url}")
    temp_blob_client = BlobClient.from_blob_url(blob_url=file_url, logging_enable=False)
    blob_path = temp_blob_client.blob_name
    container_name = temp_blob_client.container_name

    try:
        metadata_file_content = get_blob_content(container_name, blob_path)
    except Exception:
        logging.exception(f"Failed to download blob from url {file_url}")
        raise

    # 3. Parse split output file from the metadata
    queue_msg_list = generate_metadata_queue_messages(event_time, metadata_file_content)
    logging.info(
        f"{HEADER} Generate metadata queue_messages from {file_url}, {len(queue_msg_list)} messages")

    # 4. Loop to enqueue msg to ADX ingest queue
    queue_client_list = []
    for q_url in ADX_INGEST_QUEUE_URL_LIST:
        queue_client = get_queue_client(q_url)
        queue_client_list.append(queue_client)

    asyncio.set_event_loop(asyncio.new_event_loop())
    loop = asyncio.get_event_loop()
    tasks = gen_metadata_msg_enqueue_tasks(queue_msg_list, queue_client_list, tc)
    loop.run_until_complete(gather_with_concurrency(CONCURRENT_ENQUEUE_TASKS, tasks))
    close_queue_clients(queue_client_list, loop)
    loop.close()

    logging.info(f"{HEADER} Done queuing up messages to Ingestion queue")

    if file_url.endswith(".compact"): # reduce compact file size
        update_blob_content(container_name,
                            blob_path,
                            get_shrinked_checkpoint_content(
                                metadata_file_content, MAX_COMPACT_FILE_RECORDS))
        logging.info(f"{HEADER} Reduced checkpoint files {file_url}, max lines is {MAX_COMPACT_FILE_RECORDS}")

    code_duration = time.time() - code_start_time
    tc.track_event(METADATA_HANDLE_EVENT_NAME,
                   {'FILE_URL': file_url},
                   {METADATA_HANDLE_EVENT_NAME + '_DURATION_SEC': code_duration})
    tc.flush()
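
The enqueue step calls a gather_with_concurrency helper that is not shown in this excerpt; a common implementation, assumed here, caps the number of in-flight awaitables with a semaphore:

import asyncio

async def gather_with_concurrency(n, tasks):
    """Run the given awaitables with at most n executing at once (assumed helper)."""
    semaphore = asyncio.Semaphore(n)

    async def bounded(task):
        async with semaphore:
            return await task

    return await asyncio.gather(*(bounded(t) for t in tasks))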
Example #51
0
    rkroot = str(uuid.uuid4())

    return { 'service': table_service, 'name': table_name, 'pk': pk, 'rk': rkroot }

def write_entry(table_settings, entry):
    if 'PartitionKey' not in entry:
        entry['PartitionKey'] = table_settings['pk']
    if 'RowKey' not in entry:
        entry['RowKey'] = table_settings['rk'] + str(time.time())
    table_settings['service'].insert_entity(table_settings['name'], entry)

def write_msg(table_settings, msg):
    write_entry(table_settings, { 'details': msg })

app = Flask(__name__)

@app.route("/")
def hello():
    return "Hello World!"

if __name__ == '__main__':
    tc = TelemetryClient(os.environ['APPINSIGHTS_KEY'])
    try:
        table_settings = init_table()
        write_msg(table_settings, 'Python version: %s' % sys.version)
    except Exception as e:
        print(e)
        # the Python SDK's track_exception takes the (type, value, traceback) triple
        tc.track_exception(*sys.exc_info(), properties={"foo": "bar"}, measurements={"x": 42})
        tc.flush()
    app.run()