Example #1
def main():
    config = configparser.ConfigParser()
    config.read(CRED_FILE)

    subscription_id = str(config['DEFAULT']['AZURE_SUBSCRIPTION_ID'])
    appinsights_name = str(config['DEFAULT']['APPINSIGHTS_NAME'])
    resource_group = config['DEFAULT']['RG_NAME']
    credentials = ServicePrincipalCredentials(
        client_id=config['DEFAULT']['azure_client_id'],
        secret=config['DEFAULT']['azure_client_secret'],
        tenant=config['DEFAULT']['azure_tenant_id'])

    try:
        resource_client = ResourceManagementClient(credentials,
                                                   subscription_id)
    except Exception as e:
        logger1.error("Getting Azure Infra handlers failed %s" % str(e))
        raise
    inst_key = get_appinsights_instr_key(resource_group, appinsights_name,
                                         credentials, subscription_id)
    tc = TelemetryClient(inst_key)

    #logger1.info("[INFO]: Instrumentation key used {}".format(inst_key))

    for metric in metric_list:
        logger1.info("[INFO]: Publishing metrics {}".format(metric))
        tc.track_metric(metric, 0)
        tc.flush()
        time.sleep(2)
    def test_track_metric_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True

        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient(
            '99999999-9999-9999-9999-999999999999',
            channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_metric('metric', 42,
                            channel.contracts.DataPointType.aggregation, 13, 1,
                            123, 111, {'foo': 'bar'})
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags[
            'ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
def main():
    inst_key = sys.argv[1].strip()
    tc = TelemetryClient(inst_key)

    logger1.info("[INFO]: Instrumentation key used {}".format(inst_key))

    for metric in metric_list:
        logger1.info("[INFO]: Publishing metrics {}".format(metric))
        tc.track_metric(metric, 0)
        tc.flush()
        time.sleep(30)
    def test_track_metric_works_as_expected(self):
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.context.device = None
        client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
Example #6
class AppInsightsClient:
    def __init__(self):
        self._client = None

    def init_app(self, key, max_queue_length=0):
        if not key or len(key) == 0:
            return

        self._client = TelemetryClient(key)
        self._client.channel.queue.max_queue_length = max_queue_length

    def track_metric(self, name, value, properties=None):
        if not self._client:
            return

        self._client.track_metric(name, value, properties=properties)
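A minimal usage sketch for the wrapper above (hypothetical; it assumes the instrumentation key is supplied through the APPINSIGHTS_INSTRUMENTATIONKEY environment variable and the metric name/property are placeholders):

import os

appinsights = AppInsightsClient()
# init_app() is a no-op when the key is missing, so this is safe in local runs
appinsights.init_app(os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY", ""))
appinsights.track_metric("requests_processed", 17, properties={"worker": "w1"})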
Example #7
class MetricsOutput(Metrics):
    """Send metrics data to app insights
    """
    def __init__(self, ctx, config=None):
        super(MetricsOutput, self).__init__(ctx, config)
        self.namespace = self.ctx.policy.name
        self.tc = None

    def _initialize(self):
        if self.tc is not None:
            return
        self.instrumentation_key = AppInsightsHelper.get_instrumentation_key(
            self.config['url'])
        self.tc = TelemetryClient(self.instrumentation_key)
        self.subscription_id = local_session(
            self.ctx.policy.session_factory).get_subscription_id()

    def _format_metric(self, key, value, unit, dimensions):
        self._initialize()
        d = {
            'Name': key,
            'Value': value,
            'Dimensions': {
                'Policy': self.ctx.policy.name,
                'ResType': self.ctx.policy.resource_type,
                'SubscriptionId': self.subscription_id,
                'ExecutionId': self.ctx.execution_id,
                'ExecutionMode': self.ctx.policy.execution_mode,
                'Unit': unit
            }
        }
        for k, v in dimensions.items():
            d['Dimensions'][k] = v
        return d

    def _put_metrics(self, ns, metrics):
        self._initialize()
        for m in metrics:
            self.tc.track_metric(name=m['Name'],
                                 value=m['Value'],
                                 properties=m['Dimensions'])
        self.tc.flush()
class AppInsightsReporter(object):
    def __init__(self,
                 appinsightskey,
                 monitor_list,
                 upload_interval_seconds=60,
                 quiet=False):
        self.monitors = monitor_list
        self.telemetry = []
        self.telemetry_client = TelemetryClient(appinsightskey)
        self.upload_interval_seconds = upload_interval_seconds
        self.quiet = quiet

    def run(self):
        while (True):
            self.collect_metrics()
            self.upload_metrics()
            self.purge_metrics()

            time.sleep(self.upload_interval_seconds)

    def collect_metrics(self):
        for monitor in self.monitors:
            self.telemetry.extend(monitor.get_telemetry())

    def upload_metrics(self):
        # Can just upload our telemetry schema since it matches MS schema
        if not self.quiet:
            print("Uploading telemetry...")

        for datapoint in self.telemetry:
            self.telemetry_client.track_metric(datapoint.name, datapoint.value)

        self.telemetry_client.flush()

        if not self.quiet:
            print("Upload complete")

    def purge_metrics(self):
        self.telemetry = []
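A sketch of how this reporter might be driven. The monitor class here is hypothetical; the only contract the reporter relies on is a get_telemetry() method returning objects with name and value attributes:

from collections import namedtuple

DataPoint = namedtuple("DataPoint", ["name", "value"])

class CpuMonitor:
    # Hypothetical monitor; get_telemetry() is the only method the reporter calls.
    def get_telemetry(self):
        return [DataPoint("cpu_percent", 12.5)]

reporter = AppInsightsReporter("<YOUR INSTRUMENTATION KEY GOES HERE>",
                               [CpuMonitor()],
                               upload_interval_seconds=60)
reporter.run()  # blocks; collects, uploads and purges on every interval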
    def test_track_metric_works_as_expected(self):
        def process(data, context):
            data.properties["NEW_PROP"] = "MYPROP"
            context.user.id = "BOTUSER"
            context.session.id = "BOTSESSION"
            return True
        sender = MockTelemetrySender()
        queue = channel.SynchronousQueue(sender)
        client = TelemetryClient('99999999-9999-9999-9999-999999999999', channel.TelemetryChannel(context=None, queue=queue))
        client.add_telemetry_processor(process)
        client.context.device = None
        client.track_metric('metric', 42, channel.contracts.DataPointType.aggregation, 13, 1, 123, 111, {'foo': 'bar'})
        client.flush()
        expected = '{"ver": 1, "name": "Microsoft.ApplicationInsights.Metric", "time": "TIME_PLACEHOLDER", "sampleRate": 100.0, "iKey": "99999999-9999-9999-9999-999999999999", "tags": {"ai.device.id": "DEVICE_ID_PLACEHOLDER", "ai.device.locale": "DEVICE_LOCALE_PLACEHOLDER", "ai.device.osVersion": "DEVICE_OS_VERSION_PLACEHOLDER", "ai.device.type": "DEVICE_TYPE_PLACEHOLDER", "ai.internal.sdkVersion": "SDK_VERSION_PLACEHOLDER", "ai.session.id": "BOTSESSION", "ai.user.id": "BOTUSER"}, "data": {"baseType": "MetricData", "baseData": {"ver": 2, "metrics": [{"name": "metric", "kind": 1, "value": 42, "count": 13, "min": 1, "max": 123, "stdDev": 111}], "properties": {"NEW_PROP": "MYPROP", "foo": "bar"}}}}'
        sender.data.time = 'TIME_PLACEHOLDER'
        sender.data.tags['ai.internal.sdkVersion'] = 'SDK_VERSION_PLACEHOLDER'
        sender.data.tags['ai.device.id'] = "DEVICE_ID_PLACEHOLDER"
        sender.data.tags['ai.device.locale'] = "DEVICE_LOCALE_PLACEHOLDER"
        sender.data.tags['ai.device.osVersion'] = "DEVICE_OS_VERSION_PLACEHOLDER"
        sender.data.tags['ai.device.type'] = "DEVICE_TYPE_PLACEHOLDER"
        actual = json.dumps(sender.data.write())
        self.maxDiff = None
        self.assertEqual(expected, actual)
test_id = os.environ.get("TEST_ID", str(uuid.uuid4()))

instrumentation_key = os.environ.get("APPINSIGHTS_INSTRUMENTATIONKEY")
telemetry_client = None
if instrumentation_key:
    telemetry_client = TelemetryClient(instrumentation_key)

print("Test run for '{}' started.".format(test_id))

queries_total = int(os.environ.get("QUERIES_TOTAL", -1))
queries_executed = 0

while queries_executed < queries_total or queries_total < 0:
    raw_query = get_query()
    print("\nTest '{}' executing #{}:\n{}\n".format(test_id, queries_executed,
                                                    raw_query))

    t = timeit.Timer(functools.partial(cursor.execute, raw_query))
    query_time = t.timeit(number=1)

    print("Query took: {:.2f} seconds".format(query_time))
    queries_executed += 1

    if telemetry_client:
        telemetry_client.track_metric("query_time",
                                      query_time,
                                      properties={"test_id": test_id})
        telemetry_client.flush()

print("Test run for '{}' ended.".format(test_id))
class AI4EAppInsights(object):
    def __init__(self):
        self.grantee_key = None
        raw_key = getenv(CONF_KEY_GRANTEE, None)
        if (raw_key and len(raw_key.strip()) > 0):
            self.grantee_key = raw_key.strip()

        if (self.grantee_key):
            self.sender = AsynchronousSender()
            self.r_queue = AsynchronousQueue(self.sender)
            self.r_context = AI4ETelemetryContext()
            self.r_channel = TelemetryChannel(self.r_context, self.r_queue)

            self.appinsights_grantee_client = TelemetryClient(
                getenv(CONF_KEY_GRANTEE), self.r_channel)
            self.appinsights_ai4e_client = None

            if (getenv(CONF_KEY_AI4E)):
                self.appinsights_ai4e_client = TelemetryClient(
                    getenv(CONF_KEY_AI4E), self.r_channel)

    def _log(self, message, sev, taskId=None, additionalProperties=None):
        if (self.grantee_key):
            if (taskId):
                if (additionalProperties is None):
                    additionalProperties = {'task_id': taskId}
                else:
                    additionalProperties['task_id'] = taskId

            self.appinsights_grantee_client.track_trace(
                message, severity=sev, properties=additionalProperties)
            self.appinsights_grantee_client.flush()

            if (self.appinsights_ai4e_client):
                self.appinsights_ai4e_client.track_trace(
                    message, severity=sev, properties=additionalProperties)
                self.appinsights_ai4e_client.flush()

    def log_debug(self, message, taskId=None, additionalProperties=None):
        self._log(message, "DEBUG", taskId, additionalProperties)

    def log_info(self, message, taskId=None, additionalProperties=None):
        self._log(message, "INFO", taskId, additionalProperties)

    def log_error(self, message, taskId=None, additionalProperties=None):
        self._log(message, "ERROR", taskId, additionalProperties)

    def log_warn(self, message, taskId=None, additionalProperties=None):
        self._log(message, "WARNING", taskId, additionalProperties)

    def log_exception(self, message, taskId=None, additionalProperties=None):
        self._log(message, "CRITICAL", taskId, additionalProperties)

    def track_metric(self, metric_name, metric_value):
        if (self.grantee_key):
            print("Tracking metric:" + metric_name + ", Value: " +
                  str(metric_value))
            self.appinsights_grantee_client.track_metric(
                metric_name, metric_value)
            self.appinsights_grantee_client.flush()

            if (self.appinsights_ai4e_client):
                self.appinsights_ai4e_client.track_metric(
                    metric_name, metric_value)
                self.appinsights_ai4e_client.flush()
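A hedged usage sketch for the class above; it assumes the environment variable named by CONF_KEY_GRANTEE already holds a valid instrumentation key before the object is constructed, and the task id and metric name are placeholders:

ai4e_insights = AI4EAppInsights()
ai4e_insights.log_info("Scoring request received", taskId="task-001")
ai4e_insights.track_metric("inference_seconds", 0.42)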
Example #12
def main():
    command = 'az login --service-principal -u ' + sys.argv[
        1] + ' -p ' + sys.argv[2] + ' --tenant ' + sys.argv[3]
    logger1.info("[INFO]: Logging in {}".format(command))
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    #y = json.loads(proc_stdout)
    logger1.info("[INFO]: output of az login {}".format(proc_stdout))
    command = 'az resource show -g ' + sys.argv[
        7] + ' --resource-type microsoft.insights/components -n ' + sys.argv[
            6] + ' --query properties.InstrumentationKey -o tsv'
    logger1.info("[INFO]: Show resources {}".format(command))
    #inst_key = subprocess.check_output(shlex.split(command)).rstrip()
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    inst_key = process.communicate()[0].decode().strip()
    logger1.info("[INFO]: output of az resource show {}".format(inst_key))

    logger1.info("[INFO]: publishing metrics {}".format(metric_list))
    tc = TelemetryClient(inst_key.rstrip())

    tc.track_metric('DataPlaneCPUUtilizationPct', 0)
    tc.flush()
    tc.track_metric('DataPlaneCPUUtilizationPct', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('panGPGatewayUtilizationPct', 0)
    tc.flush()
    tc.track_metric('panGPGatewayUtilizationPct', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('panGPGWUtilizationActiveTunnels', 0)
    tc.flush()
    tc.track_metric('panGPGWUtilizationActiveTunnels', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('DataPlanePacketBufferUtilization', 0)
    tc.flush()
    tc.track_metric('DataPlanePacketBufferUtilization', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('panSessionActive', 0)
    tc.flush()
    tc.track_metric('panSessionActive', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('panSessionSslProxyUtilization', 0)
    tc.flush()
    tc.track_metric('panSessionSslProxyUtilization', 0)
    tc.flush()
    time.sleep(10)
    tc.track_metric('panSessionUtilization', 0)
    tc.flush()
    tc.track_metric('panSessionUtilization', 0)
    tc.flush()
    time.sleep(10)
class AppinsightsBotTelemetryClient(BotTelemetryClient):
    def __init__(self, instrumentation_key: str):
        self._instrumentation_key = instrumentation_key

        self._context = TelemetryContext()
        self._context.instrumentation_key = self._instrumentation_key
        # self._context.user.id = 'BOTID'
        # self._context.session.id = 'BOTSESSION'

        # set up channel with context
        self._channel = TelemetryChannel(self._context)
        # self._channel.context.properties['my_property'] = 'my_value'

        self._client = TelemetryClient(self._instrumentation_key,
                                       self._channel)

    def track_pageview(self,
                       name: str,
                       url: str,
                       duration: int = 0,
                       properties: Dict[str, object] = None,
                       measurements: Dict[str, object] = None) -> None:
        """
        Send information about the page viewed in the application (a web page for instance).
        :param name: the name of the page that was viewed.
        :param url: the URL of the page that was viewed.
        :param duration: the duration of the page view in milliseconds. (defaults to: 0)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_pageview(name, url, duration, properties,
                                    measurements)

    def track_exception(self,
                        type_exception: type = None,
                        value: Exception = None,
                        tb: traceback = None,
                        properties: Dict[str, object] = None,
                        measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single exception that occurred in the application.
        :param type_exception: the type of the exception that was thrown.
        :param value: the exception that the client wants to send.
        :param tb: the traceback information as returned by :func:`sys.exc_info`.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_exception(type_exception, value, tb, properties,
                                     measurements)

    def track_event(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single event that has occurred in the context of the application.
        :param name: the data to associate to this event.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_event(name, properties, measurements)

    def track_metric(self,
                     name: str,
                     value: float,
                     type: TelemetryDataPointType = None,
                     count: int = None,
                     min: float = None,
                     max: float = None,
                     std_dev: float = None,
                     properties: Dict[str, object] = None) -> NotImplemented:
        """
        Send information about a single metric data point that was captured for the application.
        :param name: The name of the metric that was captured.
        :param value: The value of the metric that was captured.
        :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation)
        :param count: the number of metrics that were aggregated into this data point. (defaults to: None)
        :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        """
        self._client.track_metric(name, value, type, count, min, max, std_dev,
                                  properties)

    def track_trace(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    severity=None):
        """
        Sends a single trace statement.
        :param name: the trace statement.\n
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)\n
        :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
        """
        self._client.track_trace(name, properties, severity)

    def track_request(self,
                      name: str,
                      url: str,
                      success: bool,
                      start_time: str = None,
                      duration: int = None,
                      response_code: str = None,
                      http_method: str = None,
                      properties: Dict[str, object] = None,
                      measurements: Dict[str, object] = None,
                      request_id: str = None):
        """
        Sends a single request that was captured for the application.
        :param name: The name for this request. All requests with the same name will be grouped together.
        :param url: The actual URL for this request (to show in individual request instances).
        :param success: True if the request ended in success, False otherwise.
        :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
        :param duration: the number of milliseconds that this request lasted. (defaults to: None)
        :param response_code: the response code that this request returned. (defaults to: None)
        :param http_method: the HTTP method that triggered this request. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_request(name, url, success, start_time, duration,
                                   response_code, http_method, properties,
                                   measurements, request_id)

    def track_dependency(self,
                         name: str,
                         data: str,
                         type: str = None,
                         target: str = None,
                         duration: int = None,
                         success: bool = None,
                         result_code: str = None,
                         properties: Dict[str, object] = None,
                         measurements: Dict[str, object] = None,
                         dependency_id: str = None):
        """
        Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)
        :param target: the target site of a dependency call. Examples are server name, host address. (default to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_dependency(name, data, type, target, duration,
                                      success, result_code, properties,
                                      measurements, dependency_id)
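A short illustrative call-through of the request and dependency wrappers above (all values are placeholders, not from the original bot; items are queued on the underlying channel until it flushes):

bot_client = AppinsightsBotTelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
bot_client.track_request(name='POST /api/messages',
                         url='https://contoso.example/api/messages',
                         success=True,
                         duration=125,
                         response_code='200',
                         http_method='POST')
bot_client.track_dependency(name='GetUserProfile',
                            data='SELECT * FROM users WHERE id = @id',
                            type='SQL',
                            target='users-db',
                            duration=12,
                            success=True,
                            result_code='0')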
Example #14
class Telemetry(metaclass=Singleton):
    """Singleton class that handles telemetry sending to AppInsights."""
    def __init__(self, toggle):
        """Initialize Telemetry instance."""
        self._toggle = toggle
        if self._toggle:
            self._telemetry_client = TelemetryClient(APP_INSIGHTS_KEY)
            self._telemetry_channel = self._setup_telemetry_channel()
            print("Telemetry enabled.")
        else:
            self._telemetry_client = None
            self._telemetry_channel = None
            print("Telemetry disabled.")

    def track_event(self, name, properties=None, measurements=None):
        """Track a telemetry event."""
        try:
            self._telemetry_client.track_event(name, properties, measurements)
        except AttributeError:
            print(f"Telemetry Disabled: Event Name: {name}")
            print(f"properties: {properties}")
            print(f"measurements: {measurements}")

    def track_metric(self,
                     name,
                     value,
                     type=None,
                     count=None,
                     min=None,
                     max=None,
                     std_dev=None,
                     properties=None):
        """Track a telemetry metric."""
        try:
            self._telemetry_client.track_metric(name, value, type, count, min,
                                                max, std_dev, properties)
        except AttributeError:
            print(f"Telemetry Disabled: Metric Name: {name}")
            print(f"value: {value}")
            if type:
                print(f"type: {type}")
            if count:
                print(f"count: {count}")
            if min:
                print(f"min: {min}")
            if max:
                print(f"max: {max}")
            if std_dev:
                print(f"std_dev: {std_dev}")
            if properties:
                print(f"properties: {properties}")

    def flush(self):
        """Flush the telemetry client info to AppInsights."""
        try:
            self._telemetry_client.flush()
        except AttributeError:
            pass

    def _setup_telemetry_channel(self):
        """Create telemetry_channel object.

        Instantiates a telemetry channel that collects unhandled exceptions.

        Return:
            telemetry_channel

        """
        from applicationinsights.exceptions import enable
        from applicationinsights import channel

        # set up channel with context
        telemetry_channel = channel.TelemetryChannel()
        telemetry_channel.context.application.ver = get_version()
        # set up exception capture
        telemetry_channel.context.properties['capture'] = 'exceptions'
        enable(APP_INSIGHTS_KEY, telemetry_channel=telemetry_channel)

        return telemetry_channel
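A minimal sketch of how the singleton might be used, assuming APP_INSIGHTS_KEY and get_version() are defined elsewhere in the module as the class expects (event and metric names are illustrative):

telemetry = Telemetry(toggle=True)   # pass False to fall back to console printing
telemetry.track_event("model_trained", properties={"dataset": "iris"})
telemetry.track_metric("accuracy", 0.97)
telemetry.flush()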
Example #15
def merge_rename_core_columns_CSV(vm_uuid, deploy_uuid, config_uuid,
                                  schema_ver, inject_ver, container_name,
                                  filesrootfolder, fileoutputfolder,
                                  process_id):
    #block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,  sas_token=SOURCE_CSV_BLOB_TOKEN)
    block_blob_service = BlockBlobService(account_name=SOURCE_CSV_BLOB_ACCOUNT,
                                          account_key=SOURCE_CSV_BLOB_KEY)
    tc = TelemetryClient('')
    print("Start merge CSV ", vm_uuid, ' ', deploy_uuid, ' ', config_uuid)

    blobs = []
    marker = None
    while True:
        batch = block_blob_service.list_blobs(container_name,
                                              prefix=filesrootfolder,
                                              marker=marker)
        blobs.extend(batch)
        if not batch.next_marker:
            break
        marker = batch.next_marker
    i = 0
    blobpaths = []
    for blob in blobs:
        blobpaths.append(blob.name)

    matchers = ['.csv']
    matching = [s for s in blobpaths if any(xs in s for xs in matchers)]

    mergelog = {}
    mergelog["vm_uuid"] = vm_uuid

    mergelog["process_type"] = "MERGE_METRIC_CSV"
    mergelog["DOC_TYPE"] = "MERGE_METRIC_FILES_LOG"
    mergelog["file_folder"] = filesrootfolder
    mergelog["process_time"] = time.time()
    mergelog["files"] = []
    mergelog["defect_files"] = []

    a_mergelog = copy.deepcopy(mergelog)

    dfagg = pd.DataFrame(columns=[])

    mixagg = AGGREGATION_FILES_NUM
    aggcount = 0
    aggcount_total = 0
    aggoutcount = 0
    aggsize = 0

    error_files = []
    merged_files = []
    total_rows = 0
    alldfs = []
    outfilenamebase = fileoutputfolder + filesrootfolder + "_aggr_"
    t1 = time.time()
    #print (outfilenamebase)
    source_col = ['']
    target_col = ['']

    tc.track_trace('Prepare to process ' + str(len(matching)) +
                   '  Metric CSV files ')
    tc.flush()

    for fname in matching:
        #print(aggcount)

        head, tail = os.path.split(fname)

        aggcount += 1
        aggcount_total += 1

        blobstring = block_blob_service.get_blob_to_text(
            container_name, fname).content
        aggsize += len(blobstring)

        #print('Prepare to merge '+str(aggcount_total)+' / '+str(len(matching)) +' Memeory '+str(aggsize)+' File Name: '+tail)
        #tc.track_trace('Prepare to merge '+tail)
        #tc.flush()

        try:  # Read CSV and try processing

            dfone = pd.read_csv(StringIO(blobstring))

            dfAll_cols = dfone.columns
            #colname0=dfAll_cols
            dfAll_newcols = []

            pc_name = re.search(r'(\\{2}.*\\)(.*\\)', dfAll_cols[1]).group(1)

            for col in dfAll_cols:
                dfAll_newcols.append(
                    col.replace(pc_name, '').replace('`', '').replace(
                        '\\', '').replace(' ', '').replace('/', '').replace(
                            '.', '').replace('-', '').replace('%', '').replace(
                                '(', '').replace(')', ''))

            dfAll_newcols[0] = "Universal_datetime"

            # Rename all columns
            dfone.columns = dfAll_newcols

            alldfs.append(dfone)
            a_mergelog['files'].append(tail)

            #if (aggcount>=mixagg) or (aggcount_total==len(matching)):
            if (aggsize > MAX_FILESIZE) or (aggcount_total == len(matching)):
                if (aggcount_total == len(matching)):
                    print("Processing Final File")
                    tc.track_trace('Processing Final File')
                    tc.flush()

                alldfs.append(pd.DataFrame(columns=source_col))
                dfagg = pd.concat(alldfs, ignore_index=True)
                dfagg_out = dfagg[source_col]
                dfagg_out.columns = target_col
                dfagg_out['schema_ver'] = schema_ver
                dfagg_out['inject_ver'] = inject_ver
                output = dfagg_out.to_csv(index=False, encoding="utf-8")
                outfile = outfilenamebase + str(aggoutcount) + ".csv"
                block_blob_service.create_blob_from_text(
                    container_name, outfile, output)
                print(
                    "Output aggregated file to " + container_name,
                    outfile + " Data Shape " + str(dfagg.shape) + ' uuid: ' +
                    str(vm_uuid) + str(deploy_uuid) + str(config_uuid))
                total_rows += dfagg_out.shape[0]

                merged_files.append(outfile)

                a_mergelog['output_file'] = outfile
                a_mergelog['merged_files_num'] = len(a_mergelog['files'])
                a_mergelog['defect_files_num'] = len(
                    a_mergelog['defect_files'])

                # Insert Process Log to COSMOS DB
                insert_json_cosmos(a_mergelog)
                a_mergelog = copy.deepcopy(mergelog)
                t2 = time.time()

                print(("It takes %s seconds to merge " + str(aggcount) +
                       " CSV Metrics") % (t2 - t1))
                aggoutcount += 1
                aggcount = 0
                aggsize = 0
                alldfs = []
                t1 = time.time()
                file_size = BlockBlobService.get_blob_properties(
                    block_blob_service, container_name,
                    outfile).properties.content_length
                print(outfile + "  File Size " + str(file_size))

                # Ingest to ADX
                ingest_to_ADX(outfile, file_size)
        except Exception as e:
            print('Error While process ' + fname)
            error_class = e.__class__.__name__
            detail = e.args[0]
            cl, exc, tb = sys.exc_info()
            lastCallStack = traceback.extract_tb(tb)[-1]
            fileName = lastCallStack[0]
            lineNum = lastCallStack[1]
            funcName = lastCallStack[2]
            errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(
                fileName, lineNum, funcName, error_class, detail)

            print("Unexpected error:", sys.exc_info()[0])
            traceback.print_exc()

            msg = errMsg + traceback.format_exc()

            tc = TelemetryClient('')
            tc.context.application.ver = '1.0'
            tc.context.properties["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
            tc.context.properties["DATA_FOLDER"] = metricspath
            tc.track_trace(msg)

            tc.flush()
            # print("Unexpected error:", sys.exc_info()[0])
            a_mergelog["defect_files"].append(tail)
            error_files.append(fname)  # Add No-Well Formed JSON to error file
    print('Total Rows ' + str(total_rows))

    tc.track_trace('Processed Rows: ' + str(total_rows))
    tc.track_metric('BATHCH_INGEST_METRIC_CSV_TOTAL_ROWS', total_rows)
    tc.flush()
    return error_files, merged_files, matching
Example #16
def process(filesrootfolder, forceinsert):

    # Create process id as identify of this process
    process_id = time.time()

    tc = TelemetryClient('')

    tc.context.application.ver = '1.0'
    tc.context.properties["PROCESS_PROGRAM"] = "BATCH_CSV_V001a"
    tc.context.properties["PROCESS_START"] = time.time()
    tc.context.properties["DATA_FOLDER"] = filesrootfolder
    tc.context.properties["PROCESS_ID"] = process_id

    tc.track_trace('START RUN BATCH INGEST CSV DATA from folder ' +
                   filesrootfolder)
    tc.track_event('BATHCH_INGEST_CSV_START', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {})
    tc.flush()

    tc.flush()
    #print (vm_uuid,deploy_uuid,config_uuid)

    # Prepare COSMOS Link

    url = COSMOS_URL
    #key = os.environ['ACCOUNT_KEY']
    key = COSMOS_KEY
    client = cosmos_client.CosmosClient(url, {'masterKey': key})
    database_id = COSMOS_DATABASE
    container_id = COSMOS_CONTAINER

    database_link = 'dbs/' + database_id
    collection_link = database_link + '/colls/' + container_id

    doc_id = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_Metric'
    doc_link = collection_link + '/docs/' + doc_id

    options = {}
    options['enableCrossPartitionQuery'] = True
    options['maxItemCount'] = 5
    options['partitionKey'] = vm_uuid

    proc_log_doc = None
    try:
        proc_log_doc = client.ReadItem(doc_link, options)
    except:
        print("New Process  Metric Doc")

    if (proc_log_doc is not None):
        print("Find Existing  Metric Doc ")

        # Stop processing if the data has already been processed
        if str(forceinsert).lower() != 'true':
            return 400, doc_id + " has already been processed"

    else:  # New process log
        proc_log_doc = {}
        proc_log_doc["PROCESSES"] = []
        proc_log_doc["DOC_TYPE"] = "PROCESS_METRIC"
        proc_log_doc["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
        proc_log_doc['id'] = doc_id

    tc.track_event('BATHCH_INGEST_METRIC_CSV', {'PROCESS_ID': process_id},
                   {'DATA_FOLDER': filesrootfolder})
    #+'_'+config_uuid+'_'+deploy_uuid , { 'DATA_FOLDER': telemetriespath }
    tc.flush()
    proc_log_this = {}
    proc_log_this["PROCESS_PROGRAM"] = "BATCH_METRIC_CSV_V001a"
    proc_log_this["PROCESS_START"] = time.time()
    proc_log_this["DATA_FOLDER"] = filesrootfolder
    proc_log_this[
        'id'] = vm_uuid + '_' + config_uuid + '_' + deploy_uuid + '_' + str(
            process_id)

    error_files, merged_files, source_files = merge_rename_core_columns_CSV(
        vm_uuid, deploy_uuid, config_uuid, 'defualt_metrics_csv_001A', 0,
        SOURCE_CSV_CONTAINER, filesrootfolder, FILE_OUTPUT_FOLDER, process_id)

    # ToDo  ...
    proc_log_this["PROCESS_ID"] = process_id
    proc_log_this["ERROR_SOURCE_FILES_COUNT"] = len(error_files)
    proc_log_this["SOURCE_FILES_COUNT"] = len(source_files)

    tc.track_metric('BATHCH_INGEST_CSV_ERROR_SOURCE_FILES_COUNT',
                    len(error_files))
    tc.track_metric('BATHCH_INGEST_CSV_ERROR_SOURCE_SOURCE_FILES_COUNT',
                    len(source_files))
    tc.flush()

    # print(str(len(error_files)),'  ',str(len(merged_files)))

    proc_log_this["PROCESS_END"] = time.time()
    proc_log_this["STATUS"] = "OK"

    proc_log_this["STATUS_MESSAGE"] = (
        "It takes %s seconds to ingest  CSV file from Blob Storage") % (
            proc_log_this["PROCESS_END"] - proc_log_this["PROCESS_START"])

    proc_log_doc["PROCESSES"].append(proc_log_this)
    proc_log_doc['LATEST_UPDATE_TIMESTAMP'] = time.time()

    # Update Process Log
    client.UpsertItem(collection_link, proc_log_doc, options)

    tc.track_trace('END RUN BATCH INGEST METRIC CSV DATA from folder ' +
                   filesrootfolder)

    tc.track_event('BATHCH_INGEST_METRIC_CSV_END', {
        'PROCESS_ID': process_id,
        'DATA_FOLDER': filesrootfolder
    }, {
        'DEFECT_FILES_COUNT': len(error_files),
        'MERGED_FILES_COUNT': len(merged_files),
        'SOURCE_FILES_COUNT': len(source_files)
    })
    tc.flush()
Example #17
#Sending an event telemetry item with custom properties and measurements
from applicationinsights import TelemetryClient
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
tc.track_event('Test event', { 'foo': 'bar' }, { 'baz': 42 })
tc.flush()

#Sending a trace telemetry item with custom properties
from applicationinsights import TelemetryClient
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
tc.track_trace('Test trace', { 'foo': 'bar' })
tc.flush()

#Sending a metric telemetry item
from applicationinsights import TelemetryClient
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
tc.track_metric('My Metric', 42)
tc.flush()

#Sending an exception telemetry item with custom properties and measurements
import sys
from applicationinsights import TelemetryClient
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>')
try:
    raise Exception('blah')
except:
    tc.track_exception()

try:
    raise Exception("blah")
except:
    tc.track_exception(*sys.exc_info(), properties={ 'foo': 'bar' }, measurements={ 'x': 42 })
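Building on the snippets above, a hedged sketch of sending telemetry through an explicitly configured asynchronous channel (the same channel classes the AI4EAppInsights example above relies on; the key is a placeholder):

#Sending a metric through an explicitly configured asynchronous channel
from applicationinsights import TelemetryClient
from applicationinsights.channel import AsynchronousQueue, AsynchronousSender, TelemetryChannel, TelemetryContext

sender = AsynchronousSender()
queue = AsynchronousQueue(sender)
context = TelemetryContext()
context.instrumentation_key = '<YOUR INSTRUMENTATION KEY GOES HERE>'
channel = TelemetryChannel(context, queue)
tc = TelemetryClient('<YOUR INSTRUMENTATION KEY GOES HERE>', channel)
tc.track_metric('My Async Metric', 42)
tc.flush()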
Example #18
class ApplicationInsightsTelemetryClient(BotTelemetryClient):
    def __init__(self, instrumentation_key: str):
        self._instrumentation_key = instrumentation_key
        self._client = TelemetryClient(self._instrumentation_key)

        # Telemetry Processor
        def telemetry_processor(data, context):
            post_data = IntegrationPostData().activity_json
            # Override session and user id
            from_prop = post_data['from'] if 'from' in post_data else None
            user_id = from_prop['id'] if from_prop is not None else None
            channel_id = post_data[
                'channelId'] if 'channelId' in post_data else None
            conversation = post_data[
                'conversation'] if 'conversation' in post_data else None
            conversation_id = conversation[
                'id'] if conversation and 'id' in conversation else None
            context.user.id = channel_id + user_id
            context.session.id = conversation_id

            # Additional bot-specific properties
            if 'activityId' in post_data:
                data.properties["activityId"] = post_data['activityId']
            if 'channelId' in post_data:
                data.properties["channelId"] = post_data['channelId']
            if 'activityType' in post_data:
                data.properties["activityType"] = post_data['activityType']

        self._client.add_telemetry_processor(telemetry_processor)

    def track_pageview(self,
                       name: str,
                       url: str,
                       duration: int = 0,
                       properties: Dict[str, object] = None,
                       measurements: Dict[str, object] = None) -> None:
        """
        Send information about the page viewed in the application (a web page for instance).
        :param name: the name of the page that was viewed.
        :param url: the URL of the page that was viewed.
        :param duration: the duration of the page view in milliseconds. (defaults to: 0)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_pageview(name, url, duration, properties,
                                    measurements)

    def track_exception(self,
                        type_exception: type = None,
                        value: Exception = None,
                        tb: traceback = None,
                        properties: Dict[str, object] = None,
                        measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single exception that occurred in the application.
        :param type_exception: the type of the exception that was thrown.
        :param value: the exception that the client wants to send.
        :param tb: the traceback information as returned by :func:`sys.exc_info`.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_exception(type_exception, value, tb, properties,
                                     measurements)

    def track_event(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    measurements: Dict[str, object] = None) -> None:
        """ 
        Send information about a single event that has occurred in the context of the application.
        :param name: the data to associate to this event.
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        """
        self._client.track_event(name, properties, measurements)

    def track_metric(self,
                     name: str,
                     value: float,
                     type: TelemetryDataPointType = None,
                     count: int = None,
                     min: float = None,
                     max: float = None,
                     std_dev: float = None,
                     properties: Dict[str, object] = None) -> NotImplemented:
        """
        Send information about a single metric data point that was captured for the application.
        :param name: The name of the metric that was captured.
        :param value: The value of the metric that was captured.
        :param type: The type of the metric. (defaults to: TelemetryDataPointType.aggregation)
        :param count: the number of metrics that were aggregated into this data point. (defaults to: None)
        :param min: the minimum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param max: the maximum of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param std_dev: the standard deviation of all metrics collected that were aggregated into this data point. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        """
        self._client.track_metric(name, value, type, count, min, max, std_dev,
                                  properties)

    def track_trace(self,
                    name: str,
                    properties: Dict[str, object] = None,
                    severity=None):
        """
        Sends a single trace statement.
        :param name: the trace statement.\n
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)\n
        :param severity: the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
        """
        self._client.track_trace(name, properties, severity)

    def track_request(self,
                      name: str,
                      url: str,
                      success: bool,
                      start_time: str = None,
                      duration: int = None,
                      response_code: str = None,
                      http_method: str = None,
                      properties: Dict[str, object] = None,
                      measurements: Dict[str, object] = None,
                      request_id: str = None):
        """
        Sends a single request that was captured for the application.
        :param name: The name for this request. All requests with the same name will be grouped together.
        :param url: The actual URL for this request (to show in individual request instances).
        :param success: True if the request ended in success, False otherwise.
        :param start_time: the start time of the request. The value should look the same as the one returned by :func:`datetime.isoformat()` (defaults to: None)
        :param duration: the number of milliseconds that this request lasted. (defaults to: None)
        :param response_code: the response code that this request returned. (defaults to: None)
        :param http_method: the HTTP method that triggered this request. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param request_id: the id for this request. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_request(name, url, success, start_time, duration,
                                   response_code, http_method, properties,
                                   measurements, request_id)

    def track_dependency(self,
                         name: str,
                         data: str,
                         type: str = None,
                         target: str = None,
                         duration: int = None,
                         success: bool = None,
                         result_code: str = None,
                         properties: Dict[str, object] = None,
                         measurements: Dict[str, object] = None,
                         dependency_id: str = None):
        """
        Sends a single dependency telemetry that was captured for the application.
        :param name: the name of the command initiated with this dependency call. Low cardinality value. Examples are stored procedure name and URL path template.
        :param data: the command initiated by this dependency call. Examples are SQL statement and HTTP URL with all query parameters.
        :param type: the dependency type name. Low cardinality value for logical grouping of dependencies and interpretation of other fields like commandName and resultCode. Examples are SQL, Azure table, and HTTP. (default to: None)
        :param target: the target site of a dependency call. Examples are server name, host address. (default to: None)
        :param duration: the number of milliseconds that this dependency call lasted. (defaults to: None)
        :param success: true if the dependency call ended in success, false otherwise. (defaults to: None)
        :param result_code: the result code of a dependency call. Examples are SQL error code and HTTP status code. (defaults to: None)
        :param properties: the set of custom properties the client wants attached to this data item. (defaults to: None)
        :param measurements: the set of custom measurements the client wants to attach to this data item. (defaults to: None)
        :param id: the id for this dependency call. If None, a new uuid will be generated. (defaults to: None)
        """
        self._client.track_dependency(name, data, type, target, duration,
                                      success, result_code, properties,
                                      measurements, dependency_id)

    def flush(self):
        """Flushes data in the queue. Data in the queue will be sent either immediately irrespective of what sender is
        being used.
        """
        self._client.flush()