def _process_metrics(self, paths, app, start, end):
    """
    Processes metrics returned from a get_metrics() api call.
    Arguments:
    paths - list of paths returned from get_metric_paths()
    app - the application object
    start - the start datetime object
    end - the end datetime object
    """

    for path in paths:
        if utils.CANCEL_WORKERS_EVENT.is_set():
            break
        self.logger.info('[%s] Getting \'%s\' metrics for %s - %s',
                         app.name, path, start, end)
        metrics = self.appd_client.get_metrics(
            path, app.id, 'BETWEEN_TIMES', None,
            long(utils.unix_time_seconds(start) * 1000),
            long(utils.unix_time_seconds(end) * 1000),
            False)
        for metric in metrics:
            if utils.CANCEL_WORKERS_EVENT.is_set():
                break
            for value in metric.values:
                self.send_metric(self.config.namespace + '|' + path,
                                 value.current,
                                 'appd',  # the source name
                                 long(value.start_time_ms / 1000),
                                 None,  # tags
                                 self.config.get_value_to_send)
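
# The epoch math assumed by the get_metrics() call above:
# utils.unix_time_seconds() is not shown in this file, but it presumably
# converts a datetime into epoch seconds, which the caller scales to the
# millisecond bounds AppDynamics expects. A minimal sketch under that
# assumption:
import datetime

def unix_time_seconds(dt):
    """Seconds since the Unix epoch; works for naive and tz-aware
    datetimes by building the epoch with the same tzinfo."""
    epoch = datetime.datetime(1970, 1, 1, tzinfo=dt.tzinfo)
    return (dt - epoch).total_seconds()

# e.g. the millisecond lower bound: long(unix_time_seconds(start) * 1000)
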
def send_metric(self, name, value, host, timestamp,
                tags=None, value_translator=None):
    """
    Sends the metric to the writer.
    Arguments:
    name - the metric name
    value - the numeric value
    host - the source/host
    timestamp - the timestamp (epoch seconds) or an ISO 8601 UTC string
    value_translator - function that translates the value from its
        current form to something else (returning None drops the point)
    tags - dictionary of tags
    """

    if not isinstance(timestamp, numbers.Number):
        # parse an ISO 8601 UTC string into epoch seconds
        parsed_date = datetime.datetime.strptime(
            timestamp, '%Y-%m-%dT%H:%M:%S+00:00')
        parsed_date = parsed_date.replace(tzinfo=dateutil.tz.tzutc())
        timestamp = utils.unix_time_seconds(parsed_date)

    if value_translator:
        value = value_translator(name, value)
        if value is None:
            return

    attempts = 0
    while attempts < 5 and not utils.CANCEL_WORKERS_EVENT.is_set():
        try:
            self.proxy.transmit_metric(
                self.config.namespace + '.' + utils.sanitize_name(name),
                value, int(timestamp), host, tags)
            break
        except Exception:
            attempts = attempts + 1
            self.logger.warning('Failed to transmit metric %s: %s',
                                name, str(sys.exc_info()))
            if not utils.CANCEL_WORKERS_EVENT.is_set():
                time.sleep(1)
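
# Hypothetical usage of send_metric() above: the value_translator hook
# lets configuration rescale or drop values before transmission, and
# returning None suppresses the point. The translator below is
# illustrative only, not part of this module.
def per_minute_to_per_second(name, value):
    """Convert rate metrics reported per minute into per-second rates."""
    if name.endswith('per Minute'):
        return float(value) / 60.0
    return value

# self.send_metric('Overall|Calls per Minute', 120, 'appd',
#                  1496275200, None, per_minute_to_per_second)
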
def _process_list_metrics_response(self, metrics, sub_account, region):
    """
    Called by _process_cloudwatch_region() after calling the
    list_metrics() API. Loops over all metrics and calls
    GetMetricStatistics() on each one included by the configuration.
    Arguments:
    metrics - the array of metrics returned from ListMetrics() ('Metrics')
    sub_account - the AwsSubAccount object representing the top level account
    region - the name of the region being processed
    """

    cloudwatch_config = self.config.get_region_config(region)
    start = cloudwatch_config.start_time
    end = cloudwatch_config.end_time
    session = sub_account.get_session(region, False)
    cloudwatch = session.client('cloudwatch')
    account_id = sub_account.get_account_id()

    for metric in metrics:
        if utils.CANCEL_WORKERS_EVENT.is_set():
            break
        top = (metric['Namespace']
               .lower()
               .replace('aws/', cloudwatch_config.namespace + '/')
               .replace('/', '.'))
        metric_name = '{}.{}'.format(top, metric['MetricName'].lower())
        point_tags = {'Namespace': metric['Namespace'],
                      'Region': session.region_name,
                      'accountId': account_id}

        config = cloudwatch_config.get_metric_config(
            metric['Namespace'], metric['MetricName'])
        if config is None or len(config['stats']) == 0:
            self.logger.warning('No configuration found for %s/%s',
                                metric['Namespace'], metric['MetricName'])
            continue

        dimensions = metric['Dimensions']
        for dim in dimensions:
            if ('dimensions_as_tags' in config and
                    dim['Name'] in config['dimensions_as_tags']):
                point_tags[dim['Name']] = dim['Value']
            if sub_account.instances and dim['Name'] == 'InstanceId':
                instance_id = dim['Value']
                region_instances = sub_account.get_instances(region)
                if instance_id in region_instances:
                    instance_tags = region_instances[instance_id]
                    for key, value in instance_tags.iteritems():
                        point_tags[key] = value
                else:
                    self.logger.warning('%s not found in region %s',
                                        instance_id, region)

        source, _ = AwsBaseMetricsCommand.get_source(
            config['source_names'], point_tags, dimensions)
        if not source:
            self.logger.warning('Source is not found in %s', str(metric))
            continue

        # query at most one day at a time
        curr_start = start
        if (end - curr_start).total_seconds() > 86400:
            curr_end = curr_start + datetime.timedelta(days=1)
        else:
            curr_end = end

        while (curr_end - curr_start).total_seconds() > 0:
            if utils.CANCEL_WORKERS_EVENT.is_set():
                break
            stats = cloudwatch.get_metric_statistics(
                Namespace=metric['Namespace'],
                MetricName=metric['MetricName'],
                Dimensions=dimensions,
                StartTime=curr_start,
                EndTime=curr_end,
                Period=60,
                Statistics=config['stats'])
            number_of_stats = len(config['stats'])
            for stat in stats['Datapoints']:
                for statname in config['stats']:
                    if utils.CANCEL_WORKERS_EVENT.is_set():
                        return
                    short_name = STAT_SHORT_NAMES[statname]
                    if (number_of_stats == 1 and
                            cloudwatch_config.has_suffix_for_single_stat):
                        full_metric_name = metric_name
                    else:
                        full_metric_name = metric_name + '.' + short_name

                    # remove point tags that we don't need for WF
                    if 'Namespace' in point_tags:
                        del point_tags['Namespace']

                    # send the metric to the proxy
                    tstamp = int(utils.unix_time_seconds(stat['Timestamp']))
                    self.proxy.transmit_metric(full_metric_name,
                                               stat[statname], tstamp,
                                               source, point_tags)

            curr_start = curr_end
            if (end - curr_start).total_seconds() > 86400:
                curr_end = curr_start + datetime.timedelta(days=1)
            else:
                curr_end = end
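
# The chunking loop above queries CloudWatch one day at a time:
# get_metric_statistics() returns at most 1,440 datapoints per call,
# which at Period=60 is exactly one day per statistic. A standalone
# sketch of that windowing, assuming start/end are datetimes as above:
import datetime

def day_windows(start, end):
    """Yield (curr_start, curr_end) pairs covering [start, end] in
    chunks of at most 24 hours."""
    curr_start = start
    while (end - curr_start).total_seconds() > 0:
        curr_end = min(curr_start + datetime.timedelta(days=1), end)
        yield curr_start, curr_end
        curr_start = curr_end
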
def _process_metrics(self, paths, app, start, end):
    """
    Processes metrics returned from a get_metrics() api call.
    Arguments:
    paths - list of paths returned from get_metric_paths()
    app - the application object
    start - the start datetime object
    end - the end datetime object
    """

    metric_counter = 0
    for path in paths:
        if utils.CANCEL_WORKERS_EVENT.is_set():
            break
        self.logger.debug('Processing metrics under path %s', path)
        self.logger.info('[%s] Getting \'%s\' metrics for %s - %s',
                         app.name, path, start, end)

        # make sure the * wildcards have the correct number of path
        # components and match up with the expansions below
        if ('Business' in path and
                'Business Transaction Performance|Business Transactions|*|*|*'
                not in path):
            # the last 3 components of the metric path should be
            # 'tier_name|bt_name|metric_name'
            path = ('Business Transaction Performance|'
                    'Business Transactions|*|*|*')
            if self.config.retrieve_BT_node_data:
                # this should be
                # 'tier_name|bt_name|individual_nodes|node_name|metric_name'
                bt_node_path = ('Business Transaction Performance|'
                                'Business Transactions|*|*|*|*|*')
                if bt_node_path not in paths:
                    self.logger.debug('adding tier_name|bt_name|'
                                      'individual_nodes|node_name|metric_name '
                                      'to business transactions')
                    paths.append(bt_node_path)
        if 'Backends' in path:
            # the last two components of the metric path should be
            # 'backend_name|metric_name'
            path = 'Backends|*|*'
        if 'End User Experience|*' in path:
            path = 'End User Experience|*|*'
            if self.config.retrieve_EUM_AJAX_data:
                if 'End User Experience|AJAX Requests|*|*' not in paths:
                    paths.append('End User Experience|AJAX Requests|*|*')
        if 'Errors' in path:
            # tier level error stats
            path = 'Errors|*|*|*'
            if self.config.retrieve_error_node_data:
                # individual node level error stats
                if 'Errors|*|*|*|*|*' not in paths:
                    paths.append('Errors|*|*|*|*|*')
        if 'Application Infrastructure Performance' in path:
            if self.config.retrieve_Application_Infrastructure_Performance_node_data:
                # e.g. Application Infrastructure Performance|abtest-consumer|
                # Individual Nodes|16f60b849273|JVM|Garbage Collection|
                # GC Time Spent Per Min (ms)
                if 'Application Infrastructure Performance|*|*|*|JVM|*|*' not in paths:
                    paths.append(
                        'Application Infrastructure Performance|*|*|*|JVM|*|*')

        try:
            metrics = self.appd_client.get_metrics(
                path, app.id, 'BETWEEN_TIMES', None,
                long(utils.unix_time_seconds(start) * 1000),
                long(utils.unix_time_seconds(end) * 1000),
                False)
        except Exception:
            self.logger.warning('Unexpected error: %s', sys.exc_info()[0])
            continue

        for metric in metrics:
            if utils.CANCEL_WORKERS_EVENT.is_set():
                break
            for value in metric.values:
                if '|/' in metric.path:
                    metric.path = str(metric.path).replace('|/', '.')
                metric_counter += 1
                self.send_metric(app.name + '|' + metric.path,
                                 value.current,
                                 'appd',  # the source name
                                 long(value.start_time_ms / 1000),
                                 None,  # tags
                                 self.config.get_value_to_send)

    self.global_points_counter += metric_counter
    self.logger.info('Number of AppDynamics points processed in this run [%s]',
                     metric_counter)
    self.logger.info('Total points processed since beginning: %s',
                     self.global_points_counter)
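
# A table-driven restatement of the wildcard normalization above, for
# readers tracing the path shapes; the expansions are taken verbatim
# from the branches. This is a sketch only, not a drop-in replacement:
# the branches above also append extra node-level paths depending on
# configuration flags.
PATH_EXPANSIONS = [
    # (substring to match, normalized wildcard path)
    ('Business',
     'Business Transaction Performance|Business Transactions|*|*|*'),
    ('Backends', 'Backends|*|*'),
    ('End User Experience|*', 'End User Experience|*|*'),
    ('Errors', 'Errors|*|*|*'),
]

def normalize_path(path):
    """Return the normalized wildcard form of an AppDynamics path."""
    for needle, expansion in PATH_EXPANSIONS:
        if needle in path and expansion not in path:
            return expansion
    return path
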
def _process_csv_row(self, row, config):
    """
    Processes a single CSV row: builds point tags, resolves the source,
    parses the timestamp, and transmits each configured metric.
    """

    # point tags
    point_tags = {}
    for header, point_tag_key in config.dimensions.iteritems():
        if row[header]:
            point_tags[point_tag_key] = row[header]

    # point tags from ec2 instance
    #pylint: disable=too-many-nested-blocks
    if config.instance_id_columns:
        found_instance = False
        for header in config.instance_id_columns:
            instance_id = row[header]
            # e.g. arn:aws:ec2:us-east-1:011750033084:instance/i-33ac36e5
            if instance_id and instance_id[0:12] == 'arn:aws:ec2:':
                parts = instance_id.split(':')
                instance_id = parts[5].split('/')[1]
                point_tags['region'] = parts[3]
            if not instance_id or instance_id[0:2] != 'i-':
                continue
            for region in self.account.regions:
                for sub_account in self.account.get_sub_accounts():
                    instances = sub_account.get_instances(region)
                    if instance_id in instances:
                        instance_tags = instances[instance_id]
                        for key, value in instance_tags.iteritems():
                            point_tags[key] = value
                        found_instance = True
                        break
                if found_instance:
                    break
            if found_instance:
                break

    # source names
    source, source_name = AwsBaseMetricsCommand.get_source(
        config.source_names, point_tags)
    if source_name in point_tags:
        del point_tags[source_name]

    # timestamp
    tstamp = None
    tstamp_col_values = []
    for header, date_fmt in config.dates.iteritems():
        if row[header]:
            tstamp_col_values.append(row[header])
            tstamp = utils.unix_time_seconds(
                datetime.datetime.strptime(row[header], date_fmt))
    if not tstamp:
        self.logger.warning('Unable to find valid date in columns (%s) '
                            '|%s|. Record is:\n\t%s',
                            ', '.join(config.dates.keys()),
                            ', '.join(tstamp_col_values),
                            str(row))
        return

    # calculate duration
    if config.duration and len(config.duration) == 2:
        start = config.duration[0].split('|')
        start_dt = datetime.datetime.strptime(row[start[0]], start[1])
        start_tstamp = utils.unix_time_seconds(start_dt)
        end = config.duration[1].split('|')
        end_dt = datetime.datetime.strptime(row[end[0]], end[1])
        end_tstamp = utils.unix_time_seconds(end_dt)
        duration = end_tstamp - start_tstamp
    else:
        duration = 0

    # metric and value
    for header, metric_name in config.metrics.iteritems():
        if config.namespace:
            metric = config.namespace + '.' + metric_name
        else:
            metric = metric_name
        value = row[header]
        if not value:
            value = 0.0
        # send the metric to the proxy
        self.proxy.transmit_metric(metric, value, long(tstamp),
                                   source, point_tags)
        if duration:
            self.proxy.transmit_metric(metric + '.duration', duration,
                                       long(tstamp), source, point_tags)
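
# A standalone sketch of the ARN handling in _process_csv_row() above.
# EC2 instance ARNs have the form
# arn:aws:ec2:<region>:<account-id>:instance/<instance-id>, so after
# splitting on ':' the region is at index 3 and 'instance/<id>' at
# index 5.
def parse_ec2_arn(value):
    """Return (instance_id, region) for an EC2 instance ARN, or
    (value, None) if the value is not an ARN."""
    if value and value[0:12] == 'arn:aws:ec2:':
        parts = value.split(':')
        return parts[5].split('/')[1], parts[3]
    return value, None

# parse_ec2_arn('arn:aws:ec2:us-east-1:011750033084:instance/i-33ac36e5')
# -> ('i-33ac36e5', 'us-east-1')
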