def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        # Read both credential options once; if either is missing we fall
        # back to boto's own authentication chain.
        access_key = get_global_option('aws_access_key_id')
        secret_key = get_global_option('aws_secret_access_key')
        if access_key and secret_key:
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=access_key,
                aws_secret_access_key=secret_key)
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        # Surface the failure to the caller after logging it.
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        # Prefer explicit credentials from the configuration file; otherwise
        # defer to boto's own credential resolution (env vars, config, role).
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug('Authenticating to CloudWatch using '
                         'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug('Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        # Log and re-raise so the caller decides how to handle the failure.
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error('Please report an issue at: '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
def metrics_catalog(region):
    '''Build a catalog of available metrics

    Returns a dict keyed by resource family ('ec2', 'ebs', 'elb', 'rds');
    each value maps a boto resource object to the CloudWatch metrics
    available for it.
    '''
    conn_ec2 = ec2.connect_to_region(region)
    conn_elb = elb.connect_to_region(region)
    conn_rds = rds.connect_to_region(region)
    conn_cloudwatch = cloudwatch.connect_to_region(region)
    catalog = {'ec2':{}, 'ebs':{}, 'elb':{}, 'rds':{}}
    # EC2 instances
    for reservation in conn_ec2.get_all_instances():
        for instance in reservation.instances:
            catalog['ec2'][instance] = conn_cloudwatch.list_metrics(
                dimensions={'InstanceId': [instance.id]})
    # EBS Volumes
    for volume in conn_ec2.get_all_volumes():
        catalog['ebs'][volume] = conn_cloudwatch.list_metrics(
            dimensions={'VolumeId': [volume.id]})
    # ELB instances
    for balancer in conn_elb.get_all_load_balancers():
        catalog['elb'][balancer] = conn_cloudwatch.list_metrics(
            dimensions={'LoadBalancerName': [balancer.name]})
    # RDS instances
    for instance in conn_rds.get_all_dbinstances():
        catalog['rds'][instance] = conn_cloudwatch.list_metrics(
            dimensions={'DBInstanceIdentifier': [instance.id]})
    return catalog
def __init__(self, namespace="Statsd"):
    # CloudWatch namespace under which collected metrics are published.
    self.namespace = namespace
    # Accumulated metrics, flushed when flush_due passes.
    self.metrics = {}
    self.flush_due = datetime.datetime.now()
    # Region = availability zone from instance metadata minus its trailing
    # letter (e.g. 'us-east-1a' -> 'us-east-1').
    self.region = get_instance_metadata(
        )["placement"]["availability-zone"][:-1]
    self.cloudwatch = connect_to_region(self.region)
def __init__(self):
    # Current host and region
    # Region = availability zone minus its trailing letter.
    self.region = boto.utils.get_instance_metadata()['placement']['availability-zone'][:-1]
    self.instance_id = boto.utils.get_instance_metadata()['instance-id']
    # Connect to current host region using IAM credentials
    self.c = cloudwatch.connect_to_region(self.region)
def metrics_catalog(region):
    '''Build a catalog of available metrics

    Maps each EC2 instance, EBS volume, ELB balancer and RDS instance in
    `region` to the CloudWatch metrics available for it.
    '''
    conn_ec2 = ec2.connect_to_region(region)
    conn_elb = elb.connect_to_region(region)
    conn_rds = rds.connect_to_region(region)
    conn_cloudwatch = cloudwatch.connect_to_region(region)
    catalog = {'ec2': {}, 'ebs': {}, 'elb': {}, 'rds': {}}
    # EC2 instances
    for reservation in conn_ec2.get_all_instances():
        for instance in reservation.instances:
            catalog['ec2'][instance] = conn_cloudwatch.list_metrics(
                dimensions={'InstanceId': [instance.id]})
    # EBS Volumes
    for volume in conn_ec2.get_all_volumes():
        catalog['ebs'][volume] = conn_cloudwatch.list_metrics(
            dimensions={'VolumeId': [volume.id]})
    # ELB instances
    for balancer in conn_elb.get_all_load_balancers():
        catalog['elb'][balancer] = conn_cloudwatch.list_metrics(
            dimensions={'LoadBalancerName': [balancer.name]})
    # RDS instances
    for instance in conn_rds.get_all_dbinstances():
        catalog['rds'][instance] = conn_cloudwatch.list_metrics(
            dimensions={'DBInstanceIdentifier': [instance.id]})
    return catalog
def main(argv=sys.argv):
    """Delete CloudWatch alarms whose EC2 instance no longer exists.

    Expects one CLI argument: the path to the application config file.
    """
    global logger, settings, ca, ri, aws_access, aws_secret, thekey
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    logger = logging.getLogger('scripts')
    aws_access = settings.get('aws.access') or None
    aws_secret = settings.get('aws.secret') or None
    aws_region = settings.get('aws.region') or None
    if not aws_region:
        print "ERROR: Settings missing 'aws.region' value, please define the region and try again."
        return
    ec2_conn = ec2.connect_to_region(aws_region,
                                     aws_access_key_id=aws_access,
                                     aws_secret_access_key=aws_secret)
    cw_conn = cw.connect_to_region(aws_region,
                                   aws_access_key_id=aws_access,
                                   aws_secret_access_key=aws_secret)
    # Walk all alarms; any alarm tied to an InstanceId that EC2 no longer
    # reports (empty result or NotFound error) is considered orphaned.
    for alarm in cw_conn.describe_alarms():
        if 'InstanceId' in alarm.dimensions:
            instance_ids = alarm.dimensions['InstanceId']
            try:
                if not ec2_conn.get_only_instances(instance_ids=instance_ids):
                    logger.info('Deleting alarm %s for instance %s which no longer exists', alarm.name, instance_ids)
                    cw_conn.delete_alarms([alarm.name])
            except EC2ResponseError as e:
                # A NotFound error also means the instance is gone.
                if 'InvalidInstanceID.NotFound' in e.body:
                    logger.info('Deleting alarm %s for instance %s which no longer exists', alarm.name, instance_ids)
                    cw_conn.delete_alarms([alarm.name])
                continue
def get_cloudwatch_conn():
    """Return a boto CloudWatch connection for the configured region."""
    # Function-local imports, as in the original.
    from snaptastic import settings
    from boto.ec2 import cloudwatch
    return cloudwatch.connect_to_region(
        settings.REGION,
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
def _connect(self):
    """Lazily create and cache the CloudWatch connection for this region."""
    if not hasattr(self, '_cw'):
        self._cw = cloudwatch.connect_to_region(self.region,
                                                profile_name=self.profile)
    return self._cw
def _create_connection(self):
    """Open a CloudWatch connection using the options supplied at construction."""
    opts = self.options
    return cloudwatch.connect_to_region(
        opts.get('region_name'),
        aws_access_key_id=opts.get('aws_access_key_id'),
        aws_secret_access_key=opts.get('aws_secret_access_key'))
def SumRequests(ELB_NAME):
    """Return the first datapoint's statistic for the first configured metric.

    Queries CloudWatch ELB statistics for the last MINUTES minutes.
    NOTE(review): the inner `return` exits on the first datapoint of the
    first entry in `metrics` -- presumably intentional, but verify.
    """
    conn = cloudwatch.connect_to_region(AWS_REGION,
                                        aws_access_key_id=AWS_ACCESS_KEY,
                                        aws_secret_access_key=AWS_SECRET_KEY)
    end = datetime.datetime.utcnow()
    start = end - datetime.timedelta(minutes=MINUTES)
    for k, m in metrics.items():
        try:
            # print k
            res = conn.get_metric_statistics(
                PERIOD, start, end, k, "AWS/ELB", m['stat'],
                dimensions={"LoadBalancerName": ELB_NAME})
        # Python 2 except syntax; any query failure exits with a WARN status.
        except Exception, e:
            print "WARN - status err Error running elb stats: %s" % e.message
            sys.exit(1)
        for r in res:
            return '%d' % r[m['stat']]
def __init__(self):
    # Current host and region
    # Region = availability zone from instance metadata minus its trailing
    # letter (e.g. 'us-east-1a' -> 'us-east-1').
    self.region = boto.utils.get_instance_metadata(
        )['placement']['availability-zone'][:-1]
    self.instance_id = boto.utils.get_instance_metadata()['instance-id']
    # Connect to current host region using IAM credentials
    self.c = cloudwatch.connect_to_region(self.region)
def send_process_list_metrics(instance_id, region, metrics, namespace,
                              unit='Count'):
    """Publish a batch of metrics to CloudWatch for one EC2 instance.

    `metrics` is a mapping of metric name -> value; every datapoint is
    tagged with the instance id dimension.
    """
    conn = cloudwatch.connect_to_region(region)
    conn.put_metric_data(namespace, metrics.keys(), metrics.values(),
                         unit=unit, dimensions={"InstanceId": instance_id})
def aws_conn_cloudwatch(region, profile='default'):
    """Connect to CloudWatch in `region` using the named boto profile.

    Returns the connection, or None (after logging the error) when the
    connection attempt fails.
    """
    try:
        return cloudwatch.connect_to_region(region, profile_name=profile)
    except Exception as e:
        logging.error(
            "Unable to connect to region, please investigate: {0}".format(
                e))
def get_cloudwatch_conn():
    """Return a CloudWatch connection built from snaptastic settings."""
    # Imports are function-local, matching the original structure.
    from snaptastic import settings
    from boto.ec2 import cloudwatch
    conn = cloudwatch.connect_to_region(
        settings.REGION,
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    return conn
def make_CW_connection(region_name, aws_access_key_id, aws_secret_access_key):
    """Open a CloudWatch connection to an AWS account.

    Takes the region name plus the AWS access key id and secret access key,
    and returns the resulting connection object.
    """
    return connect_to_region(
        region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
def send_multi_metrics(instance_id, region, metrics, namespace='EC2/Memory',
                       unit='Percent'):
    ''' Send multiple metrics to CloudWatch
    metrics is expected to be a map of key -> value pairs of metrics '''
    # BUG FIX: previously connected to a hard-coded 'us-east-1' with
    # placeholder credentials ('####'/'###'), ignoring the `region`
    # parameter entirely.  Use the caller-supplied region and let boto's
    # standard credential chain (env vars, config file, IAM role) provide
    # credentials instead of embedding them in source.
    cw = cloudwatch.connect_to_region(region)
    cw.put_metric_data(namespace, metrics.keys(), metrics.values(),
                       unit=unit, dimensions={"InstanceId": instance_id})
def send_multi_metrics(instance_id, region, metrics, namespace='EC2/Memory',
                       unit='Percent'):
    ''' Send multiple metrics to CloudWatch
    metrics is expected to be a map of key -> value pairs of metrics '''
    # SECURITY FIX: a real AWS access key / secret key pair was committed
    # here in plain text (and the `region` parameter was ignored in favour
    # of a hard-coded 'us-east-1').  The leaked key pair must be revoked in
    # IAM.  Credentials now come from boto's standard chain (env vars,
    # config file, IAM instance role).
    cw = cloudwatch.connect_to_region(region)
    cw.put_metric_data(namespace, metrics.keys(), metrics.values(),
                       unit=unit, dimensions={"InstanceId": instance_id})
def send_multi_metrics(instance_id, region, metrics, namespace='EC2/Memory',
                       unit='Percent'):
    """Push a mapping of metric name -> value to CloudWatch for one instance."""
    conn = cloudwatch.connect_to_region(region)
    conn.put_metric_data(namespace, metrics.keys(), metrics.values(),
                         unit=unit, dimensions={"InstanceId": instance_id})
def main(argv=sys.argv):
    """Create or update a max-CPU CloudWatch alarm per ES cluster node.

    Expects one CLI argument: the path to the application config file.
    """
    global logger, settings, ca, ri, aws_access, aws_secret, thekey
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    logger = logging.getLogger('scripts')
    aws_access = settings.get('aws.access') or None
    aws_secret = settings.get('aws.secret') or None
    aws_region = settings.get('aws.region') or None
    arn = settings.get('alarm.arn')
    # Alarm tuning knobs; defaults are strings, as boto accepts them.
    period = settings.get('alarm.period') or '300'
    evalperiods = settings.get('alarm.evalperiods') or '1'
    cpu_threshold = settings.get('alarm.cpu.threshold') or '60.0'
    es_user = settings.get('es.user')
    es_pass = settings.get('es.pass')
    if not aws_region:
        print "ERROR: Settings missing 'aws.region' value, please define the region and try again."
        return
    ec2_conn = ec2.connect_to_region(aws_region,
                                     aws_access_key_id=aws_access,
                                     aws_secret_access_key=aws_secret)
    cw_conn = cw.connect_to_region(aws_region,
                                   aws_access_key_id=aws_access,
                                   aws_secret_access_key=aws_secret)
    cluster_info = compiled_cluster_info(ec2_conn, es_user, es_pass)
    for groupname, nodes in cluster_info.items():
        for nodename, info in nodes.items():
            instance_id = info['ec2instance_id']
            # One alarm per instance, named "<cluster>-maxcpu-<instance>".
            alarm_name = "%s-maxcpu-%s" % (groupname, instance_id)
            dakwargs = {
                'name': alarm_name,
                'metric': 'CPUUtilization',
                'namespace': 'AWS/EC2',
                'statistic': 'Maximum',
                'comparison': '>',
                'threshold': cpu_threshold,
                'period': period,
                'evaluation_periods': evalperiods,
                'description': 'CPU alarm for instance %s in cluster %s' % (nodename, groupname),
                'dimensions': {'InstanceId':instance_id},
                'alarm_actions': [arn]
            }
            alarm = cw.alarm.MetricAlarm(**dakwargs)
            # creates OR updates the alarm.
            cw_conn.create_alarm(alarm)
            logger.info('Created/updated alarm \'%s\' for cluster %s, instance %s',
                        alarm_name, groupname, instance_id)
def __init__(self, name, describe=None, region='us-east-1', conn=None):
    """Set up the alarm wrapper; builds a default describe filter and a
    region connection when the caller does not provide them."""
    super(CloudWatchAlarm, self).__init__(name)
    self.describe = describe if describe else {'alarm_names': self.name}
    self.region = region
    self.conn = conn if conn else cw.connect_to_region(self.region)
def send_multi_metrics(instance_id, region, metrics, unit='Count',
                       namespace='EC2/Redis'):
    """Publish each entry of `metrics` (name -> value) as a CloudWatch
    datapoint tagged with the given instance id."""
    conn = cloudwatch.connect_to_region(region)
    conn.put_metric_data(namespace, metrics.keys(), metrics.values(),
                         unit=unit, dimensions={"InstanceId": instance_id})
def collect(self):
    """Collect billing data from CloudWatch for the configured interval."""
    if not cloudwatch:
        # boto's cloudwatch module failed to import; nothing we can do.
        self.log.error("boto module not found!")
        return
    now = datetime.datetime.utcnow()
    # Align the query window to the start of the current minute.
    end_time = now.replace(second=0, microsecond=0)
    start_time = end_time - datetime.timedelta(seconds=self.interval)
    # NOTE(review): region is hard-coded to 'us-east-1', presumably because
    # AWS publishes billing metrics there -- confirm.
    conn = cloudwatch.connect_to_region('us-east-1', **self.auth_kwargs)
    self.process_billing(conn, start_time, end_time)
def put_cloudwatch_metric(cw_metric_object):
    """Publish a single pre-built metric dict to CloudWatch.

    `cw_metric_object` must carry 'namespace', 'name', 'value', 'unit',
    'timestamp', 'dimensions' and 'statistics' keys.
    NOTE(review): relies on a module-level `aws_region` -- confirm it is
    defined before this is called.
    """
    print cw_metric_object,"cw_metric_object"
    cw = cloudwatch.connect_to_region(aws_region)
    cw.put_metric_data(cw_metric_object["namespace"],
                       cw_metric_object["name"],
                       value = cw_metric_object["value"],
                       unit = cw_metric_object["unit"],
                       timestamp = cw_metric_object["timestamp"],
                       dimensions = cw_metric_object["dimensions"],
                       statistics = cw_metric_object["statistics"])
    print "Successfully sent metrics to cloudwatch"
def __init__(self, access_key_id, secret_access_key, region_name):
    """Open EC2 and CloudWatch connections for the given region with
    explicit credentials."""
    self._ec2_conn = ec2.connect_to_region(
        region_name,
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    logging.info('Initialized aws connection to %s' % region_name)
    self._cloudwatch_conn = cloudwatch.connect_to_region(
        region_name,
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key)
    logging.info('Initialized cloud watch connection to %s' % region_name)
    self._region_name = region_name
def send_multi_metrics(instance_id, region, metrics,
                       namespace='LMS/EC2/Memory', unit='Percent'):
    """Send a batch of metrics to CloudWatch.

    `metrics` is a mapping of metric name -> value; each datapoint is
    tagged with the instance id.
    """
    conn = cloudwatch.connect_to_region(region)
    conn.put_metric_data(namespace, metrics.keys(), metrics.values(),
                         unit=unit, dimensions={"InstanceId": instance_id})
def send_conn_metrics(instance_id, region, metrics, namespace, unit="Count"):
    """Send connection metrics to CloudWatch.

    :param str instance_id: EC2 instance id that we are monitoring
    :param str region: AWS region
    :param dict metrics: connection metrics (name -> value)
    :param str namespace: name of the metrics
    :param str unit: unit of the monitored metric
    """
    conn = cloudwatch.connect_to_region(region)
    conn.put_metric_data(namespace, metrics.keys(), metrics.values(),
                         unit=unit, dimensions={"InstanceId": instance_id})
def collect(self):
    """Collect CloudWatch data for every configured region."""
    if not self.check_boto():
        return
    now = datetime.datetime.utcnow()
    # Align the query window to the start of the current minute.
    end_time = now.replace(second=0, microsecond=0)
    start_time = end_time - datetime.timedelta(seconds=self.interval)
    # One connection per region listed in the configuration.
    for region in self.config['regions'].keys():
        region_cw_conn = cloudwatch.connect_to_region(region,
                                                      **self.auth_kwargs)
        self.process_region(region_cw_conn, start_time, end_time)
def send(self):
    """Publish accumulated metrics, tagged by instance id and, when the
    instance belongs to one, by auto-scaling group name."""
    metadata = get_instance_metadata()
    instance_id = metadata['instance-id']
    # Region = availability zone minus its trailing letter.
    region = metadata['placement']['availability-zone'][0:-1]
    cw = cloudwatch.connect_to_region(region)
    group = self._get_auto_scaling_group_name(instance_id, region)
    # self._metrics maps unit -> {metric name: value}.
    for (unit, metrics) in self._metrics.items():
        cw.put_metric_data('EC2', metrics.keys(), metrics.values(),
                           unit=unit, dimensions={'InstanceId': instance_id})
        if group:
            # Duplicate the datapoints under the ASG dimension as well.
            cw.put_metric_data('EC2', metrics.keys(), metrics.values(),
                               unit=unit,
                               dimensions={'AutoScalingGroupName': group})
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    try:
        # Use explicit credentials from the configuration when both are set,
        # otherwise defer to boto's default credential resolution.
        if (configuration['global']['aws_access_key_id'] and
                configuration['global']['aws_secret_access_key']):
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'],
                aws_access_key_id=configuration['global']['aws_access_key_id'],
                aws_secret_access_key=configuration['global']['aws_secret_access_key'])
        else:
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'])
    except Exception as err:
        # Log and re-raise so the caller decides how to handle the failure.
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error('Please report an issue at: '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch')
    return connection
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    # (docstring previously said "SNS"; this function connects to CloudWatch)
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            # Explicit credentials from the configuration file.
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                # On EC2: derive region from the AZ in instance metadata and
                # authenticate via the instance's IAM profile.
                logger.debug(
                    'Authenticating to CloudWatch using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = cloudwatch.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                # Metadata keys missing (not on EC2): fall back to boto's
                # env var / configuration based authentication.
                logger.debug(
                    'Authenticating to CloudWatch using '
                    'env vars / boto configuration')
                connection = cloudwatch.connect_to_region(
                    get_global_option('region'))
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(
        get_global_option('region')))
    return connection
def __init__(self, region, namespace, dimensions, interval=5, headers=None,
             **kwargs):
    """Store reporting configuration and open the CloudWatch connection.

    Extra keyword arguments are forwarded to `cw.connect_to_region`.
    """
    self._refresh_interval = interval
    self._region = region
    self._namespace = namespace
    self._dimensions = dimensions
    # BUG FIX: `headers=[]` was a mutable default argument shared across
    # every instance; use the None sentinel and create a fresh list.
    self._headers = headers if headers is not None else []
    self._conn = cw.connect_to_region(self._region, **kwargs)
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    try:
        # Explicit configuration credentials take precedence; otherwise
        # boto's own credential resolution is used.
        if (configuration['global']['aws_access_key_id'] and
                configuration['global']['aws_secret_access_key']):
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'],
                aws_access_key_id=configuration['global']['aws_access_key_id'],
                aws_secret_access_key=configuration['global']['aws_secret_access_key'])
        else:
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'])
    except Exception as err:
        # Log and re-raise so the caller decides how to handle the failure.
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch')
    return connection
def __init__(self):
    # GPIO pin driving the pump relay.
    self._pin_pump = 11
    # Irrigation zone number -> GPIO pin.
    self._zone_to_pin = {1: 12, 2: 13, 3: 15}
    # use board index scheme so mapping will work with different hardware versions
    GPIO.setmode(GPIO.BOARD)
    # Python 2: dict.values() returns a list, so append works here.
    self._all_pins = self._zone_to_pin.values()
    self._all_pins.append(self._pin_pump)
    # relay board uses reversed logic (high = off)
    GPIO.setup(self._all_pins, GPIO.OUT, initial=1)
    self._cloudwatch = cloudwatch.connect_to_region('us-west-2')
    return
def __init__(self, namespace, region_name='us-east-1', dimensions={},
             alarms={}, actions={}, **kwargs):
    # NOTE(review): mutable default arguments ({}) are shared between
    # calls; safe only while they are never mutated in place -- verify.
    Emitter.__init__(self)
    self.namespace = namespace
    self.conn = connect_to_region(region_name, **kwargs)
    # Set our dimensions, including instance ID
    self.dims = dimensions or {}
    self.setInstanceId()
    # Make sure our actions exist...
    self.actions = {}
    # Store our AWS credential args for later
    self.kwargs = kwargs
    self.updateActions(actions)
    # Now update our alarms
    self.updateAlarms(alarms)
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    # (docstring previously said "SNS"; this function connects to CloudWatch)
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            # Explicit credentials from the configuration file.
            logger.debug('Authenticating to CloudWatch using '
                         'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                # On EC2: derive region from the AZ in instance metadata
                # and authenticate via the instance's IAM profile.
                logger.debug(
                    'Authenticating to CloudWatch using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = cloudwatch.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info']
                    [u'InstanceProfileArn'])
            except KeyError:
                # Metadata keys missing (not on EC2): fall back to boto's
                # env var / configuration based authentication.
                logger.debug('Authenticating to CloudWatch using '
                             'env vars / boto configuration')
                connection = cloudwatch.connect_to_region(
                    get_global_option('region'))
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error('Please report an issue at: '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise
    logger.debug('Connected to CloudWatch in {0}'.format(
        get_global_option('region')))
    return connection
def send_multi_metrics(instance_id, region, metrics, namespace='EC2/Memory',
                       unit='Percent'):
    ''' Send multiple metrics to CloudWatch
    metrics is expected to be a map of key -> value pairs of metrics '''
    asg = autoscale.AutoScaleConnection()
    # Assumes the instance belongs to an auto-scaling group; raises
    # IndexError otherwise.
    asg_name = asg.get_all_autoscaling_instances(
        instance_ids=[instance_id])[0].group_name
    cw = cloudwatch.connect_to_region(region)
    cw.put_metric_data(namespace, metrics.keys(), metrics.values(),
                       unit=unit, dimensions={"InstanceId": instance_id})
    # Publish the same datapoints again, keyed by the ASG name.
    cw.put_metric_data(namespace, metrics.keys(), metrics.values(),
                       unit=unit,
                       dimensions={"AutoScalingGroupName": asg_name})
def send_custom_metrics(instance_id, region, metrics, namespace, unit):
    """Report metrics under the instance's auto-scaling group dimension.

    Nothing is published when the instance carries no ASG tag.
    """
    cw_conn = cloudwatch.connect_to_region(region)
    ec2_conn = boto.connect_ec2()
    instance = ec2_conn.get_only_instances(instance_ids=[instance_id])[0]
    asg_tag = "aws:autoscaling:groupName"
    if asg_tag in instance.tags:
        cw_conn.put_metric_data(
            namespace, metrics.keys(), metrics.values(), unit=unit,
            dimensions={"AutoScalingGroupName": instance.tags[asg_tag]})
def put_data(args, log):
    "Get RQ data and send to CloudWatch"
    log.info('put_data()')
    cw = cloudwatch.connect_to_region(args['--region'])

    def put_metrics(metrics, dimensions):
        # Tag every datapoint with the deployment environment; honours the
        # --no-cloudwatch dry-run flag.
        dimensions['env'] = args['--env']
        log.info('{} --> {}'.format(dimensions, metrics))
        if not args['--no-cloudwatch']:
            cw.put_metric_data('RQ', list(metrics.keys()),
                               list(metrics.values()), unit='Count',
                               dimensions=dimensions)

    try:
        redis = StrictRedis.from_url(args['--url'])
        redis.ping()
    except Exception as e:
        log.error('Unable to connect to redis: {}'.format(e))
        return
    # group workers by queue
    workers_by_queue = {}
    for w in Worker.all(connection=redis):
        for q in w.queues:
            ws = workers_by_queue.get(q, [])
            ws.append(w)
            workers_by_queue[q] = ws
    for q in workers_by_queue:
        # report queue level rollup
        put_metrics({
            'jobs': len(q),
            'workers': len(workers_by_queue[q])
        }, {'queue': q.name})
        # report workers for each queue in each worker state
        states = {}
        for w in workers_by_queue[q]:
            count = states.get(w.state, 0) + 1
            states[w.state] = count
        for state in states:
            put_metrics({'workers': states[state]}, {
                'queue': q.name,
                'state': state.decode(),
            })
def __init__(self, namespace, region_name=None, dimensions={}, alarms={},
             actions={}, **kwargs):
    # NOTE(review): mutable default arguments ({}) are shared between
    # calls; safe only while they are never mutated in place -- verify.
    Emitter.__init__(self)
    if not region_name:
        # Default to the region this instance runs in (AZ minus its letter).
        region_name = get_instance_metadata()['placement']['availability-zone'][:-1]
    self.namespace = namespace
    self.conn = connect_to_region(region_name, **kwargs)
    # Set our dimensions, including instance ID
    self.dims = dimensions or {}
    self.setInstanceId()
    # Make sure our actions exist...
    self.actions = {}
    # Store our AWS credential args for later
    self.kwargs = kwargs
    self.updateActions(actions)
    # Now update our alarms
    self.updateAlarms(alarms)
def __init__(self):
    # GPIO pin driving the pump relay.
    self._pin_pump = 11
    # Irrigation zone number -> GPIO pin.
    self._zone_to_pin = { 1 : 12, 2 : 13, 3 : 15 }
    # use board index scheme so mapping will work with different hardware versions
    GPIO.setmode(GPIO.BOARD)
    # Python 2: dict.values() returns a list, so append works here.
    self._all_pins = self._zone_to_pin.values()
    self._all_pins.append(self._pin_pump)
    # relay board uses reversed logic (high = off)
    GPIO.setup(self._all_pins, GPIO.OUT, initial=1)
    self._cloudwatch = cloudwatch.connect_to_region('us-west-2')
    return
def _getData(cls, regionName, instanceID, metricName, stats, unit, period,
             startTime, endTime):
    """ For experimentation """
    conn = cloudwatch.connect_to_region(region_name=regionName,
                                        **getAWSCredentials())
    # Query the AWS/EC2 namespace for the requested instance and window.
    return conn.get_metric_statistics(
        period=period,
        start_time=startTime,
        end_time=endTime,
        metric_name=metricName,
        namespace="AWS/EC2",
        statistics=stats,
        dimensions=dict(InstanceId=instanceID),
        unit=unit)
def _getData(cls, regionName, instanceID, metricName, stats, unit, period,
             startTime, endTime):
    """ For experimentation

    Fetch CloudWatch statistics for one EC2 instance metric over the
    given window.
    """
    cw = cloudwatch.connect_to_region(region_name=regionName,
                                      **getAWSCredentials())
    data = cw.get_metric_statistics(period=period,
                                    start_time=startTime,
                                    end_time=endTime,
                                    metric_name=metricName,
                                    namespace="AWS/EC2",
                                    statistics=stats,
                                    dimensions=dict(InstanceId=instanceID),
                                    unit=unit)
    return data
def __init__(self, dummy=False):
    # When dummy is set, skip the EC2 metadata service and use fixed test
    # values (useful when running off-EC2, e.g. on a desktop).
    if dummy:
        self._region = 'us-west-2'
        self._hostclass = "mhccloudwatchtest"
        self._environment_name = "desktop"
    else:
        metadata = boto.utils.get_instance_metadata()
        # Region = availability zone minus its trailing letter.
        self._region = metadata['placement']['availability-zone'][0:-1]
        userdata = self.get_userdata()
        self._hostclass = userdata["hostclass"]
        self._environment_name = userdata["environment_name"]
    self._connection = cloudwatch.connect_to_region(self._region)
    # Single shared dimension: "<environment>_<hostclass>".
    self._dimensions = {
        "env_hostclass": "_".join((self._environment_name, self._hostclass))
    }
    self._metrics = None
def __init__(self, namespace):
    """Connect to CloudWatch and resolve this host's EC2 instance id.

    The constructor automatically fetches the instance-id of the server it
    is running on; when running locally (on macOS) it defaults to
    '00000000'.

    :param namespace: str, a namespace that should be unique across the
        company's AWS account; by default the name of the project's entry
        file.
    :return: None
    """
    # NOTE(review): `region` comes from enclosing/module scope -- confirm
    # it is defined before this class is instantiated.
    self.cw_conn = cloudwatch.connect_to_region(region)
    if str(sys.platform).startswith('darwin'):
        # Local development on macOS: no metadata service available.
        self.ec2_id = '00000000'
    else:
        # Query the EC2 instance metadata service via curl.
        self.ec2_id = check_output([
            "curl", "-s",
            'http://169.254.169.254/latest/meta-data/instance-id'
        ])
    self.namespace = namespace
def put_data(args, log):
    "Get RQ data and send to CloudWatch"
    log.info('put_data()')
    cw = cloudwatch.connect_to_region(args['--region'])

    def put_metrics(metrics, dimensions):
        # Every datapoint is tagged with the deployment environment; the
        # --no-cloudwatch flag turns this into a log-only dry run.
        dimensions['env'] = args['--env']
        log.info('{} --> {}'.format(dimensions, metrics))
        if not args['--no-cloudwatch']:
            cw.put_metric_data('RQ', list(metrics.keys()),
                               list(metrics.values()), unit='Count',
                               dimensions=dimensions)

    try:
        redis = StrictRedis.from_url(args['--url'])
        redis.ping()
    except Exception as e:
        log.error('Unable to connect to redis: {}'.format(e))
        return
    # group workers by queue
    workers_by_queue = {}
    for w in Worker.all(connection=redis):
        for q in w.queues:
            ws = workers_by_queue.get(q, [])
            ws.append(w)
            workers_by_queue[q] = ws
    for q in workers_by_queue:
        # report queue level rollup
        put_metrics({'jobs': len(q),
                     'workers': len(workers_by_queue[q])},
                    {'queue': q.name})
        # report workers for each queue in each worker state
        states = {}
        for w in workers_by_queue[q]:
            count = states.get(w.state, 0) + 1
            states[w.state] = count
        for state in states:
            put_metrics({'workers': states[state]}, {
                'queue': q.name,
                'state': state.decode(),
            })
def process_region(self, region_cw_conn, start_time, end_time):
    """Process every zone of the region concurrently, one thread per zone."""
    threads = []
    for zone in get_zones(region_cw_conn.region.name, self.auth_kwargs):
        # Create a new connection for each thread, Boto isn't threadsafe.
        t_conn = cloudwatch.connect_to_region(region_cw_conn.region.name,
                                              **self.auth_kwargs)
        zone_thread = threading.Thread(target=self.process_zone,
                                       args=(t_conn, zone, start_time,
                                             end_time))
        zone_thread.start()
        threads.append(zone_thread)
    # Make sure all threads have completed. Also allows scheduler to work
    # more 'correctly', because without this, the collector will 'complete'
    # in about 7ms.
    for thread in threads:
        thread.join()
def __init__(self, dummy=False):
    # dummy=True bypasses the EC2 metadata service and uses fixed test
    # values so the class can be exercised off-EC2.
    if dummy:
        self._region = 'us-west-2'
        self._hostclass = "mhccloudwatchtest"
        self._environment_name = "desktop"
    else:
        metadata = boto.utils.get_instance_metadata()
        # Region = availability zone minus its trailing letter.
        self._region = metadata['placement']['availability-zone'][0:-1]
        userdata = self.get_userdata()
        self._hostclass = userdata["hostclass"]
        self._environment_name = userdata["environment_name"]
    self._connection = cloudwatch.connect_to_region(self._region)
    # Single shared dimension: "<environment>_<hostclass>".
    self._dimensions = {
        "env_hostclass": "_".join(
            (self._environment_name, self._hostclass))
    }
    self._metrics = None
def send_multi_metrics(instanceid, aws_region, cw_metrics,
                       namespace='EC2/Infrastructure', unit='Percent'):
    """ Send multiple metrics to CloudWatch
    metrics is expected to be a map of key -> value pairs of metrics
    :param instanceid: AWS instance ID
    :param aws_region: AWS region
    :param cw_metrics: map of metric name -> value
    :param namespace: Namespace to store the CloudWatch metric
    :param unit: What unit to store the CloudWatch metric
    """
    cw = cloudwatch.connect_to_region(aws_region)
    # NOTE(review): values are passed via `statistics=` rather than the
    # usual `value=` keyword -- confirm this is the intended
    # put_metric_data usage.
    cw.put_metric_data(
        namespace,
        cw_metrics.keys(),
        unit=unit,
        dimensions={"InstanceId": instanceid},
        statistics=cw_metrics.values()
    )
def get_metrics(region):
    """ Get all metrics in specified region. """
    client = cloudwatch.connect_to_region(region)
    if not client:
        raise Exception('Failed to connect to region: %s' % region)
    collected = []
    next_token = None
    # Page through list_metrics until CloudWatch stops returning a token.
    while True:
        result = (client.list_metrics(next_token=next_token)
                  if next_token else client.list_metrics())
        collected.extend(result)
        next_token = result.next_token
        if not next_token:
            break
    return collected
def initialise_connections(region):
    """Open EC2, RDS, CloudWatch and MySQL connections for `region`.

    Uses the AWS profile named by the first CLI argument (sys.argv[1]).
    """
    # FOR TESTING ON REMOTE MACHINE
    # ec2connection = ec2.connect_to_region(region_name=region,
    #                                       aws_access_key_id=c.ACCESS_KEY_ID,
    #                                       aws_secret_access_key=c.SECRET_ACCESS_KEY)
    # rds_connection = rds.connect_to_region(region_name=region,
    #                                        aws_access_key_id=c.ACCESS_KEY_ID,
    #                                        aws_secret_access_key=c.SECRET_ACCESS_KEY)
    # cw_connection = cwatch.connect_to_region(region_name=region,
    #                                          aws_access_key_id=c.ACCESS_KEY_ID,
    #                                          aws_secret_access_key=c.SECRET_ACCESS_KEY)
    ec2connection = ec2.connect_to_region(region_name=region,
                                          profile_name=sys.argv[1])
    rds_connection = rds.connect_to_region(region_name=region,
                                           profile_name=sys.argv[1])
    cw_connection = cwatch.connect_to_region(region_name=region,
                                             profile_name=sys.argv[1])
    mysql_connection = func.connect_to_mysql_server()
    return ec2connection, rds_connection, cw_connection, mysql_connection
def __init__(self, virtapi, read_only=False):
    """Initialise the EC2 compute driver and its AWS connections."""
    super(EC2Driver, self).__init__(virtapi)
    # Static host capability report, bounded by configured maxima.
    self.host_status_base = {
        'vcpus': CONF.AWS.max_vcpus,
        'memory_mb': CONF.AWS.max_memory_mb,
        'local_gb': CONF.AWS.max_disk_gb,
        'vcpus_used': 0,
        'memory_mb_used': 0,
        'local_gb_used': 0,
        'hypervisor_type': 'EC2',
        'hypervisor_version': '1.0',
        'hypervisor_hostname': CONF.host,
        'cpu_info': {},
        'disk_available_least': CONF.AWS.max_disk_gb,
    }
    global _EC2_NODES
    self._mounts = {}
    self._interfaces = {}
    self._uuid_to_ec2_instance = {}
    self.ec2_flavor_info = EC2_FLAVOR_MAP
    # Build the EC2 endpoint from the configured region name.
    aws_region = CONF.AWS.region_name
    aws_endpoint = "ec2." + aws_region + ".amazonaws.com"
    region = RegionInfo(name=aws_region, endpoint=aws_endpoint)
    self.ec2_conn = ec2.EC2Connection(
        aws_access_key_id=CONF.AWS.access_key,
        aws_secret_access_key=CONF.AWS.secret_key,
        region=region)
    self.cloudwatch_conn = cloudwatch.connect_to_region(
        aws_region,
        aws_access_key_id=CONF.AWS.access_key,
        aws_secret_access_key=CONF.AWS.secret_key)
    # Allow keypair deletion to be controlled by conf
    if CONF.AWS.enable_keypair_notifications:
        eventlet.spawn(KeyPairNotifications(self.ec2_conn).run)
    LOG.info("EC2 driver init with %s region" % aws_region)
    if _EC2_NODES is None:
        set_nodes([CONF.host])
def get_asg_metrics(asg, metric_name, region, namespace='Learn/Instance',
                    statistic='Average', period=60):
    """Collect the last 3 hours of a CloudWatch metric for every instance
    in an auto-scaling group.

    Returns a dict mapping seconds-since-window-start -> {instance index
    within the group -> datapoint statistic value}.
    """
    asc = autoscale.connect_to_region(
        region, aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    instances = asc.get_all_groups([asg])[0].instances
    cw = cloudwatch.connect_to_region(
        region, aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    end = datetime.datetime.now()
    start = end - datetime.timedelta(hours=3)
    # BUG FIX: `data` was an empty list that was then indexed with
    # data[int(date_index)][instance_index], which raised IndexError on the
    # first datapoint.  Use nested dicts keyed the same way so
    # data[date_index][instance_index] lookups keep working.
    data = {}
    for instance_index, instance in enumerate(instances):
        found = cw.list_metrics(
            metric_name=metric_name,
            dimensions={'InstanceId': [str(instance.instance_id)]})
        points = found[0].query(start, end, statistic)
        for point in points:
            time_diff = point['Timestamp'] - start
            date_index = int(time_diff.total_seconds())
            data.setdefault(date_index, {})[instance_index] = point[statistic]
    return data