def getLongRunningAutoscalingGroup(region, awsAccessKeyId, awsSecretAccessKey, groupAge):
    """
    Returns the list of long running autoscaling groups.

    :param region: AWS region to query
    :param awsAccessKeyId: aws_access_key_id
    :param awsSecretAccessKey: aws_secret_access_key
    :param groupAge: age threshold, in whole days
    :returns: A list of long-running autoscaling groups.

    e.g.
    getLongRunningEC2Instances("us-west-2",
                               os.environ["AWS_ACCESS_KEY_ID"],
                               os.environ["AWS_SECRET_ACCESS_KEY"], 15)
    """
    autoscalingConn = autoscale.connect_to_region(
        region_name=region,
        aws_access_key_id=awsAccessKeyId,
        aws_secret_access_key=awsSecretAccessKey)
    groups = autoscalingConn.get_all_groups()
    # Only groups that have CloudWatch metrics enabled are considered.
    monitoredGroups = [group for group in groups if group.enabled_metrics]

    # FIX: read the clock once instead of once per group — the repeated
    # datetime.now() calls were loop-invariant work and made the age cutoff
    # (marginally) inconsistent across groups.
    nowutc = datetime.now(tzutc())
    longRunningGroups = [
        group for group in monitoredGroups
        if (nowutc - dateutil.parser.parse(group.created_time)).days > groupAge
    ]
    return longRunningGroups
def list_all_groups(region):
    """Return a column-oriented summary of every autoscaling group in *region*.

    :param region: AWS region name to query.
    :returns: dict mapping each header name ("Name", "LaunchConfig",
        "Instances", "Instance Class", "Desired", "Min", "Max", "Region")
        to a tuple of per-group values.
    """
    header = [
        "Name", "LaunchConfig", "Instances", "Instance Class", "Desired",
        "Min", "Max", "Region"
    ]
    asg_data = []
    asconn = autoscale.connect_to_region(region)
    autoscaling_groups = asconn.get_all_groups()
    for asg in autoscaling_groups:
        try:
            asg_instance_class = asconn.get_all_launch_configurations(
                names=[asg.launch_config_name])[0].instance_type
        except exception.BotoServerError as err:
            # FIX: only API throttling is retryable. The original swallowed
            # every other BotoServerError, leaving asg_instance_class unbound
            # and crashing with a NameError a few lines later.
            if err.message != "Rate exceeded":
                raise
            sleep(3)
            asg_instance_class = asconn.get_all_launch_configurations(
                names=[asg.launch_config_name])[0].instance_type
        asg_name = asg.name.replace("asg-", "")
        asg_lc = asg.launch_config_name
        asg_instances = len(asg.instances)
        asg_desired = asg.desired_capacity
        asg_min = asg.min_size
        asg_max = asg.max_size
        asg_region = region
        asg_data.append([
            asg_name, asg_lc, asg_instances, asg_instance_class, asg_desired,
            asg_min, asg_max, asg_region
        ])
    asg_data.insert(0, header)
    # Transpose rows into {header: (values...)} columns.
    asg_data = {col[0]: col[1:] for col in zip(*asg_data)}
    return asg_data
def _get_conn(region, key, keyid, profile):
    '''
    Get a boto connection to autoscale.
    '''
    option = __salt__['config.option']

    # A profile (a config-option name or an inline dict) overrides any
    # explicitly supplied credentials.
    if profile:
        if isinstance(profile, string_types):
            _profile = option(profile)
        elif isinstance(profile, dict):
            _profile = profile
        key = _profile.get('key', None)
        keyid = _profile.get('keyid', None)
        region = _profile.get('region', None)

    # Fall back to minion config, then to a hard default for the region.
    if not region:
        region = option('asg.region') or 'us-east-1'
    if not key:
        key = option('asg.key') or key
    if not keyid:
        keyid = option('asg.keyid') or keyid

    try:
        return autoscale.connect_to_region(region,
                                           aws_access_key_id=keyid,
                                           aws_secret_access_key=key)
    except boto.exception.NoAuthHandlerFound:
        log.error('No authentication credentials found when attempting to'
                  ' make boto autoscale connection.')
        return None
def test_userdata(self):
    """Verify the taupage user-data rendered into the test stack's launch config."""
    autoscale_conn = autoscale.connect_to_region("eu-west-1")
    # Resolve the "lc" logical resource of the CFN test stack to the
    # physical launch-configuration name.
    instance_stack_resources = self.cfn_conn.describe_stack_resource("cfn-sphere-test-instances", "lc")
    lc_name = \
        instance_stack_resources["DescribeStackResourceResponse"]["DescribeStackResourceResult"][
            "StackResourceDetail"][
            "PhysicalResourceId"]
    lc = autoscale_conn.get_all_launch_configurations(names=[lc_name])[0]
    user_data_lines = lc.user_data.split('\n')
    # The rendered user data must start with the taupage marker line.
    self.assertEqual("#taupage-ami-config", user_data_lines[0])
    self.assertTrue("application_version: 1" in user_data_lines)
    self.assertTrue("  stack: cfn-sphere-test-instances" in user_data_lines)
    # dockercfg block: registry URL on the next line, credentials nested
    # beneath it in a fixed order.
    dockercfg_root_index = user_data_lines.index("dockercfg:")
    self.assertEqual("  https://my-private-registry:", user_data_lines[dockercfg_root_index + 1])
    self.assertEqual("    email: [email protected]", user_data_lines[dockercfg_root_index + 2])
    self.assertEqual("    auth: my-secret-string", user_data_lines[dockercfg_root_index + 3])
    # environment block carries the stack-specific variable.
    environment_root_index = user_data_lines.index("environment:")
    self.assertEqual("  DYNAMO_DB_PREFIX: cfn-sphere-test-instances", user_data_lines[environment_root_index + 1])
    # notify_cfn block must point back at this stack's ASG resource.
    notify_cfn_root_index = user_data_lines.index("notify_cfn:")
    self.assertEqual("  resource: asg", user_data_lines[notify_cfn_root_index + 1])
    self.assertEqual("  stack: cfn-sphere-test-instances", user_data_lines[notify_cfn_root_index + 2])
    # port mapping: host 8080 -> container 9000.
    ports_root_index = user_data_lines.index("ports:")
    self.assertEqual("  8080: 9000", user_data_lines[ports_root_index + 1])
def index(self):
    """Render the cumulus.js template over all us-east-1 autoscaling groups."""
    connection = autoscale.connect_to_region("us-east-1")
    all_groups = connection.get_all_groups()
    template = lookup.get_template('cumulus.js')
    # Serve as JavaScript and force revalidation so the dashboard stays fresh.
    response_headers = cherrypy.response.headers
    response_headers['Content-Type'] = "text/javascript"
    response_headers['Cache-Control'] = "no-cache, must-revalidate"
    return template.render(groups=all_groups,
                           request_base=cherrypy.request.base)
def aws_conn_auto(region, profile='default'):
    """Open an autoscale connection for *region* using a named boto profile.

    Returns the connection, or None (implicitly) after logging the error
    when the connection attempt fails — callers must handle a None result.
    """
    try:
        return a.connect_to_region(region, profile_name=profile)
    except Exception as err:
        logging.error(
            "Unable to connect to region, please investigate: {0}".format(
                err))
def _get_asg_instances(self) -> List[str]:
    """Return the instance ids of InService members of this node's ASG."""
    conn = autoscale.connect_to_region(self.node['region'])
    this_instance = self.node['metadata']['instance-id']
    # Look up which group this instance belongs to, then fetch the group.
    group_name = conn.get_all_autoscaling_instances(
        [this_instance],
    )[0].group_name
    log.info('Instance is part of ASG %s', group_name)
    group = conn.get_all_groups([group_name])[0]
    return [
        member.instance_id
        for member in group.instances
        if member.lifecycle_state == 'InService'
    ]
def get_asg_instance_ids(region):
    """Create a fixed-size test ASG ('test_asg', 3 instances) in *region*
    and return the instance ids of its members."""
    conn = autoscale.connect_to_region(region)
    lc = autoscale.LaunchConfiguration(name='test_lc')
    conn.create_launch_configuration(lc)
    group = autoscale.AutoScalingGroup(
        name='test_asg',
        min_size=3,
        max_size=3,
        launch_config=lc,
    )
    conn.create_auto_scaling_group(group)
    # Re-fetch the group so the instance list is populated.
    refreshed = conn.get_all_groups([group.name])[0]
    return [member.instance_id for member in refreshed.instances]
def get_running_instance_count(body):
    """Count HEALTHY autoscaling instances in us-west-2.

    FIX: the original docstring claimed this counts "running rds
    instances" — the code queries the autoscaling API, not RDS.

    Args:
        body: mapping (or key/value pairs accepted by dict()) holding
            "aws_access_key_id" and "aws_secret_access_key".

    Returns:
        dict: {"data": <number of autoscaling instances whose
            health_status is 'HEALTHY'>}
    """
    body = dict(body)
    conn = autoscale.connect_to_region(
        region_name='us-west-2',
        aws_access_key_id=body["aws_access_key_id"],
        aws_secret_access_key=body["aws_secret_access_key"])
    instance_status = conn.get_all_autoscaling_instances()
    running_instances = sum(
        1 for item in instance_status if item.health_status == 'HEALTHY')
    return {"data": running_instances}
def describe_group_instances(name):
    """Return the instance ids of autoscaling group "asg-<name>".

    Searches every configured region and stops at the first region whose
    group name matches exactly.

    :raises ValueError: when no region contains the group.
    """
    asg_name = "asg-%s" % name
    # FIX: initialize before the loop — the original raised NameError
    # (instead of the intended ValueError) when attributes.regions was empty.
    asg = []
    for region in [region.name for region in attributes.regions]:
        asconn = autoscale.connect_to_region(region)
        asg = asconn.get_all_groups(names=[asg_name])
        if len(asg) > 0 and asg[0].name == asg_name:
            break
    try:
        autoscaling_group = asg[0]
    except IndexError as e:
        raise ValueError("Invalid autoscaling group name %s. Error: %s" %
                         (asg_name, e))
    return [
        instance.instance_id for instance in autoscaling_group.instances
    ]
def get_asg_metrics(asg, metric_name, region, namespace='Learn/Instance', statistic='Average', period=60):
    """Fetch the last 3 hours of a CloudWatch metric for each instance in *asg*.

    :param asg: autoscaling group name.
    :param metric_name: CloudWatch metric to query.
    :param region: AWS region.
    :param namespace, period: accepted for interface compatibility; not
        used by the current implementation (as in the original).
    :param statistic: statistic to query (e.g. 'Average').
    :returns: list indexed by whole seconds since the window start; each
        entry is a per-instance list of datapoint values (None where no
        datapoint fell on that second).
    """
    asc = autoscale.connect_to_region(
        region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    instances = asc.get_all_groups([asg])[0].instances
    cw = cloudwatch.connect_to_region(
        region,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    end = datetime.datetime.now()
    start = end - datetime.timedelta(hours=3)
    # FIX: the original set data = [] and then assigned data[i][j], which
    # raises IndexError on the first datapoint. Preallocate one row per
    # second of the 3-hour window, one column per instance.
    window_seconds = int((end - start).total_seconds()) + 1
    data = [[None] * len(instances) for _ in range(window_seconds)]
    for instance_index, instance in enumerate(instances):
        metrics = cw.list_metrics(
            metric_name=metric_name,
            dimensions={'InstanceId': [str(instance.instance_id)]})
        points = metrics[0].query(start, end, statistic)
        for point in points:
            time_diff = point['Timestamp'] - start
            date_index = int(time_diff.total_seconds())
            data[date_index][instance_index] = point[statistic]
    return data
def describe_group(name, old_method=False):
    """Return a column-oriented description of one autoscaling group.

    :param name: group name; with old_method=True it is prefixed "asg-".
    :param old_method: use the legacy "asg-<name>" naming scheme.
    :returns: dict mapping the group name / "Information" headers to tuples
        of descriptive fields.
    :raises ValueError: when no region contains the group.
    """
    if old_method:
        asg_name = "asg-%s" % name
    else:
        asg_name = name
    # FIX: initialize before the loop — the original raised NameError
    # (instead of the intended ValueError) when attributes.regions was empty.
    asg = []
    for region in [region.name for region in attributes.regions]:
        asconn = autoscale.connect_to_region(region)
        asg = asconn.get_all_groups(names=[asg_name])
        if len(asg) > 0:
            if asg[0].name == asg_name:
                break
    try:
        autoscaling_group = asg[0]
        header = [autoscaling_group.name, "Information"]
        asg_data = []
    except IndexError as e:
        raise ValueError("Invalid autoscaling group name %s. Error: %s" %
                         (asg_name, e))
    instances = [
        "%s - %s" % (instance.instance_id, instance.health_status)
        for instance in autoscaling_group.instances
    ]
    asg_data.append(["ARN", autoscaling_group.autoscaling_group_arn])
    asg_data.append(["Desired Capacity", autoscaling_group.desired_capacity])
    asg_data.append(["LaunchConfig", autoscaling_group.launch_config_name])
    asg_data.append(["Min Size", autoscaling_group.min_size])
    asg_data.append(["Max Size", autoscaling_group.max_size])
    # NOTE(review): assumes at least one termination policy is set — an
    # empty policy list would raise IndexError here (as in the original).
    asg_data.append(
        ["Termination Policy", str(autoscaling_group.termination_policies[0])])
    asg_data.append(["Instances", ', '.join(instances)])
    asg_data.insert(0, header)
    asg_data = {col[0]: col[1:] for col in zip(*asg_data)}
    return asg_data
def get_autoscale_connection():
    """Return a boto autoscale connection for the configured region.

    Raises AutoscaleError.CannotGetConnection when boto cannot resolve the
    configured 'autoscale_region_name'.
    """
    region_name = config.get_value('Boto', 'autoscale_region_name')
    connection = autoscale.connect_to_region(region_name)
    if connection is not None:
        return connection
    raise AutoscaleError.CannotGetConnection(
        "cannot get autoscale connection. invalid 'autoscale_region_name'?")
def __init__(self):
    """Build per-account, per-region EC2 and autoscale connection maps.

    Reads proxy settings from `config`, proxy/AWS credentials from YAML
    files, then opens one EC2 and one autoscale connection per
    (account, region) pair and stores them in
    self.region_ec2_mapping_dict / self.region_as_mapping_dict
    (account key -> list of {'region', 'connectionEC2'/'connectionAS'}).
    """
    use_proxy = True
    use_proxy_credentials = True
    # Proxy host/port come from the config file; any failure (missing
    # section or None values) disables proxying entirely.
    try:
        PROXY_SERVER = config.get('PROXY', 'proxy')
        PROXY_PORT = config.get('PROXY', 'proxy_port')
        if PROXY_SERVER == None or PROXY_PORT == None:
            raise Exception
        print("[+] Proxy Configuration found. Using proxy.")
    except Exception:
        print("[+] No Proxy Configuration in config file. Skipping Proxy.")
        use_proxy = False
    # NOTE(review): yaml.load without SafeLoader on a local credentials
    # file, and Python-2-only file() — flagging, not changing, in this pass.
    try:
        CREDENTIALS = yaml.load(
            file(os.path.join(_CREDENTIALS_FILE_PATH, _CREDENTIALS_FILE), 'r'))
    except IOError as e:
        print("[-] Could not retrieve credentials")
        exit()
    if use_proxy:
        # Missing/None proxy credentials mean an unauthenticated proxy.
        try:
            PROXY_USER = CREDENTIALS['http_proxy_username']
            PROXY_PASSWORD = CREDENTIALS['http_proxy_password']
            print("[+] Proxy Credentials found.")
            if PROXY_USER == None or PROXY_PASSWORD == None:
                raise Exception
        except Exception:
            print("[+] No Proxy Credentials found. Assuming Proxy Doesn't Require Credentials.")
            use_proxy_credentials = False
    # Multi-document YAML: one document per AWS account.
    try:
        AWS_CREDENTIALS = yaml.load_all(file(_AWS_CRED_FILE_PATH + _AWS_CRED_FILE, 'r'))
        print("[+] Retrieved AWS credentials")
    except IOError:
        print("[-] Could not retrieve AWS credentials")
        raise
    regions = self.get_regions()
    self.region_ec2_mapping_dict = defaultdict(list)
    self.region_as_mapping_dict = defaultdict(list)
    for creds in AWS_CREDENTIALS:
        for k, v in creds.items():
            AWS_ACCESS_KEY_ID = creds[k]['aws_access_key_id']
            AWS_SECRET_ACCESS_KEY = creds[k]['aws_secret_access_key']
            for region in regions:
                try:
                    print("[+] Attempting to establish an AWS Connection to {region} region.".format(region=region.name))
                    if use_proxy:
                        # NOTE(review): when proxy credentials exist, the
                        # credential-less connections built first are
                        # immediately replaced — presumably redundant work;
                        # confirm before simplifying.
                        ec2_conn = ec2.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True, proxy=PROXY_SERVER, proxy_port=PROXY_PORT)
                        as_conn = autoscale.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True, proxy=PROXY_SERVER, proxy_port=PROXY_PORT)
                        if use_proxy_credentials:
                            ec2_conn = ec2.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True, proxy=PROXY_SERVER, proxy_port=PROXY_PORT, proxy_user=PROXY_USER, proxy_pass=PROXY_PASSWORD)
                            as_conn = autoscale.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True, proxy=PROXY_SERVER, proxy_port=PROXY_PORT, proxy_user=PROXY_USER, proxy_pass=PROXY_PASSWORD)
                    else:
                        ec2_conn = ec2.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True)
                        as_conn = autoscale.connect_to_region(region.name, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY, is_secure=True)
                except Exception:
                    # Any connection failure aborts the whole process.
                    print("[-] Unable to establish communications with AWS. Exiting.")
                    exit()
                self.region_ec2_mapping_dict[k].append({'region': region.name, 'connectionEC2': ec2_conn})
                self.region_as_mapping_dict[k].append({'region': region.name, 'connectionAS': as_conn})
dest='threshold', default=THRESHOLD, help= 'Acceptable amount of events per process we will tolerate being backed up on' ) parser.add_option('-g', '--group', dest='group', default=GROUP_NAME, help='The autoscale group name to operate over') (options, args) = parser.parse_args() conn = connect_s3() bucket = conn.get_bucket('nlp-data') autoscale = connect_to_region('us-west-2') lastInQueue = None intervals = [] while True: group = autoscale.get_all_groups(names=[options.group])[0] inqueue = len([k for k in bucket.list(QUEUES[options.group]) ]) - 1 #because it lists itself, #lame if lastInQueue is not None and lastInQueue != inqueue: delta = (lastInQueue - inqueue) intervals += [(mins, delta * 250)] avg = numpy.mean( map(lambda x: float(x[1]) / float(x[0] * 60), intervals)) rate = ", %.3f docs/sec; %d in the last %d minute(s)" % (avg, delta * 250, mins)
# Validate CLI arguments: at least one of image_id / instance_type must be
# given before touching AWS.
if not args.image_id and not args.instance_type:
    print("Specify at least one of image_id or instance_type")
    # FIX: this is an error path — exit non-zero (was sys.exit(0)).
    sys.exit(1)
# NOTE(review): this script deliberately rebinds the module names `ec2` and
# `autoscale` to connection objects; later code depends on that, so the
# shadowing is kept.
ec2 = ec2.connect_to_region(args.region)
# Verify the AMI exists before proceeding.
try:
    if args.image_id:
        ec2.get_all_images(image_ids=[args.image_id])
except exception.EC2ResponseError:
    print(
        "It seems that '{0}' is not a valid image_id name or it does not exist "
        .format(args.image_id))
    sys.exit(1)
autoscale = autoscale.connect_to_region(args.region)
# Both the launch configuration and the group must already exist.
try:
    as_launch_config = autoscale.get_all_launch_configurations(
        names=[args.launch_config_name]).pop()
except IndexError:
    # FIX: corrected message grammar ("Couldn't found" -> "Couldn't find").
    print("Couldn't find AutoScaling Launch Configuration")
    sys.exit(1)
try:
    as_group = autoscale.get_all_groups(names=[args.autoscale_group_name])[0]
except IndexError:
    print("Couldn't find autoscale group '{0}'".format(
        args.autoscale_group_name))
    sys.exit(1)
def __init__(self, old_method=False, destroy_confirmation=True,
             autoscale_update=False, launchconfig_update=False,
             scale_name=None, name=None, process=None, platform=None,
             env=None, region=None, minimum=None, maximum=None,
             instance_class=None, desired_capacity=None, elb=None,
             ami_id=None, public=False):
    """Initialize ASG management state: connection, AZs, AMI, and the
    launch-configuration / scale-group names derived from either an
    explicit scale_name or the (process, name, platform, env) tuple.

    NOTE(review): destroy_confirmation is accepted but never stored or
    read here — confirm whether it is dead or used elsewhere.
    """
    config = Config()
    self.old_method = old_method
    self.autoscale_update = autoscale_update
    self.launchconfig_update = launchconfig_update
    self.name = name
    self.process = process
    self.platform = platform
    self.env = env
    self.region = region
    self.instance_class = instance_class
    self.desired_capacity = desired_capacity
    self.minimum = minimum
    self.maximum = maximum
    self.connection = autoscale.connect_to_region(self.region)
    self.availability_zones = attributes.availability_zones[self.region]
    # Explicit AMI wins; otherwise fall back to the per-region default.
    self.ami_id = ami_id if ami_id is not None else config.ami[self.region]
    self.public = public
    if scale_name is not None:
        # Legacy path: derive name/platform/env back out of the hostname.
        self.scale_name = "asg-%s" % scale_name
        # NOTE(review): assumes the stripped name has >= 4 dash-separated
        # parts (index 3 is read; index 2 is skipped) — TODO confirm the
        # naming scheme.
        hostname = self.scale_name.replace("asg-", "").split("-")
        self.name = hostname[0]
        self.platform = hostname[1]
        self.env = hostname[3]
        # NOTE(review): self.launch_config_list is appended to without
        # being initialized in this method — presumably a class attribute;
        # if so it is shared across instances. Verify.
        for lc in self.get_all_launch_configs():
            if scale_name in lc.name:
                self.launch_config_list.append(lc)
        if len(self.launch_config_list) > 0:
            # Use the most recently created matching launch config.
            self.launch_config_name = sorted(
                self.launch_config_list,
                key=lambda lc: lc.created_time)[-1].name
        else:
            self.launch_config_name = "alc1-%s" % scale_name
    else:
        # New naming scheme: config<N>-process-name-platform-env.
        for lc in self.get_all_launch_configs():
            if "%s-%s-%s-%s" % (self.process, self.name, self.platform,
                                self.env) in lc.name:
                self.launch_config_list.append(lc)
        # N is the count of existing matching configs (minimum 1).
        if len(self.launch_config_list) == 0:
            lc_number = 1
        elif len(self.launch_config_list) > 0:
            lc_number = len(self.launch_config_list)
        self.launch_config_name = "config%s-%s-%s-%s-%s" % (
            lc_number, self.process, self.name, self.platform, self.env)
        self.scale_name = "scale-%s-%s-%s-%s" % (self.process, self.name,
                                                 self.platform, self.env)
    self.elb = elb
def __init__(self, settings):
    """Initialize the autoscale service.

    The region is read from the [AS] REGION_NAME setting, defaulting to
    'us-west-1'; a connection failure trips the assertion.
    """
    super(ASService, self).__init__(settings)
    configured_region = settings.get('AS', 'REGION_NAME', 'us-west-1')
    self.conn = autoscale.connect_to_region(region_name=configured_region)
    assert self.conn is not None