def _make_client(self, config):
    """Construct a Boto3 client for talking to SSM Parameter Store.

    Reads region and (optional) credentials from the CKAN config; if no
    region is configured, attempts to auto-detect it from the EC2
    instance identity document.

    :param config: mapping of CKAN configuration options.
    :returns: the boto3 SSM client (also stored on ``self.client``),
        or ``False`` when the region cannot be determined or the client
        cannot be initialised.
    """
    region_name = config.get('ckanext.ssm_config.region_name', None)
    access_key = config.get('ckanext.ssm_config.aws_access_key_id', None)
    secret_key = config.get('ckanext.ssm_config.aws_secret_access_key', None)
    if not region_name:
        LOG.debug('region_name not found; attempting to auto-detect')
        try:
            region_name = get_instance_identity()['document']['region']
        except Exception:
            # Not on EC2 (or metadata endpoint unreachable): cannot proceed.
            # NOTE: LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning("""Unable to determine AWS region; please specify 'ckanext.ssm_config.region_name'.""")
            return False
    LOG.info('Retrieving SSM parameters from region %s', region_name)
    try:
        self.client = boto3.client('ssm', region_name,
                                   aws_access_key_id=access_key,
                                   aws_secret_access_key=secret_key)
        return self.client
    # FIX: 'except Exception, e' is Python-2-only syntax; 'as e' is valid
    # on both Python 2.6+ and Python 3.
    except Exception as e:
        LOG.error('Failed to initialise SSM Parameter Store client: %s', e)
        return False
def create_repository(args):
    """Initial create of repository.

    Registers (or updates) an Elasticsearch S3 snapshot repository named
    ``args.repository`` on the local node, backed by ``args.s3_bucket``
    at ``args.s3_path`` in this instance's own region.

    :param args: parsed CLI arguments providing ``repository``,
        ``s3_bucket`` and ``s3_path``.
    :returns: human-readable confirmation string.
    :raises Exception: re-raises any failure after logging it.
    """
    create_repository_url = "/".join([ES_LOCAL_URL, "_snapshot", args.repository])
    # Get the region from the instance identity document.
    try:
        instance_metadata = get_instance_identity()
        instance_region = instance_metadata["document"]["region"]
    # FIX: narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
    # are not intercepted; the original behavior (log, then re-raise) is kept.
    except Exception:
        logging.exception("Failure getting EC2 instance data")
        raise
    # Repository registration payload.
    create_repository_data = {
        "type": "s3",
        "settings": {
            "bucket": args.s3_bucket,
            "region": instance_region,
            "base_path": args.s3_path,
        },
    }
    try:
        headers = {"content-type": "application/json"}
        create_repository_request = requests.put(
            create_repository_url,
            data=json.dumps(create_repository_data),
            headers=headers,
            timeout=REQUESTS_TIMEOUT,
        )
        # Surface HTTP 4xx/5xx responses as exceptions.
        create_repository_request.raise_for_status()
    except Exception:
        logging.exception("Failure creating repository")
        raise
    repository_ = "Created or updated repository: %s" % args.repository
    return repository_
def is_first_of_asg_group():
    """
    Returns True if the current instance is the first instance in the
    sorted by instance_id ASG group.

    XXX: some methods can be generalized and moved to a common.py file
    """
    # Collect together instance data
    try:
        instance_identity = get_instance_identity()
        instance_id = instance_identity['document']['instanceId']
        # Region = availability zone with its trailing letter stripped
        # (e.g. 'us-east-1a' -> 'us-east-1').
        instance_region = instance_identity['document']['availabilityZone'].strip()[:-1]
        conn = boto.ec2.connect_to_region(instance_region)
        instance_data = conn.get_all_instances(
            instance_ids=[instance_id])[0].instances[0]
    except boto.exception.AWSConnectionError as e:
        # FIX: '.format()' was previously applied to log.error()'s return
        # value (None), which raised AttributeError instead of logging.
        log.error("There was a problem collecting instance data, '{}'".format(e.message))
        return False
    # my autoscaling group
    asg_group = instance_data.tags['aws:autoscaling:groupName']
    try:
        autoscale = boto.ec2.autoscale.connect_to_region(instance_region)
        group = autoscale.get_all_groups(names=[asg_group])[0]
        sorted_instances = sorted(group.instances,
                                  key=operator.attrgetter('instance_id'))
    except boto.exception.AWSConnectionError as e:
        # FIX: same misplaced-parenthesis bug as above.
        log.error("There was a problem collecting instance data, '{}'".format(e.message))
        return False
    # First member of the group (lowest instance_id) wins.
    return sorted_instances[0].instance_id == instance_id
def _get_instance_info(self):
    """Fetch this host's EC2 metadata and identity documents.

    Bails out when the metadata service returns nothing, since the
    inventory only makes sense when run on an EC2 instance.
    """
    meta = get_instance_metadata(timeout=1, num_retries=2)
    if meta == {}:
        raise Exception("Should be on an EC2 instance for this inventory script to work.")
    ident = get_instance_identity(timeout=1, num_retries=2)
    return meta, ident
def _get_instance_info(self):
    """Return (metadata, identity) documents for the current EC2 host.

    Raises when the metadata service is unreachable / returns nothing,
    because this script must run on an EC2 instance.
    """
    instance_metadata = get_instance_metadata(timeout=1, num_retries=2)
    if instance_metadata == {}:
        raise Exception(
            "Should be on an EC2 instance for this inventory script to work."
        )
    instance_identity = get_instance_identity(timeout=1, num_retries=2)
    return instance_metadata, instance_identity
def __init__(self, **kwargs):
    """Initialise inventory settings from keyword arguments.

    When none of region / instance_id / tags are supplied, they are
    resolved from the EC2 instance identity document instead.
    """
    self.region = kwargs.get('region')
    self.instance_id = kwargs.get('instance_id')
    self.tags = kwargs.get('tags')
    self.inventory_filename = kwargs.get('inventory_filename', '/etc/ansible/hosts')
    self.inventory_tags = frozenset(
        kwargs.get('inventory_tags', ('StackName', 'IpaType')))
    # Fall back to ec2 metadata when no identifying fields were passed.
    if not any((self.region, self.instance_id, self.tags)):
        doc = get_instance_identity()['document']
        self.region = doc['region']
        self.instance_id = doc['instanceId']
        self.set_tags()
def _get_node_metadata(self) -> Dict[str, Any]:
    """Gather this node's instance metadata, region and EC2 tags."""
    # Metadata first, then identity — same call order as before.
    metadata = get_instance_metadata()
    region = get_instance_identity()['document']['region']
    node = {'metadata': metadata, 'region': region}
    connection = ec2.connect_to_region(region)
    tag_objects = connection.get_all_tags(
        filters={'resource-id': metadata['instance-id']},
    )
    node['tags'] = {t.name: t.value for t in tag_objects}
    return node
def get_region(self):
    """Resolve the AWS region and store a boto region object on self.

    Resolution order: explicit ``self.region_name`` -> environment
    (AWS_DEFAULT_REGION, then AWS_REGION) -> EC2 instance identity
    document. Raises OperationError when none of these yield a region.
    """
    # Environment variables, in precedence order.
    if self.region_name is None:
        for env_var in ('AWS_DEFAULT_REGION', 'AWS_REGION'):
            self.region_name = os.environ.get(env_var, None)
            if self.region_name is not None:
                break
    # Attempt instance metadata as a last resort.
    if self.region_name is None and looks_like_ec2():
        identity = get_instance_identity()
        self.region_name = identity['document']['region']
    if self.region_name is None:
        raise OperationError("aws.region unset, and could not be determined from environment")
    self.region = boto.ec2.get_region(self.region_name)
def main():
    """Grow the media LVM volume when disk usage crosses the threshold.

    Creates a new EBS volume, attaches it to this instance on the next
    available device name, and extends the LVM group/device with it.
    """
    options, args = parse_args()
    # Below the configured usage ratio there is nothing to do.
    if get_usage_ratio(options.mount_point) < options.ratio:
        logger.info("{0} usage ratio is less than {1}. Quitting".format(
            options.mount_point, options.ratio))
        return
    logger.info("Proceeding with extending media volume")
    aws = AWS()
    identity_doc = utils.get_instance_identity()['document']
    new_volume = create_volume(aws.ec2, options.increment,
                               identity_doc['availabilityZone'])
    # Next device name after the last existing partition, e.g. xvdf -> xvdg.
    device = '/dev/' + increment_partition_name(get_sorted_partitions()[-1])
    aws.ec2.attach_volume(new_volume, identity_doc['instanceId'], device)
    extend_lvm(device, options.logical_group, options.logical_device)
def main():
    """Return this instance's EC2 tags as a list of key/value rows."""
    identity = get_instance_identity()
    doc = identity['document']
    # Connect to the instance's home region and look up its own tags.
    connection = ec2.connect_to_region(doc['region'])
    tags = connection.get_all_tags(filters={"resource-id": doc['instanceId']})
    # Reshape boto Tag objects into plain dict rows.
    return [{'key': tag.name, 'value': tag.value} for tag in tags]
def handle_instance_identity():
    '''Log selected fields of the EC2 instance identity document.

    Example document shape:
    {'document': {'devpayProductCodes': None, 'availabilityZone': 'ap-southeast-1a', 'instanceId': 'i-807e52d7', 'region': 'ap-southeast-1', 'imageId': 'ami-a02f66f2', 'version': '2010-08-31', 'architecture': 'x86_64', 'billingProducts': None, 'kernelId': 'aki-fe1354ac', 'ramdiskId': None, 'privateIp': '10.130.77.91', 'instanceType': 't1.micro', 'pendingTime': '2013-08-22T19:24:30Z', 'accountId': '334918212912'}, 'pkcs7': '...'}
    '''
    identity = get_instance_identity()
    logging.debug(pprint.pformat(identity))
    document = identity['document']
    # Lazy %-style logging args instead of eager string interpolation.
    logging.info('Architecture: %s', document['architecture'])
    logging.info('Private IP: %s', document['privateIp'])
def get_instance_config():
    """
    Use the instance metadata API to get region, instance ID and
    reservation ID.

    :returns: dict with keys ``inst_id``, ``region`` and
        ``reservation_id``.
    :raises SystemExit: when the instance metadata endpoint cannot be
        reached.
    """
    LOG.info("Finding instance reservation ID and ENI ID...")
    config = {}
    # Find my instance ID
    try:
        identity_data = utils.get_instance_identity(timeout=2, num_retries=2)
        metadata = utils.get_instance_metadata(timeout=2, num_retries=2)
    except IndexError:
        # IndexError because boto throws this when it tries to parse empty response
        raise SystemExit(
            "Could not connect to instance metadata endpoint, bailing...")
    config["inst_id"] = identity_data["document"]["instanceId"]
    config["region"] = identity_data["document"]["region"]
    config["reservation_id"] = metadata["reservation-id"]
    # FIX: items() works on both Python 2 and 3; iteritems() is Py2-only.
    for k, v in config.items():
        LOG.info("Found %s with value %s", k, v)
    return config
". {env_file}; sudo -E -u discovery {python} {code_dir}/manage.py showmigrations", 'registrar': ". {env_file}; sudo -E -u registrar {python} {code_dir}/manage.py showmigrations", } NGINX_ENABLE = { 'lms': "sudo ln -sf /edx/app/nginx/sites-available/lms /etc/nginx/sites-enabled/lms", 'cms': "sudo ln -sf /edx/app/nginx/sites-available/cms /etc/nginx/sites-enabled/cms", } # Max amount of time to wait for tags to be applied. MAX_BACKOFF = 120 INITIAL_BACKOFF = 1 REGION = get_instance_identity()['document']['region'] def services_for_instance(instance_id): """ Get the list of all services named by the services tag in this instance's tags. """ ec2 = boto.ec2.connect_to_region(REGION) reservations = ec2.get_all_instances(instance_ids=[instance_id]) for reservation in reservations: for instance in reservation.instances: if instance.id == instance_id: try: services = instance.tags['services'].split(',') except KeyError as ke:
        # NOTE(review): the enclosing method definition is outside this
        # excerpt; this loop polls the ELB until the instance reports
        # 'InService' or TIMEOUT/INTERVAL attempts are exhausted (at
        # least one attempt is always made).
        for i in range(0, max(int(self.TIMEOUT / self.INTERVAL), 1)):
            state = self._get_elb_instance_state(instance_id, elb_name)
            if state == 'InService':
                self.logger.info("instance in service")
                return True
            else:
                self.logger.debug('waiting for instance')
                sleep(self.INTERVAL)
        self.logger.warning("timeout for in-service check exceeded")
        return False


if __name__ == '__main__':
    # Resolve region and instance id from the EC2 identity document.
    region = get_instance_identity()['document']['region']
    instance_id = get_instance_identity()['document']['instanceId']
    config = get_config()
    loadbalancer_name = config['healthcheck']['loadbalancer_name']
    healthchecker = ElbHealthChecker(region)
    is_in_service = healthchecker.is_in_service_from_elb_perspective(
        instance_id, loadbalancer_name)
    # Exit status carries the result: 0 = in service, 1 = not in service.
    if is_in_service:
        sys.exit(0)
    else:
        sys.exit(1)
# NOTE(review): this excerpt opens with the 'except' of a try block whose
# 'try:' (the boto import attempt) is outside this view.
except:
    sys.stderr.write("ERROR: Can't import boto, please install it\n")
    sys.exit(1)

#
# Initialize inventory
#
inventory = {}
inventory['_meta'] = { 'hostvars': {} }
inventory['all'] = []  # Want everything but localhost used in sk8ts-ansible
inventory['all_instances'] = []

# Region comes from this host's EC2 instance identity document.
try:
    instance_identity = utils.get_instance_identity()
    aws_region = instance_identity['document']['region']
except:
    # NOTE(review): bare 'except:' kept byte-identical; it swallows all
    # exceptions before exiting with an error message.
    sys.stderr.write('ERROR: Could not get region name from instance metadata\n')
    sys.exit(1)

#
# Make boto connection
#

# Make the connection to AWS API
try:
    ec2conn = ec2.connect_to_region(aws_region)
except:
    # Python 2 print statement kept byte-identical.
    print "ERROR: Unable to connect to AWS"
    sys.exit(1)
def get_region():
    """Return the AWS region this EC2 instance is running in."""
    # Local import keeps boto out of module import time, as before.
    from boto.utils import get_instance_identity
    document = get_instance_identity()['document']
    return document['region']
'cms': "NO_EDXAPP_SUDO=1 /edx/bin/edxapp-migrate-cms --noinput --list",
    'xqueue': "SERVICE_VARIANT=xqueue {python} {code_dir}/manage.py migrate --noinput --list --settings=xqueue.aws_settings",
    'ecommerce': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
    'programs': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
    'insights': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
    'analytics_api': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
    'credentials': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
    'discovery': ". {env_file}; {python} {code_dir}/manage.py migrate --noinput --list",
}

# NOTE(review): excerpt starts mid-dict (opening outside this view) and is
# truncated mid-function at the end.
HIPCHAT_USER = "******"

# Max amount of time to wait for tags to be applied.
MAX_BACKOFF = 120
INITIAL_BACKOFF = 1

# Region is resolved once, at import time, from the EC2 instance
# identity document — this module only works on an EC2 instance.
REGION = get_instance_identity()['document']['region']


def services_for_instance(instance_id):
    """
    Get the list of all services named by the services tag in this
    instance's tags.
    """
    ec2 = boto.ec2.connect_to_region(REGION)
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.id == instance_id:
                try:
                    # 'services' tag is a comma-separated list of service names.
                    services = instance.tags['services'].split(',')
                except KeyError as ke:
                    msg = "Tag named 'services' not found on this instance({})".format(instance_id)
#!/usr/bin/env python
from boto.s3 import connect_to_region
from boto.s3.key import Key
from boto.utils import get_instance_identity
from sys import argv
from os import environ

# SigV4 signing must be enabled before any S3 connection is created.
environ['S3_USE_SIGV4'] = 'true'


def download_auth(bucket_name, key_name, region):
    """Print the contents of s3://<bucket_name>/<key_name> to stdout."""
    connection = connect_to_region(
        region, calling_format='boto.s3.connection.OrdinaryCallingFormat')
    bucket = connection.get_bucket(bucket_name, validate=False)
    key = Key(bucket=bucket, name=key_name)
    print(key.get_contents_as_string())


if __name__ == '__main__':
    # Usage: script <bucket> <key>; region comes from instance metadata.
    download_auth(argv[1], argv[2],
                  get_instance_identity()['document']['region'])
self.logger.info("Waiting for instance {0} to become healthy in elb {1}".format(instance_id, elb_name)) for i in range(0, max(int(self.TIMEOUT / self.INTERVAL), 1)): state = self._get_elb_instance_state(instance_id, elb_name) if state == 'InService': self.logger.info("instance in service") return True else: self.logger.debug('waiting for instance') sleep(self.INTERVAL) self.logger.warning("timeout for in-service check exceeded") return False if __name__ == '__main__': region = get_instance_identity()['document']['region'] instance_id = get_instance_identity()['document']['instanceId'] config = get_config() loadbalancer_name = config['healthcheck']['loadbalancer_name'] healthchecker = ElbHealthChecker(region) is_in_service = healthchecker.is_in_service_from_elb_perspective(instance_id, loadbalancer_name) if is_in_service: sys.exit(0) else: sys.exit(1)