def main():
    """Log a settings-style listing of completed *_FINAL_PRODUCTS snapshots.

    Scans all snapshots owned by this account, keeps those whose Name tag
    ends with '_FINAL_PRODUCTS' and whose status is 'completed', and logs
    lines of the form: <name-without-suffix> = "<snapshot-id>", sorted by name.
    """
    ec2_helper = EC2Helper()
    connection = ec2_helper.ec2_connection
    settings = {}
    for snapshot in connection.get_all_snapshots(owner='self'):
        name = snapshot.tags.get('Name')
        if name is None:
            LOG.info('Looking at {0} - None'.format(snapshot.id))
        elif snapshot.status == 'completed':
            LOG.info('Looking at {0} - {1}'.format(snapshot.id, snapshot.tags['Name']))
            if snapshot.tags['Name'].endswith('_FINAL_PRODUCTS'):
                # Strip the 15-character '_FINAL_PRODUCTS' suffix to get the key.
                settings[name[:-15]] = snapshot.id
        else:
            LOG.info('Looking at {0} - {1} which is {2}'.format(
                snapshot.id, snapshot.tags['Name'], snapshot.status))
    ordered_dictionary = collections.OrderedDict(sorted(settings.items()))
    output = '\n'
    # BUG FIX: the original used the Python-2-only iteritems(); items()
    # behaves identically here and also works on Python 3.
    for key, value in ordered_dictionary.items():
        output += '{0} = "{1}"\n'.format(key, value)
    LOG.info(output)
def delete_volumes(volume_ids, force, aws_access_key_id=None, aws_secret_access_key=None):
    """Delete each EBS volume in *volume_ids* through an EC2Helper.

    The *force* argument is accepted for interface compatibility but is not
    consulted here — presumably honoured by callers or a sibling overload;
    TODO(review): confirm.
    """
    helper = EC2Helper(aws_access_key_id, aws_secret_access_key)
    for vid in volume_ids:
        helper.delete_volume(vid)
def __call__(self):
    """ Actually run the job """
    LOGGER.info('frequency_id: {0}'.format(self._frequency_id))
    helper = EC2Helper()
    cheapest_zone = helper.get_cheapest_spot_price(self._instance_type, self._spot_price)
    # Bail out early when no availability zone meets the price ceiling.
    if cheapest_zone is None:
        LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(
            self._instance_type, self._spot_price))
        return
    user_data_mime = self.get_mime_encoded_user_data()
    LOGGER.info('{0}'.format(user_data_mime))
    helper.run_spot_instance(
        self._ami_id,
        self._spot_price,
        user_data_mime,
        self._instance_type,
        None,
        self._created_by,
        '{0}-{1}'.format(self._frequency_id, self._name),
        instance_details=self._instance_details,
        zone=cheapest_zone,
        ephemeral=True)
def start_servers(
        ami_id, user_data, setup_disks, instance_type, obs_id, created_by, name,
        instance_details, spot_price, ebs, bottom_frequency, frequency_range):
    """Launch a spot instance in the cheapest zone to process one observation.

    Chooses a swap size (1 GB when running on EBS, otherwise up to 75% of the
    ephemeral storage capped at 16 GB), builds the MIME user data and requests
    the spot instance; logs an error if no zone satisfies the price.
    """
    LOGGER.info('obs_id: {0}, bottom_frequency: {1}, frequency_range: {2}'.format(obs_id, bottom_frequency, frequency_range))
    helper = EC2Helper()
    zone = helper.get_cheapest_spot_price(instance_type, spot_price)
    if zone is None:
        LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(instance_type, spot_price))
        return
    # Swap size
    if ebs is None:
        swap_size = 1
    else:
        ephemeral_size = instance_details.number_disks * instance_details.size
        swap_size = min(int(ephemeral_size * 0.75), 16)
    user_data_mime = get_mime_encoded_user_data(
        user_data,
        obs_id,
        setup_disks,
        bottom_frequency,
        frequency_range,
        swap_size
    )
    LOGGER.info('{0}'.format(user_data_mime))
    helper.run_spot_instance(
        ami_id,
        spot_price,
        user_data_mime,
        instance_type,
        None,
        created_by,
        name + '- {0}'.format(obs_id),
        instance_details=instance_details,
        zone=zone,
        ebs_size=ebs,
        number_ebs_volumes=4,
        ephemeral=True)
def get_snapshots():
    """Return the base names of completed '*_FINAL_PRODUCTS' snapshots.

    Each returned entry is the snapshot's Name tag with the 15-character
    '_FINAL_PRODUCTS' suffix stripped.
    """
    connection = EC2Helper().ec2_connection
    names = []
    for snap in connection.get_all_snapshots(owner='self'):
        tag = snap.tags.get('Name')
        if tag is None:
            LOG.info('Looking at {0} - None'.format(snap.id))
            continue
        if snap.status != 'completed':
            LOG.info('Looking at {0} - {1} which is {2}'.format(snap.id, snap.tags['Name'], snap.status))
            continue
        LOG.info('Looking at {0} - {1}'.format(snap.id, snap.tags['Name']))
        if snap.tags['Name'].endswith('_FINAL_PRODUCTS'):
            names.append(str(tag[:-15]))
    return names
def start_servers(ami_id, user_data, setup_disks, instance_type, obs_id, created_by, name, instance_details, spot_price):
    """Create a volume from the snapshot registered for *obs_id* and start a
    spot instance attached to it.

    Looks up the snapshot id in OBS_IDS; warns and does nothing when the
    observation is unknown, and logs an error when no availability zone
    satisfies the requested spot price.
    """
    snapshot_id = OBS_IDS.get(obs_id)
    if snapshot_id is None:
        # BUG FIX: the original message contained a {0} placeholder but never
        # called .format(obs_id), so the literal text '{0}' was logged.
        LOGGER.warning('The obs-id: {0} does not exist in the settings file'.format(obs_id))
    else:
        ec2_helper = EC2Helper()
        # Provisioned IOPS only where the instance type supports it.
        iops = None
        if instance_details.iops_support:
            iops = 500
        zone = ec2_helper.get_cheapest_spot_price(instance_type, spot_price)
        if zone is not None:
            volume, snapshot_name = ec2_helper.create_volume(snapshot_id, zone, iops=iops)
            LOGGER.info('obs_id: {0}, volume_name: {1}'.format(obs_id, snapshot_name))
            now = datetime.datetime.now()
            user_data_mime = get_mime_encoded_user_data(
                volume.id, setup_disks, user_data, now.strftime('%Y-%m-%dT%H-%M-%S'))
            if spot_price is not None:
                ec2_helper.run_spot_instance(
                    ami_id,
                    spot_price,
                    user_data_mime,
                    instance_type,
                    volume.id,
                    created_by,
                    '{1}-{0}'.format(name, snapshot_name),
                    instance_details,
                    zone,
                    ephemeral=True)
        else:
            LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(
                instance_type, spot_price))
def __call__(self):
    """ Actually run the job """
    # Get the name of the volume
    helper = EC2Helper()
    iops = 500 if self._instance_details.iops_support else None
    zone = helper.get_cheapest_spot_price(self._instance_type, self._spot_price)
    if zone is None:
        LOGGER.error('Cannot get a spot instance of {0} for ${1}'.format(
            self._instance_type, self._spot_price))
        return
    volume, snapshot_name = helper.create_volume(self._snapshot_id, zone, iops=iops)
    LOGGER.info('obs_id: {0}, volume_name: {1}'.format(
        self._obs_id, snapshot_name))
    user_data_mime = self.get_mime_encoded_user_data(volume.id)
    if self._spot_price is not None:
        helper.run_spot_instance(
            self._ami_id,
            self._spot_price,
            user_data_mime,
            self._instance_type,
            volume.id,
            self._created_by,
            '{1}-{2}-{0}'.format(self._name, snapshot_name, self._counter),
            self._instance_details,
            zone,
            ephemeral=True)
import boto3
import datetime
import json

from ec2_helper import EC2Helper
from elb_helper import ELBHelper

# Module-level clients so they are reused across warm Lambda invocations.
elb_helper = ELBHelper()
ec2_helper = EC2Helper()
s3 = boto3.client('s3')


def lambda_handler(event, context):
    # The purpose of this function is to place a json file in s3 that contains
    # the various resources associated with ec2 instances that have a certain
    # tag. For example, if you have 3 ec2 instances with Name:Client1 in
    # us-east1, then this lambda function should print those instances and
    # associated ELBS/snapshots/loadbalancers to the file.
    #
    # The expected downstream use of this s3 file is AWS Glue and Athena

    # Inputs sent during lambda invoke
    tag_name = event['TagName']
    tag_value = event['TagValue']
    bucket = event['DestinationBucket']
    region = event['Region']
    account = event['Account']
    role = event['LambdaRole']

    # helper function to set ELB/EC2 API regions
    # NOTE: We do not change s3 region, assumes lambda runs in same region as bucket
    elb_helper.setConfig(region, account, role)
def __awsInit(self, credentials):
    """Initialise ``self.ec2`` from a credentials mapping.

    :param credentials: mapping expected to contain 'access_key' and
        'secret_key' entries.
    :return: False when either key is missing, True once the helper is set.
    """
    if not ('secret_key' in credentials and 'access_key' in credentials):
        return False
    self.ec2 = EC2Helper(credentials['access_key'], credentials['secret_key'])
    # BUG FIX: the original implicitly returned None on success while
    # returning False on failure; return True so callers can test the result.
    return True