def create_volume(module, ec2, zone):
    """Create an EBS volume in *zone* if one matching the module params
    does not already exist.

    module: AnsibleModule object
    ec2: authenticated boto EC2 connection object

    Returns a (volume, changed) tuple. (The original implementation
    computed ``changed`` but implicitly returned None; the sibling
    variant of this function returns the tuple, so this now matches it.)
    """
    changed = False
    name = module.params.get('name')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the
    # default of "standard"
    if iops:
        volume_type = 'io1'

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)
            changed = True

            # create_volume() is asynchronous; poll until the volume is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # 'except X as e' works on Python 2.6+ and 3; the old
        # 'except X, e' comma form is a syntax error on Python 3.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    return volume, changed
def create_box():
    """Launch NODE_COUNT EC2 instances, tag them, wait until they are
    running and system-reachable, and return the new instance objects."""
    old_ids = set(i.id for i in ec2.get_only_instances())
    ec2.run_instances(
        AMI_ID,
        key_name=KEYNAME,
        min_count=NODE_COUNT,
        max_count=NODE_COUNT,
        security_groups=[SECURITY_GROUP],
        instance_type=os.environ.get('EC2_INSTANCE_TYPE', 'm3.medium'))
    # New instances are whatever ids appeared since the snapshot above.
    new_instances = [i for i in ec2.get_only_instances() if i.id not in old_ids]
    for new_instance in new_instances:
        print("new instance:", new_instance.id)
        ec2.create_tags([new_instance.id], {"billingProject": "aei"})

    # Wait until every instance reports the 'running' state.
    is_running = [False] * len(new_instances)
    while not all(is_running):
        for count, inst in enumerate(new_instances):
            # BUG FIX: the original tested the leftover loop variable
            # 'new_instance' (always the last instance) instead of the
            # instance actually being enumerated.
            is_running[count] = inst.state == u'running'
        time.sleep(3)
        for inst in new_instances:
            inst.update()

    # Wait until the system status reachability check passes.
    is_reachable = [False] * len(new_instances)
    while not all(is_reachable):
        instance_ids = [inst.id for inst in new_instances]
        inst_statuses = ec2.get_all_instance_status(instance_ids=instance_ids)
        # BUG FIX: the original used "!= 'passed'", which made the loop
        # exit precisely while the instances were still unreachable.
        is_reachable = [
            inst_status.system_status.details['reachability'] == 'passed'
            for inst_status in inst_statuses
        ]
        time.sleep(3)

    time.sleep(1)
    for new_instance in new_instances:
        assert new_instance.public_dns_name
        print("public dns name:", new_instance.public_dns_name)
    return new_instances
def create_volume(module, ec2, zone):
    """Ensure an EBS volume described by the module params exists in
    *zone*; returns a (volume, changed) tuple."""
    params = module.params
    name = params.get('name')
    iops = params.get('iops')
    encrypted = params.get('encrypted')
    volume_size = params.get('volume_size')
    snapshot = params.get('snapshot')
    # A custom IOPS value implies the "io1" volume type instead of the
    # default "standard".
    volume_type = 'io1' if iops else params.get('volume_type')

    changed = False
    volume = get_volume(module, ec2)
    if volume is None:
        create_args = [volume_size, zone, snapshot, volume_type, iops]
        if boto_supports_volume_encryption():
            create_args.append(encrypted)
        try:
            volume = ec2.create_volume(*create_args)
            changed = True

            # Volume creation is asynchronous; poll until it is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    return volume, changed
def create_volume(module, ec2, zone):
    """Create an EBS volume in *zone* if a matching one does not already
    exist.

    Returns a (volume, changed) tuple. (The original computed ``changed``
    but implicitly returned None.)
    """
    changed = False
    name = module.params.get("name")
    iops = module.params.get("iops")
    encrypted = module.params.get("encrypted")
    volume_size = module.params.get("volume_size")
    volume_type = module.params.get("volume_type")
    snapshot = module.params.get("snapshot")
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = "io1"

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)
            changed = True

            # Volume creation is asynchronous; poll until it is usable.
            while volume.status != "available":
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # Python-3-compatible except syntax; 'except X, e' is a syntax
        # error on Python 3.
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    return volume, changed
def tag_resources(self, stack, hosts=None, volumes=None):
    """Tag the stack's EC2 resources.

    Each volume gets a unique Name tag, then every volume and host
    instance receives the stack's common tag set.
    """
    if hosts is None:
        hosts = []
    if volumes is None:
        volumes = []

    ec2 = self.connect_ec2()

    # Tag each volume with a unique name. This makes it easier to view
    # the volumes in the AWS console
    for v in volumes:
        name = 'stackdio::volume::{0!s}'.format(v.id)
        logger.info('tagging volume {0}: {1}'.format(v.volume_id, name))
        ec2.create_tags([v.volume_id], {
            'Name': name,
        })

    # Next tag all resources with a set of common fields
    resource_ids = [v.volume_id for v in volumes] + \
        [h.instance_id for h in hosts]

    # Filter out empty strings/None. A list comprehension (instead of
    # filter()) keeps the truthiness test below correct on Python 3,
    # where filter() returns a lazy, always-truthy iterator.
    resource_ids = [rid for rid in resource_ids if rid]

    if resource_ids:
        logger.info('tagging {0!r}'.format(resource_ids))
        ec2.create_tags(resource_ids, stack.get_tags())
def tag_resources(self, stack, hosts=None, volumes=None):
    """Apply a unique Name tag to each volume, then the stack's common
    tags to every volume and host instance."""
    if hosts is None:
        hosts = []
    if volumes is None:
        volumes = []

    ec2 = self.connect_ec2()

    # Give each volume a human-readable Name so it is easy to find in the
    # AWS console; null/empty volume ids are skipped.
    for vol in volumes:
        if not vol.volume_id:
            continue
        vol_name = 'stackdio::volume::{0!s}'.format(vol.id)
        logger.debug('tagging volume {0}: {1}'.format(vol.volume_id, vol_name))
        ec2.create_tags([vol.volume_id], {'Name': vol_name})

    # Collect every non-empty resource id: volumes plus host instances.
    all_ids = [v.volume_id for v in volumes]
    all_ids += [h.instance_id for h in hosts]
    all_ids = [rid for rid in all_ids if rid]

    if all_ids:
        logger.debug('tagging {0!r}'.format(all_ids))
        # EC2 rejects None tag values, so replace them with ''.
        common_tags = {}
        for key, value in stack.get_tags().items():
            common_tags[key] = '' if value is None else value
        ec2.create_tags(all_ids, common_tags)
def create_vpn_connection(module, ec2, vpc_conn):
    """ Creates a vpn connection

    module: Ansible module object
    ec2: authenticated ec2 connection object
    vpc_conn: authenticated VPCConnection connection object

    Module params used:
    name: the option set name
    type: the type of vpn connection
    customer_gateway_id: the id of the customer gateway
    vpn_gateway_id: the id of the vpn gateway
    static_routes_only: indicates whether the VPN connection requires static routes.
    wait: True/False indicating to wait for the VPN to be in the available state
    wait_timeout: The timeout for waiting

    Returns a (vpn_connection_id, changed) tuple.
    """
    name = module.params.get('name')
    # Local name avoids shadowing the 'type' builtin (the module param
    # key is unchanged).
    vpn_type = module.params.get('type')
    customer_gateway_id = module.params.get('customer_gateway_id')
    vpn_gateway_id = module.params.get('vpn_gateway_id')
    static_routes_only = module.params.get('static_routes_only')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Look for an existing connection carrying the requested Name tag.
    vpn_connections = vpc_conn.get_all_vpn_connections()
    for vpn_connection in vpn_connections:
        filters = {'resource-id': vpn_connection.id}
        gettags = ec2.get_all_tags(filters=filters)
        tagdict = {}
        for tag in gettags:
            tagdict[tag.name] = tag.value
        if ('Name', name) in set(tagdict.items()):
            changed = False
            if vpn_type != vpn_connection.type:
                changed = True
            if customer_gateway_id != vpn_connection.customer_gateway_id:
                changed = True
            if vpn_gateway_id != vpn_connection.vpn_gateway_id:
                changed = True
            # cant seem to get the routing type to check if that has changed
            if changed:
                # Existing VPN connections cannot be modified in place.
                module.fail_json(msg='VPN connection cannot be modified')
            return (vpn_connection.id, False)

    vpn_connection = vpc_conn.create_vpn_connection(
        vpn_type, customer_gateway_id, vpn_gateway_id, static_routes_only)
    # List form for consistency with the other create_tags calls in this
    # file (boto accepts either a string or a list of ids).
    ec2.create_tags([vpn_connection.id], {'Name': name})
    if wait:
        await_vpn_connection_state(module, vpc_conn, vpn_connection,
                                   'available', wait_timeout)
    return (vpn_connection.id, True)
def tag_instances():
    """Tag every instance in every reachable region with the IAM identity
    that created it, derived from the CloudTrail RunInstances event."""
    for region in regions():
        region_name = region.name
        # Skip these regions: they require separate credentials/endpoints.
        if region_name == 'cn-north-1' or region_name == 'us-gov-west-1':
            continue
        # print() as a function call parses on both Python 2 and 3; the
        # original mixed a Python-2-only print statement with print() calls.
        print("Region: %s" % (region_name))
        # Connect
        ec2 = boto.ec2.connect_to_region(
            region_name,
            aws_access_key_id=access_key,
            aws_secret_access_key=access_secret)
        cloudtrail = boto.cloudtrail.connect_to_region(
            region_name,
            aws_access_key_id=access_key,
            aws_secret_access_key=access_secret)

        # NOTE: the original also fetched all tags into an unused local,
        # wasting an API call; that fetch has been removed.
        reservations = ec2.get_all_reservations()
        for reservation in reservations:
            for instance in reservation.instances:
                events_dict = cloudtrail.lookup_events(
                    lookup_attributes=[{
                        'AttributeKey': 'ResourceName',
                        'AttributeValue': instance.id
                    }])
                if len(events_dict['Events']) == 0:
                    print("No CloudTrail events for instance: %s - %s" %
                          (instance.id, instance.instance_type))
                else:
                    for data in events_dict['Events']:
                        json_file = json.loads(data['CloudTrailEvent'])
                        # Only interested in RunInstances (e.g. created instances) to find owners
                        # There's also StartInstances, but that event is fired if someone else
                        # restarts an instance, which isn't what we're really looking for
                        if json_file['eventName'] == 'RunInstances':
                            arn = json_file['userIdentity']['arn']
                            username = json_file['userIdentity']['userName']
                            user_type = json_file['userIdentity']['type']
                            print(
                                "Tagging Instance: %s, Username: %s, ARN: %s, Type: %s, eventName: %s"
                                % (instance.id, username, arn, user_type,
                                   json_file['eventName']))
                            # Tag the instance
                            ec2.create_tags(
                                [instance.id], {
                                    "IAM Username": username,
                                    "IAM ARN": arn,
                                    "IAM Type": user_type
                                })
                # CloudTrail calls are throttled if there's more than 1 req/s
                time.sleep(1)
def test_ami_filter_by_empty_tag():
    """Only images carrying an empty-valued RELEASE tag should match a
    describe_images filter on tag:RELEASE with an empty value."""
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    client = boto3.client("ec2", region_name="us-west-1")

    instance = ec2.create_instances(
        ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0]

    base_tags = [
        {
            "Key": "Base_AMI_Name",
            "Value": "Deep Learning Base AMI (Amazon Linux 2) Version 31.0",
        },
        {"Key": "OS_Version", "Value": "AWS Linux 2"},
    ]

    # Build ten AMIs that all share the same base tags.
    fake_images = []
    for idx in range(10):
        image = client.create_image(
            InstanceId=instance.instance_id,
            Name="MyAMI{}".format(idx),
            Description="Test",
        )
        ec2.create_tags(Resources=[image["ImageId"]], Tags=base_tags)
        fake_images.append(image)

    # Add release tags to some of the images in the middle.
    for image in fake_images[3:6]:
        ec2.create_tags(
            Resources=[image["ImageId"]],
            Tags=[{"Key": "RELEASE", "Value": ""}],
        )

    images_filter = [
        {
            "Name": "tag:Base_AMI_Name",
            "Values": ["Deep Learning Base AMI (Amazon Linux 2) Version 31.0"],
        },
        {"Name": "tag:OS_Version", "Values": ["AWS Linux 2"]},
        {"Name": "tag:RELEASE", "Values": [""]},
    ]
    assert len(client.describe_images(Filters=images_filter)["Images"]) == 3
def create_submaster():
    """Launch a single Submaster EC2 instance and tag it with the next
    sequential submaster number."""
    global subMasterCounter
    subMasterCounter = int(subMasterCounter) + 1

    conn = boto.connect_ec2()
    reservation = conn.run_instances(
        image_id='ami-d05223b8',
        key_name='keyy',
        subnet_id='subnet-aaeb39f3',
        instance_type='t2.micro',
        instance_profile_name='workerProfile',
        user_data=str(subMasterCounter))

    new_instance = reservation.instances[0]
    conn.create_tags(new_instance.id, {
        "Name": str(subMasterCounter) + 'Submaster',
        "type": 'Submaster',
        "idnumber": subMasterCounter,
    })
def create_volume(module, ec2, zone):
    """Find or create the EBS volume described by the module params.

    If the volume already exists and is attached, return it when it is
    attached to the requested instance (or no instance was requested),
    otherwise fail/exit with an informative message. If no volume exists,
    create one, wait for it to become available, and Name-tag it.

    Returns the boto volume object.
    """
    name = module.params.get('name')
    id = module.params.get('id')  # NOTE: shadows the builtin; kept for the message text below
    instance = module.params.get('instance')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the
    # default of "standard"
    if iops:
        volume_type = 'io1'

    # Ansible may hand us the literal string 'None' (or '') for an unset
    # instance parameter.
    if instance == 'None' or instance == '':
        instance = None

    volume = get_volume(module, ec2)
    if volume:
        if volume.attachment_state() is not None:
            if instance is None:
                return volume
            adata = volume.attach_data
            if adata.instance_id != instance:
                module.fail_json(
                    msg="Volume %s is already attached to another instance: %s"
                    % (name or id, adata.instance_id))
            else:
                module.exit_json(
                    msg="Volume %s is already mapped on instance %s: %s" %
                    (name or id, adata.instance_id, adata.device),
                    volume_id=id,
                    device=adata.device,
                    changed=False)
    else:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)

            # Volume creation is asynchronous; poll until it is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # Python-3-compatible except syntax ('except X, e' is a syntax
        # error on Python 3).
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    # Return the (possibly freshly created) volume; the original fell
    # through here and implicitly returned None on the creation path.
    return volume
def create_workers(subMasterID, workers_no):
    """Launch *workers_no* worker instances for the given submaster and
    tag them all with a single create_tags call."""
    ec2 = boto.connect_ec2()
    reservation = ec2.run_instances(
        image_id='ami-ea552482',
        min_count=workers_no,
        max_count=workers_no,
        key_name='keyy',
        subnet_id='subnet-aaeb39f3',
        instance_type='t2.micro',
        instance_profile_name='workerProfile',
        user_data=str(subMasterID))

    # Comprehension instead of a manual append loop.
    ids = [instance.id for instance in reservation.instances]
    # print() as a function parses on both Python 2 and 3; the original
    # 'print ids' statement is a syntax error on Python 3.
    print(ids)

    ec2.create_tags(ids, {
        "Name": str(subMasterID) + 'Worker',
        "type": 'Worker',
        "idnumber": subMasterID,
    })
def main():
    """Ansible entry point: create, remove, or list EC2 tags on a
    resource, reporting an accurate 'changed' status."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        resource=dict(required=True),
        tags=dict(),
        state=dict(default='present', choices=['present', 'absent', 'list']),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')

    ec2 = ec2_connect(module)

    # Fetch the resource's current tags so we can compare with the
    # requested ones and only add/remove what actually differs.
    filters = {'resource-id': resource}
    gettags = ec2.get_all_tags(filters=filters)
    tagdict = {}
    for tag in gettags:
        tagdict[tag.name] = tag.value

    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        if set(tags.items()).issubset(set(tagdict.items())):
            module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
        # Only create the (key, value) pairs that are not already present.
        dictadd = {}
        for (key, value) in set(tags.items()):
            if (key, value) not in set(tagdict.items()):
                dictadd[key] = value
        # NOTE: the original bound the result to an unused 'tagger' local
        # and re-fetched the tags without using them (a wasted API call);
        # both have been removed.
        ec2.create_tags(resource, dictadd)
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)

    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        # Pairs the caller asked to remove that don't exist anyway.
        baddict = {}
        for (key, value) in set(tags.items()):
            if (key, value) not in set(tagdict.items()):
                baddict[key] = value
        if set(baddict) == set(tags):
            module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        # Pairs that do exist and should be deleted.
        dictremove = {}
        for (key, value) in set(tags.items()):
            if (key, value) in set(tagdict.items()):
                dictremove[key] = value
        ec2.delete_tags(resource, dictremove)
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)

    if state == 'list':
        module.exit_json(changed=False, tags=tagdict)
    # Normally unreachable: every state branch exits via exit_json/fail_json.
    sys.exit(0)
def create_volume(module, ec2, zone):
    """Find or create the EBS volume described by the module params.

    Returns the volume on the already-attached path; the creation path
    now also returns the volume for consistency (the original implicitly
    returned None there).
    """
    name = module.params.get('name')
    id = module.params.get('id')  # NOTE: shadows the builtin; kept for the message text below
    instance = module.params.get('instance')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    # If custom iops is defined we use volume_type "io1" rather than the
    # default of "standard"
    if iops:
        volume_type = 'io1'

    # Ansible may hand us the literal string 'None' (or '') here.
    if instance == 'None' or instance == '':
        instance = None

    volume = get_volume(module, ec2)
    if volume:
        if volume.attachment_state() is not None:
            if instance is None:
                return volume
            adata = volume.attach_data
            if adata.instance_id != instance:
                module.fail_json(msg = "Volume %s is already attached to another instance: %s"
                                 % (name or id, adata.instance_id))
            else:
                module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
                                 (name or id, adata.instance_id, adata.device),
                                 volume_id=id,
                                 device=adata.device,
                                 changed=False)
    else:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)

            # Volume creation is asynchronous; poll until it is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            if name:
                ec2.create_tags([volume.id], {"Name": name})
        # Python-3-compatible except syntax ('except X, e' is a syntax
        # error on Python 3).
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    return volume
def copy_image(module, ec2):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: ec2 connection object
    """
    tags = module.params.get('tags')

    params = {
        'SourceRegion': module.params.get('source_region'),
        'SourceImageId': module.params.get('source_image_id'),
        'Name': module.params.get('name'),
        'Description': module.params.get('description'),
        'Encrypted': module.params.get('encrypted'),
    }
    # KmsKeyId may only be supplied when actually set; boto3 rejects None.
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        image_id = ec2.copy_image(**params)['ImageId']
        if module.params.get('wait'):
            ec2.get_waiter('image_available').wait(ImageIds=[image_id])
        if tags:
            ec2.create_tags(Resources=[image_id],
                            Tags=[{
                                'Key': k,
                                'Value': v
                            } for k, v in tags.items()])
        module.exit_json(changed=True, image_id=image_id)
    except WaiterError as we:
        module.fail_json(
            msg=
            'An error occurred waiting for the image to become available. (%s)'
            % str(we),
            exception=traceback.format_exc())
    except ClientError as ce:
        # str(ce): ClientError has no .message attribute on Python 3, so
        # the original 'ce.message' raised AttributeError and masked the
        # real error.
        module.fail_json(msg=str(ce))
    except NoCredentialsError:
        module.fail_json(
            msg='Unable to authenticate, AWS credentials are invalid.')
    except Exception as e:
        module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def create_volume(module, ec2, zone):
    """Ensure an EBS volume exists in *zone*, optionally encrypted with a
    specific KMS key; returns a (volume, changed) tuple."""
    params = module.params
    name = params.get('name')
    iops = params.get('iops')
    encrypted = params.get('encrypted')
    kms_key_id = params.get('kms_key_id')
    volume_size = params.get('volume_size')
    snapshot = params.get('snapshot')
    tags = params.get('tags')
    # A custom IOPS value implies the "io1" volume type instead of the
    # default "standard".
    volume_type = 'io1' if iops else params.get('volume_type')

    changed = False
    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                if kms_key_id is not None:
                    volume = ec2.create_volume(volume_size, zone, snapshot,
                                               volume_type, iops, encrypted,
                                               kms_key_id)
                else:
                    volume = ec2.create_volume(volume_size, zone, snapshot,
                                               volume_type, iops, encrypted)
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot,
                                           volume_type, iops)
            changed = True

            # Volume creation is asynchronous; poll until it is usable.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            # The name, when given, rides along as just another tag.
            if name:
                tags["Name"] = name
            if tags:
                ec2.create_tags([volume.id], tags)
        except boto.exception.BotoServerError as e:
            module.fail_json_aws(e)
    return volume, changed
def register_volumes_for_delete(self, hosts):
    """Flip delete-on-termination on for each host's block devices and
    retag the volumes so replacement volumes can reuse their names."""
    ec2 = self.connect_ec2()

    for host in hosts:
        if not host.instance_id:
            logger.info('Host {0} has no instance ID...skipping volume '
                        'delete.'.format(host))
            continue

        # Current block device mappings for this instance.
        _, devices = ec2.get_instance_attribute(
            host.instance_id, 'blockDeviceMapping').popitem()

        # Build '<device>=true' modify strings for every device not yet
        # flagged for delete-on-termination.
        mods = ['{0}=true'.format(dev_name)
                for dev_name, dev in devices.items()
                if not dev.delete_on_termination]
        if mods:
            ec2.modify_instance_attribute(host.instance_id,
                                          'blockDeviceMapping', mods)

        # Rename each volume with a -DEL- suffix so a new volume can take
        # over the original name.
        for vol in host.volumes.all():
            if not vol.volume_id:
                logger.warning(
                    '{0!r} missing volume_id. Skipping delete retag.'.
                    format(vol))
                continue

            new_name = 'stackdio::volume::{0!s}-DEL-{1}'.format(
                vol.id, uuid4().hex)
            logger.info('tagging volume {0}: {1}'.format(
                vol.volume_id, new_name))
            ec2.create_tags([vol.volume_id], {'Name': new_name})
def register_volumes_for_delete(self, hosts):
    """Enable delete-on-termination for each host's block devices and
    retag the volumes so fresh volumes can reuse their names."""
    ec2 = self.connect_ec2()

    # for each host, modify the instance attribute to enable automatic
    # volume deletion automatically when the host is terminated
    for h in hosts:
        if not h.instance_id:
            logger.info('Host {0} has no instance ID...skipping volume '
                        'delete.'.format(h))
            continue

        # get current block device mappings
        _, devices = ec2.get_instance_attribute(
            h.instance_id, 'blockDeviceMapping').popitem()

        # find those devices that aren't already registered for deletion
        # and build a list of the modify strings
        mods = []
        # .items() instead of the Python-2-only .iteritems()
        for device_name, device in devices.items():
            if not device.delete_on_termination:
                mods.append('{0}=true'.format(device_name))

        # use the modify strings to change the existing volumes flag
        if mods:
            ec2.modify_instance_attribute(h.instance_id,
                                          'blockDeviceMapping',
                                          mods)

        # for each volume, rename them so we can create new volumes with
        # the same name, just in case
        for v in h.volumes.all():
            if not v.volume_id:
                # logger.warning: .warn() is a deprecated alias
                logger.warning('{0!r} missing volume_id. Skipping delete '
                               'retag.'.format(v))
                continue

            name = 'stackdio::volume::{0!s}-DEL-{1}'.format(v.id,
                                                            uuid4().hex)
            logger.info('tagging volume {0}: {1}'.format(v.volume_id, name))
            ec2.create_tags([v.volume_id], {
                'Name': name,
            })
def run_instance(obs_worker_id):
    """Spawn one OBS worker EC2 instance (unless print_only is set) and
    record it in the elastic_build_hosts bookkeeping list."""
    global print_only
    if print_only:
        return

    # WARNING: Do NOT start more than 1 instance at a time here!
    # Multilaunching is managed at higher level to create unique tags etc.
    worker_customization = ("#!/bin/sh\nhostname " + obs_worker_id +
                            ";echo " + obs_worker_id + " > /etc/HOSTNAME;")

    global aws_ami_image
    global aws_keypair_name
    global aws_security_group
    global aws_instance_type
    reservation = ec2.run_instances(image_id=aws_ami_image,
                                    min_count=1,
                                    max_count=1,
                                    key_name=aws_keypair_name,
                                    security_groups=[aws_security_group],
                                    user_data=worker_customization,
                                    instance_type=aws_instance_type)

    new_worker = reservation.instances[0]
    ec2.create_tags([new_worker.id], {'Name': obs_worker_id})

    global elastic_build_hosts
    elastic_build_hosts.append({
        'instance_name': obs_worker_id,
        'instance_id': new_worker.id,
        'instance_type': new_worker.instance_type,
        'cpu': "-",
        'disk': "-",
        'instance_state': "SPAWNING",
        'launch_time': 0,
        'time_left': 60,
        'ip': "-",
        'workers': [],
    })
    return
def tag_volume(ec2, volume_id, name):
    """Apply a Name tag to *volume_id* and report success or failure.

    Uses print() calls, which parse on both Python 2 and 3; the original
    Python-2-only print statements are a syntax error on Python 3.
    """
    tags = {'Name': name}
    print('Tagging {0} with Name={1}...'.format(volume_id, name))
    if ec2.create_tags(volume_id, tags):
        print(' * Done.')
    else:
        print(' * Failed.')
    print('==================')
    return
def createInstance(instance_name):
    """Launch a t2.micro from a fixed AMI, wait for it to run, Name-tag
    it, and return its public DNS name (or an error string on failure)."""
    ec2 = boto.ec2.connect_to_region('us-west-2', **auth)
    try:
        # ami id of the instance as image to spin new ec2 (eg ami-g284cd96)
        image_id = '<ami-id>'
        # create key and security grp in advance
        reservations = ec2.run_instances(
            image_id,
            key_name='<key_name>',
            instance_type='t2.micro',
            security_groups=['<security-group-name>'])
        instance = reservations.instances[0]
        # run_instances returns before the instance is up; poll its state.
        while instance.state != 'running':
            time.sleep(5)
            instance.update()
        ec2.create_tags([instance.id], {'Name': instance_name})
        return instance.public_dns_name
    # 'except X as e' is the Python-3-compatible spelling; the original
    # 'except Exception, e3' comma form is a syntax error on Python 3.
    # The broad catch is kept deliberately: callers expect an error string
    # rather than a raised exception.
    except Exception as e3:
        error3 = "Error3: %s" % str(e3)
        return error3
def run_instance(obs_worker_id):
    """Launch a single OBS worker instance and register it in the global
    elastic_build_hosts list (no-op when print_only is set)."""
    global print_only
    if not print_only:
        # WARNING: Do NOT start more than 1 instance at a time here!
        # Multilaunching is managed at higher level to create unique tags etc.
        worker_customization = "#!/bin/sh\nhostname " + obs_worker_id +\
            ";echo " + obs_worker_id + " > /etc/HOSTNAME;"

        global aws_ami_image
        global aws_keypair_name
        global aws_security_group
        global aws_instance_type
        launch_kwargs = dict(
            image_id=aws_ami_image,
            min_count=1,
            max_count=1,
            key_name=aws_keypair_name,
            security_groups=[aws_security_group],
            user_data=worker_customization,
            instance_type=aws_instance_type,
        )
        instance = ec2.run_instances(**launch_kwargs).instances[0]
        ec2.create_tags([instance.id], {'Name': obs_worker_id})

        global elastic_build_hosts
        host_record = {
            'instance_name': obs_worker_id,
            'instance_id': instance.id,
            'instance_type': instance.instance_type,
            'cpu': "-",
            'disk': "-",
            'instance_state': "SPAWNING",
            'launch_time': 0,
            'time_left': 60,
            'ip': "-",
            'workers': [],
        }
        elastic_build_hosts.append(host_record)
    return
def set_metadata(ec2, instance_id, zone, metadata):
    """Tag an instance (and its attached volumes) with metadata['instance'].

    NOTE: mutates metadata['instance']['Name'] in place by appending the
    zone, so callers that reuse the metadata dict see the modified value
    (behavior preserved from the original implementation).
    Raises Exception if any tagging call fails.
    """
    if 'instance' not in metadata:
        return
    instance_metadata = metadata['instance']
    resources = [instance_id]

    # append zone to the name before tagging
    if 'Name' in instance_metadata and zone != '':
        instance_metadata['Name'] += ' - ' + zone

    if not ec2.create_tags(resources, instance_metadata):
        # sys.stderr.write() replaces the Python-2-only
        # 'print >> sys.stderr' statement form.
        sys.stderr.write("Couldn't tag instance: " + instance_id + "\n")
        raise Exception("Couldn't tag instance: " + instance_id)

    # this doesn't seem inefficient - should be a way to query instance
    # volumes directly
    volumes = [v.id for v in ec2.get_all_volumes()
               if v.attach_data.instance_id == instance_id]

    # metadata keys are case-sensitive - we assume that if the user wants
    # to tag the name of assets, they've used 'Name' because that's the
    # only one that works
    if 'Name' in instance_metadata:
        if not ec2.create_tags(volumes, {'Name': instance_metadata['Name']}):
            sys.stderr.write("Couldn't tag volumes with instance name: " +
                             instance_metadata['Name'] + "\n")
            raise Exception("Couldn't tag volumes with instance name: " +
                            instance_metadata['Name'])
def ec2_run_instances(ami_name, tag_name):
    """Launch one instance from the named AMI config, Name-tag it, and
    poll a few times until it shows up as booted."""
    conn = _get_ec2_connection()

    ami = fabtaskit.amis.get(ami_name)
    if ami is None:
        abort("Couldn't find ami conf with name: {}".format(ami_name))

    launch_kwargs = ami.copy()
    launch_kwargs['key_name'] = os.environ['AWS_KEYPAIR_NAME']
    reservation = conn.run_instances(**launch_kwargs)
    instance = reservation.instances[0]
    conn.create_tags([instance.id], {'Name': tag_name})

    # Poll up to 5 times, 5 seconds apart, for the reservation to appear.
    booted = False
    for _ in range(5):
        time.sleep(5)
        for res in conn.get_all_instances():
            if res.id == reservation.id:
                print_ec2(res.instances[0])
                booted = True
                break
        if booted:
            break

    if not booted:
        abort("Couldn't find if instance booted")
def copy_image(ec2, module):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: ec2 connection object
    """
    tags = module.params.get('tags')

    params = {
        'SourceRegion': module.params.get('source_region'),
        'SourceImageId': module.params.get('source_image_id'),
        'Name': module.params.get('name'),
        'Description': module.params.get('description'),
        'Encrypted': module.params.get('encrypted'),
    }
    # KmsKeyId may only be supplied when actually set; boto3 rejects None.
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        image_id = ec2.copy_image(**params)['ImageId']
        if module.params.get('wait'):
            ec2.get_waiter('image_available').wait(ImageIds=[image_id])
        if tags:
            ec2.create_tags(Resources=[image_id],
                            Tags=[{
                                'Key': k,
                                'Value': v
                            } for k, v in tags.items()])
        module.exit_json(changed=True, image_id=image_id)
    except ClientError as ce:
        # Pass a string: the raw ClientError object is not JSON
        # serializable, so 'msg=ce' broke fail_json's output.
        module.fail_json(msg=str(ce))
    except NoCredentialsError:
        module.fail_json(msg="Unable to locate AWS credentials")
    except Exception as e:
        module.fail_json(msg=str(e))
def copy_image(module, ec2):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: ec2 connection object
    """
    tags = module.params.get('tags')

    params = {'SourceRegion': module.params.get('source_region'),
              'SourceImageId': module.params.get('source_image_id'),
              'Name': module.params.get('name'),
              'Description': module.params.get('description'),
              'Encrypted': module.params.get('encrypted'),
              }
    # KmsKeyId may only be supplied when actually set; boto3 rejects None.
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        image_id = ec2.copy_image(**params)['ImageId']
        if module.params.get('wait'):
            ec2.get_waiter('image_available').wait(ImageIds=[image_id])
        if tags:
            ec2.create_tags(
                Resources=[image_id],
                Tags=[{'Key': k, 'Value': v} for k, v in tags.items()]
            )
        module.exit_json(changed=True, image_id=image_id)
    except WaiterError as we:
        module.fail_json(msg='An error occurred waiting for the image to become available. (%s)' % str(we),
                         exception=traceback.format_exc())
    except ClientError as ce:
        # str(ce): ClientError has no .message attribute on Python 3, so
        # the original 'ce.message' raised AttributeError and masked the
        # real error.
        module.fail_json(msg=str(ce))
    except NoCredentialsError:
        module.fail_json(msg='Unable to authenticate, AWS credentials are invalid.')
    except Exception as e:
        module.fail_json(msg='Unhandled exception. (%s)' % str(e))
def copy_image(ec2, module):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: ec2 connection object
    """
    tags = module.params.get('tags')

    params = {'SourceRegion': module.params.get('source_region'),
              'SourceImageId': module.params.get('source_image_id'),
              'Name': module.params.get('name'),
              'Description': module.params.get('description'),
              'Encrypted': module.params.get('encrypted'),
              }
    # KmsKeyId may only be supplied when actually set; boto3 rejects None.
    if module.params.get('kms_key_id'):
        params['KmsKeyId'] = module.params.get('kms_key_id')

    try:
        image_id = ec2.copy_image(**params)['ImageId']
        if module.params.get('wait'):
            ec2.get_waiter('image_available').wait(ImageIds=[image_id])
        if tags:
            ec2.create_tags(
                Resources=[image_id],
                Tags=[{'Key': k, 'Value': v} for k, v in tags.items()]
            )
        module.exit_json(changed=True, image_id=image_id)
    except ClientError as ce:
        # Pass a string: the raw ClientError object is not JSON
        # serializable, so 'msg=ce' broke fail_json's output.
        module.fail_json(msg=str(ce))
    except NoCredentialsError:
        module.fail_json(msg="Unable to locate AWS credentials")
    except Exception as e:
        module.fail_json(msg=str(e))
def tag_resources(self, stack, hosts=None, volumes=None):
    """Name-tag each volume, then apply the stack's common tags to every
    volume and host instance."""
    if hosts is None:
        hosts = []
    if volumes is None:
        volumes = []

    ec2 = self.connect_ec2()

    # Unique, human-readable Name per volume for the AWS console;
    # volumes without an id are skipped.
    for volume in volumes:
        if not volume.volume_id:
            continue
        volume_name = 'stackdio::volume::{0!s}'.format(volume.id)
        logger.debug('tagging volume {0}: {1}'.format(volume.volume_id,
                                                      volume_name))
        ec2.create_tags([volume.volume_id], {'Name': volume_name})

    # Everything (volumes and host instances) gets the stack-wide tags;
    # empty/None ids are dropped.
    resource_ids = []
    for volume in volumes:
        if volume.volume_id:
            resource_ids.append(volume.volume_id)
    for host in hosts:
        if host.instance_id:
            resource_ids.append(host.instance_id)

    if resource_ids:
        logger.debug('tagging {0!r}'.format(resource_ids))
        # EC2 tags cannot carry None values; substitute the empty string.
        common_tags = {}
        for key, value in stack.get_tags().items():
            common_tags[key] = value if value is not None else ''
        ec2.create_tags(resource_ids, common_tags)
def create_instances(region=None, instance_names=None, pg_name='',
                     image_id='', instance_type='', key_name='',
                     security_group_ids=None, subnet_id=None,
                     ipv6_address_count=0, ebs_optimized=None,
                     min_count=1, max_count=1):
    '''Create EC2 instance.

    Parameters:
        region : string, the AWS region.
        instance_names : list, the list of instance names for creating instances.
        pg_name : string, the name of placement group for creating instances in.
        ......
    Return values:
        True : if request succeeded.
        False : if request failed.
    '''
    # Avoid the shared-mutable-default-argument pitfall: fresh lists per
    # call (the original used [] defaults).
    if instance_names is None:
        instance_names = []
    if security_group_ids is None:
        security_group_ids = []

    # check inputs
    if min_count > max_count:
        print('Invalid Inputs: parameter "min_count" ({0}) can not be greater than "max_count" ({1}).'.format(
            min_count, max_count))
        return False
    if len(instance_names) != max_count:
        print('Invalid Inputs: parameter "instance_names" provided {0} name(s), however "max_count" ({1}) name(s) are required.'.format(
            len(instance_names), max_count))
        return False
    if not key_name:
        key_name = EC2CFG['KEY_NAME']

    # connect to resource
    ec2 = boto3.resource('ec2')

    # check if instance already exists
    instance_iterator = ec2.instances.filter(Filters=[{
        'Name': 'tag:Name',
        'Values': instance_names
    }])
    instance_list = list(instance_iterator)
    if instance_list:
        for instance in instance_list:
            print('%s, known as %s, already exists.' % (instance, [
                x['Value'] for x in instance.tags if x['Key'] == 'Name'
            ]))
        print('Exit without creating any instance.')
        return False

    # launch instance
    print('1. Creating instance: %s' % (instance_names))
    kwargs = {
        'ImageId': image_id,
        'InstanceType': instance_type,
        'KeyName': key_name,
        'SecurityGroupIds': security_group_ids,
        'SubnetId': subnet_id,
        'Ipv6AddressCount': ipv6_address_count,
        'MinCount': min_count,
        'MaxCount': max_count,
        'Placement': {'GroupName': pg_name},
    }
    if ebs_optimized in (True, False):
        kwargs['EbsOptimized'] = ebs_optimized
    print('kwargs = %s' % (kwargs))

    # Dry run first to validate parameters/permissions without launching.
    try:
        ec2.create_instances(DryRun=True, **kwargs)
    except ClientError as e:
        if 'DryRunOperation' not in str(e):
            print(e)
            return False

    # The original wrapped this in a try/except that only re-raised, which
    # was a no-op and has been removed.
    instance_list = ec2.create_instances(**kwargs)
    print(instance_list)

    # set instance name
    print('2. Creating tag as instance name')
    for (instance, instance_name) in zip(instance_list, instance_names):
        print('%s {\'Name\': %s}' % (instance, instance_name))
        ec2.create_tags(Resources=[instance.id],
                        Tags=[{
                            'Key': 'Name',
                            'Value': instance_name
                        }])

    # waiting for running
    print('3. Waiting instance state become running')
    for instance in instance_list:
        print("{0}: Instance State: {1}".format(instance,
                                                instance.state['Name']))
        while instance.state['Name'] == u'pending':
            time.sleep(10)
            instance.load()
            print("{0}: Instance State: {1}".format(instance,
                                                    instance.state['Name']))

    print('create_instances() finished')
    return True
# Check up on its status every so often status = instance.update() while status == 'pending': time.sleep(1) sys.stdout.write('.') sys.stdout.flush() status = instance.update() print(u".") if status != 'running': print('Instance ' + instance.id + ' never reached status "running". Instance status: ' + status) exit(1) print(u"Tagging instance") ec2.create_tags([instance.id],{"Name": "VoodooVPN"}) print(u"VPN instance created and now running") results = {"region_name":region_name, "instance_id":instance.id, "public_dns_name":instance.public_dns_name, "securitygroup_id":voodoovpngroup.id, "IPSEC_PSK":IPSEC_PSK, "VPN_USER":VPN_USER, "VPN_PASSWORD":VPN_PASSWORD} print(results) # return region_name, instance id, PSK, user/pass creds
def create_image(module, ec2):
    """
    Creates new AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    """
    instance_id = module.params.get('instance_id')
    name = module.params.get('name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    description = module.params.get('description')
    architecture = module.params.get('architecture')
    kernel_id = module.params.get('kernel_id')
    root_device_name = module.params.get('root_device_name')
    virtualization_type = module.params.get('virtualization_type')
    no_reboot = module.params.get('no_reboot')
    device_mapping = module.params.get('device_mapping')
    tags = module.params.get('tags')
    launch_permissions = module.params.get('launch_permissions')

    try:
        params = {'name': name,
                  'description': description}

        images = ec2.get_all_images(filters={'name': name})

        if images and images[0]:
            # ensure that launch_permissions are up to date
            update_image(module, ec2, images[0].id)

        bdm = None
        if device_mapping:
            bdm = BlockDeviceMapping()
            for device in device_mapping:
                if 'device_name' not in device:
                    module.fail_json(msg='Device name must be set for volume')
                device_name = device['device_name']
                del device['device_name']
                bd = BlockDeviceType(**device)
                bdm[device_name] = bd

        if instance_id:
            # Create an image from a running instance.
            params['instance_id'] = instance_id
            params['no_reboot'] = no_reboot
            if bdm:
                params['block_device_mapping'] = bdm
            image_id = ec2.create_image(**params)
        else:
            # Register an image (no source instance).
            params['architecture'] = architecture
            params['virtualization_type'] = virtualization_type
            if kernel_id:
                params['kernel_id'] = kernel_id
            if root_device_name:
                params['root_device_name'] = root_device_name
            if bdm:
                params['block_device_map'] = bdm
            image_id = ec2.register_image(**params)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Wait until the image is recognized. EC2 API has eventual consistency,
    # such that a successful CreateImage API call doesn't guarantee the success
    # of subsequent DescribeImages API call using the new image id returned.
    img = None  # FIX: was unbound (NameError below) if every get_image() attempt raised
    for i in range(wait_timeout):
        try:
            img = ec2.get_image(image_id)
            if img.state == 'available':
                break
            elif img.state == 'failed':
                module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
        except boto.exception.EC2ResponseError as e:
            # Tolerate NotFound/Unavailable while the new id propagates; only
            # give up on the final attempt (and only when wait was requested).
            if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
                module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer "
                                     "wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
        finally:
            time.sleep(1)

    # FIX: guard against img still being None after the loop.
    if img is None or img.state != 'available':
        module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")

    if tags:
        try:
            # FIX: boto's create_tags expects a *list* of resource ids; a bare
            # string would be iterated character-by-character.
            ec2.create_tags([image_id], tags)
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg="Image tagging failed => %s: %s" % (e.error_code, e.error_message))

    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

    module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
# --- Create, format and detach a template EBS volume on the current instance ---
# NOTE(review): relies on REGION, the _status_err_exit/_wait_for_status/
# _wait_for_device/_err_exit/_sh_err_exit helpers and the `sh` module being
# defined earlier in the file.
ZONE = 'us-east-1a'
DEVICE = '/dev/xvdcz'
MOUNT = '/tmp/xvdcz'
DISK_SZ = 10  # volume size in GB

# Discover which instance we are running on via the EC2 metadata service.
instance_id = boto.utils.get_instance_metadata()['instance-id']
print("running on instance " + instance_id)
ec2 = boto.ec2.connect_to_region(REGION)

print("creating volume...")
vol = ec2.create_volume(DISK_SZ, ZONE, volume_type='gp2')
_status_err_exit(vol, 'available', 'creating volume')
print("created volume " + vol.id)

print("adding tags...")
ec2.create_tags([vol.id], {"Name": 'jbox_user_disk_template'})

print("attaching at " + DEVICE + " ...")
ec2.attach_volume(vol.id, instance_id, DEVICE)
if (not _wait_for_status(vol, 'in-use')) or (not _wait_for_device(DEVICE)):
    _err_exit("attaching at " + DEVICE)

# Format, mount, fix ownership, then unmount again so the volume can serve
# as a reusable template.
_sh_err_exit(lambda: sh.sudo.mkfs(DEVICE, t="ext4"), 'making ext4 file system')
if not os.path.exists(MOUNT):
    os.makedirs(MOUNT)
_sh_err_exit(lambda: sh.sudo.mount(DEVICE, MOUNT), 'mounting device at ' + MOUNT)
_sh_err_exit(lambda: sh.sudo.chown('-R', str(os.getuid())+':'+str(os.getgid()), MOUNT), 'changing file owmership')
_sh_err_exit(lambda: sh.sudo.umount(MOUNT), 'ummounting device from ' + MOUNT)
os.rmdir(MOUNT)
def create_image(module, ec2):
    """
    Creates new AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    """
    instance_id = module.params.get("instance_id")
    name = module.params.get("name")
    wait = module.params.get("wait")
    wait_timeout = int(module.params.get("wait_timeout"))
    description = module.params.get("description")
    no_reboot = module.params.get("no_reboot")
    device_mapping = module.params.get("device_mapping")
    tags = module.params.get("tags")
    launch_permissions = module.params.get("launch_permissions")

    try:
        params = {"instance_id": instance_id, "name": name, "description": description, "no_reboot": no_reboot}

        images = ec2.get_all_images(filters={"name": name})

        if images and images[0]:
            # Idempotency: an AMI with this name already exists, report it.
            module.exit_json(
                msg="AMI name already present", image_id=images[0].id, state=images[0].state, changed=False
            )

        if device_mapping:
            bdm = BlockDeviceMapping()
            for device in device_mapping:
                if "device_name" not in device:
                    module.fail_json(msg="Device name must be set for volume")
                device_name = device["device_name"]
                del device["device_name"]
                bd = BlockDeviceType(**device)
                bdm[device_name] = bd
            params["block_device_mapping"] = bdm

        image_id = ec2.create_image(**params)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Wait until the image is recognized. EC2 API has eventual consistency,
    # such that a successful CreateImage API call doesn't guarantee the success
    # of subsequent DescribeImages API call using the new image id returned.
    img = None  # FIX: was unbound (NameError below) if every get_image() attempt raised
    for i in range(wait_timeout):
        try:
            img = ec2.get_image(image_id)
            if img.state == "available":
                break
            elif img.state == "failed":
                module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
        except boto.exception.EC2ResponseError as e:
            # Tolerate NotFound/Unavailable while the new id propagates; only
            # fail on the final attempt when the caller asked us to wait.
            if (
                ("InvalidAMIID.NotFound" not in e.error_code and "InvalidAMIID.Unavailable" not in e.error_code)
                and wait
                and i == wait_timeout - 1
            ):
                module.fail_json(
                    msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s"
                    % (e.error_code, e.error_message)
                )
        finally:
            time.sleep(1)

    # FIX: guard against img still being None after the loop.
    if img is None or img.state != "available":
        module.fail_json(
            msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help."
        )

    if tags:
        try:
            # FIX: boto's create_tags expects a *list* of resource ids,
            # not a bare string.
            ec2.create_tags([image_id], tags)
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg="Image tagging failed => %s: %s" % (e.error_code, e.error_message))

    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

    module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
def start_node():
    """Launch (or spot-request) a single SLURM compute node named argv[1].

    Builds run_instances / request_spot_instances keyword arguments from the
    cluster configuration, renders an init script into user-data, pins the
    node's network interface to its preassigned private IP, attaches
    ephemeral storage, then tags the launched instances.

    Returns 0 on success, 1 on usage/connection errors (used as exit code).
    """
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0], ), file=sys.stderr)
        return 1
    nodename = argv[1]
    cc = ClusterConfiguration.from_config()
    region = get_region()
    ec2 = boto.ec2.connect_to_region(region)
    if not ec2:
        print("Could not connect to EC2 endpoint in region %r" % (region, ),
              file=sys.stderr)
        return 1
    kw = {}
    slurm_s3_root = cc.slurm_s3_root
    # Fall back to the stock Amazon Linux AMI when no compute AMI is set.
    kw['image_id'] = (cc.compute_ami if cc.compute_ami is not None
                      else amazon_linux_ami[region])
    if cc.instance_profile is not None:
        # The profile may be given either as a full ARN or by name.
        if cc.instance_profile.startswith("arn:"):
            kw['instance_profile_arn'] = cc.instance_profile
        else:
            kw['instance_profile_name'] = cc.instance_profile
    kw['key_name'] = cc.key_name
    kw['instance_type'] = cc.compute_instance_type
    if cc.compute_bid_price is not None:
        # Spot request: bid is valid for 24 hours.
        end = time() + 24 * 60 * 60  # FIXME: Don't hardcode this.
        kw['price'] = cc.compute_bid_price
        kw['valid_until'] = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime(end))
    node_address = cc.get_address_for_nodename(nodename)
    node_subnet = cc.get_subnet_for_address(node_address)
    # Render the instance bootstrap script with cluster-specific values.
    user_data = init_script % {
        "region": region,
        "nodename": nodename,
        "os_packages": " ".join(cc.compute_os_packages
                                if cc.compute_os_packages is not None else []),
        "external_packages": " ".join(cc.compute_external_packages
                                      if cc.compute_external_packages is not None
                                      else []),
        "slurm_ec2_conf": cc.slurm_ec2_configuration,
        "slurm_s3_root": slurm_s3_root,
    }
    user_data = b64encode(user_data)
    kw['user_data'] = user_data

    # Map the ethernet interface to the correct IP address
    eth0 = NetworkInterfaceSpecification(associate_public_ip_address=True,
                                         delete_on_termination=True,
                                         device_index=0,
                                         groups=cc.security_groups,
                                         private_ip_address=str(node_address),
                                         subnet_id=node_subnet.id)
    kw['network_interfaces'] = NetworkInterfaceCollection(eth0)

    # Attach any ephemeral storage devices
    block_device_map = BlockDeviceMapping()
    block_device_map['/dev/xvda'] = BlockDeviceType(size=32, volume_type="gp2")
    devices = cc.ephemeral_stores[cc.compute_instance_type]
    for i, device in enumerate(devices):
        # Ephemeral disks are mapped to /dev/sdb, /dev/sdc, ...
        drive = "/dev/sd" + chr(ord('b') + i)
        block_device_map[drive] = BlockDeviceType(
            ephemeral_name="ephemeral%d" % i)
    kw['block_device_map'] = block_device_map

    if cc.compute_bid_price is None:
        # On-demand launch.
        print("run_instances: %r" % kw)
        reservation = ec2.run_instances(**kw)
        tags = {
            'SLURMHostname': nodename,
            'SLURMS3Root': slurm_s3_root,
            'Name': "SLURM Computation Node %s" % nodename,
        }
        print("instances: %s" % " ".join([instance.id for instance in
                                          reservation.instances]))
        # create-tags can fail at times since the tag resource database is
        # a bit behind EC2's actual state.
        for i in xrange(10):
            try:
                ec2.create_tags(
                    [instance.id for instance in reservation.instances], tags)
                break
            except Exception as e:
                print("Failed to tag instance: %s" % e, file=sys.stderr)
                sleep(0.5 * i)
    else:
        # Spot launch; spot requests cannot be tagged the same way here.
        print("request_spot_instances: %r" % kw, file=sys.stderr)
        requests = ec2.request_spot_instances(**kw)
        print("requests: %s" % " ".join([request.id for request in requests]))

    return 0
def create_ec2_instance(self):
    """Provision a fresh VPC (subnet, IGW, routing, security group), launch a
    single instance in it, attach an Elastic IP and wait for SSH.

    Returns the instance's public (Elastic) IP address as a string.
    """
    # connect to ec2
    self.log.info("Connecting to ec2 ...")
    ec2 = boto.ec2.connect_to_region(
        self.config.get(self.section, 'region'),
        aws_access_key_id=self.config.get(self.default, 'aws_access_key_id'),
        aws_secret_access_key=self.config.get(self.default, 'aws_secret_access_key')
    )
    vpc_conn = boto.vpc.connect_to_region(
        self.config.get(self.section, 'region'),
        aws_access_key_id=self.config.get(self.default, 'aws_access_key_id'),
        aws_secret_access_key=self.config.get(self.default, 'aws_secret_access_key')
    )
    self.log.info("Ec2 connection success!")
    # Generate a throwaway keypair, save it and move it to the configured
    # ssh key path.
    compu_key = str(uuid.uuid4())
    key = ec2.create_key_pair(compu_key)
    key.save(self.temp)
    os.rename(self.temp + '/' + compu_key + '.pem',
              self.config.get(self.general, 'ssh_pubkey_path'))
    keys = ec2.get_all_key_pairs()
    for key in keys:
        self.log.info("Key found: " + key.name)
    self.log.info("Starting instance ...")
    # Create a VPC
    vpc = vpc_conn.create_vpc('10.0.0.0/16')
    # Configure the VPC to support DNS resolution and hostname assignment
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=True)
    vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True)
    # Create an Internet Gateway
    gateway = vpc_conn.create_internet_gateway()
    # Attach the Internet Gateway to our VPC
    vpc_conn.attach_internet_gateway(gateway.id, vpc.id)
    # Create a Route Table
    route_table = vpc_conn.create_route_table(vpc.id)
    # Create a /24 subnet (original comment said /16, but the CIDR is /24)
    subnet = vpc_conn.create_subnet(vpc.id, '10.0.0.0/24')
    # Associate Route Table with our subnet
    vpc_conn.associate_route_table(route_table.id, subnet.id)
    # Create a Route from our Internet Gateway to the internet
    vpc_conn.create_route(route_table.id, '0.0.0.0/0', gateway.id)
    # Create a new VPC security group
    sg = vpc_conn.create_security_group('compu_group',
                                        'A group for compucorp',
                                        vpc.id)
    # Authorize access to port 22 from anywhere
    sg.authorize(ip_protocol='tcp', from_port=22, to_port=22,
                 cidr_ip='0.0.0.0/0')
    sg.authorize(ip_protocol='tcp', from_port=443, to_port=443,
                 cidr_ip='0.0.0.0/0')
    # Run an instance in our new VPC
    reservation = vpc_conn.run_instances(
        self.config.get(self.section, 'ami_id'),
        key_name=compu_key,
        security_group_ids=[sg.id],
        instance_type=self.config.get(self.section, 'instance_type'),
        subnet_id=subnet.id)
    instance = reservation.instances[0]
    # Wait for the instance to be running and have an public DNS name
    while instance.state != 'running':
        self.log.info("Instance state: %s" % instance.state)
        time.sleep(10)
        instance.update()
    # Now create an Elastic IP address for the instance
    # And associate the EIP with our instance
    eip = vpc_conn.allocate_address(domain='vpc')
    eip.associate(instance_id=instance.id)
    # tag machine (both the instance and the VPC get the same Name tag)
    ec2.create_tags([instance.id, vpc.id], {"Name": "deployment_"+eip.public_ip})
    # Copy key as new name
    shutil.copy(self.config.get(self.general, 'ssh_pubkey_path'),
                self.temp + '/' + eip.public_ip + '.pem')
    self.log.info("Instance state: %s" % instance.state)
    self.log.info("Public IP: %s" % eip.public_ip)
    self.log.info("Waiting for SSH service to be available")
    self.wait_for_ssh(self.config.get(self.general, 'ssh_pubkey_path'),
                      self.config.get(self.box, 'username'),
                      eip.public_ip)
    return eip.public_ip
# Launch a single Altnet validator instance, tag it, wait for a public IP,
# and register its connection metadata in the inventory.
# NOTE(review): relies on `ec2`, `region`, `validator`, `args`, `inventory`
# being defined earlier in the file.
instance = ec2.run_instances(
    image_id=region['ami'],
    min_count=1,
    max_count=1,
    key_name=region['key'],
    security_group_ids=region['groups'],
    instance_type='m3.medium',
    subnet_id=region['subnet']).instances[0]
tags = {
    'uuid': validator.uuid,
    'environment': 'production',
    'site': 'rippletest.net',
    'devnet_name': 'altnet',
    'Name': 'Altnet validator %s'%(validator.uuid)
}
# FIX: boto's create_tags takes a *list* of resource ids; a bare string id
# would be iterated character-by-character when building the request.
ec2.create_tags([instance.id], tags)
logging.info("Launching: %s -> %s", validator, instance.id)
instance.update()
while instance.ip_address is None:
    # The public IP is assigned asynchronously; poll until it appears.
    time.sleep(2)
    instance.update()
logging.info("Got address: %s", instance.ip_address)
instanceMeta = {
    'uuid': validator.uuid,
    'key': validator.public_key,
    'ip': str(instance.ip_address),
    'port': 51235,
    'region': args.region,
}
inventory['validators'].append(instanceMeta)
# --- Tag the current EC2 instance with locally-gathered Puppet facts ---
# NOTE(review): relies on facts_yaml, get_current_region,
# puppet_class_tag_val_startswith, puppet_class_tag_ignore,
# puppet_class_tag_key and tags_dict being defined earlier in the file.
with open(facts_yaml) as fh:
    puppet = yaml.safe_load(fh)

# Find the boto region object matching the instance's current region.
regions = boto.ec2.regions()
region = [region for region in regions if get_current_region() in region.name][0]
instance_id = boto.utils.get_instance_metadata()['instance-id']
ec2 = region.connect()
res = ec2.get_all_instances(filters={'instance-id': instance_id})[0]
instance = res.instances[0]
#print instance.tags.get('Name'), instance.id, instance.placement

# Join the interesting puppet classes into one comma-separated tag value;
# the [-254:] slices keep the values under the EC2 tag-value size limit.
s_classes = ','.join([str(classes) for classes in puppet['krux_classes'].split()
                      if classes.startswith(puppet_class_tag_val_startswith)
                      and puppet_class_tag_ignore not in classes])
tags_dict.update({puppet_class_tag_key: s_classes[-254:]})

# also, add the environment tag
# NOTE(review): puppet.get(...) returns None when the key is absent, which
# would make the [-254:] subscript raise TypeError — confirm the facts file
# always contains 'environment' and 'cluster_name'.
tags_dict.update({'environment': puppet.get('environment')[-254:]})

# cluster name!
tags_dict.update({'cluster_name': puppet.get('cluster_name')[-254:]})

# make the API call:
ec2.create_tags([instance.id], tags_dict)
# Wait for the freshly launched instance to leave 'pending', tag it, and
# report the details needed to reach the new VPN endpoint.
while True:
    status = instance.update()
    if status != 'pending':
        break
    time.sleep(1)
    # One dot per poll as a minimal progress indicator.
    sys.stdout.write('.')
    sys.stdout.flush()
print(u".")

if status != 'running':
    # Ended up in some unexpected state (e.g. 'terminated') — bail out.
    print('Instance ' + instance.id + ' never reached status "running". Instance status: ' + status)
    exit(1)

print(u"Tagging instance")
ec2.create_tags([instance.id], {"Name": "VoodooVPN"})
print(u"VPN instance created and now running")

# Bundle everything the caller needs to configure/connect to the VPN.
results = dict(
    region_name=region_name,
    instance_id=instance.id,
    public_dns_name=instance.public_dns_name,
    securitygroup_id=voodoovpngroup.id,
    IPSEC_PSK=IPSEC_PSK,
    VPN_USER=VPN_USER,
    VPN_PASSWORD=VPN_PASSWORD,
)
print(results)
    subnet_id=subnet)  # tail of the run_instances(...) call started above this chunk

# Tags shared by every instance in this devnet.
tags = {
    'devnet_id': id,
    'devnet_name': args.name,
    'devnet_size': args.size
}
# 80% of the devnet size is required for validator quorum.
inventory = {
    'validator_quorum': int(args.size * 0.8),
    'validators': []
}
for i in reservation.instances:
    instanceID = str(uuid.uuid4())
    # NOTE(review): boto's create_tags expects a *list* of resource ids;
    # passing i.id (a string) here looks wrong — confirm against boto 2 docs.
    # The two calls could also be collapsed into one.
    ec2.create_tags(i.id, tags)
    ec2.create_tags(i.id, {'uuid': instanceID})
    logging.info("Launching: %s -> %s", instanceID, i.id)
    i.update()
    while i.ip_address is None:
        # Public IP is assigned asynchronously; poll until it appears.
        time.sleep(2)
        i.update()
    logging.info("Got address: %s", i.ip_address)
    keys = gen_validator_keys()
    instanceMeta = {
        'uuid': instanceID,
        'key': str(keys['validation_public_key']),
        'ip': str(i.ip_address),
        'port': 51235,
        'region': region
    }
def start_node():
    """Launch (or spot-request) a single SLURM compute node named argv[1].

    Builds run_instances / request_spot_instances keyword arguments from the
    cluster configuration, renders an init script into user-data, pins the
    node's network interface to its preassigned private IP, attaches
    ephemeral storage, then tags the launched instances.

    Returns 0 on success, 1 on usage/connection errors (used as exit code).
    """
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0],), file=sys.stderr)
        return 1
    nodename = argv[1]
    cc = ClusterConfiguration.from_config()
    region = get_region()
    ec2 = boto.ec2.connect_to_region(region)
    if not ec2:
        print("Could not connect to EC2 endpoint in region %r" % (region,),
              file=sys.stderr)
        return 1
    kw = {}
    slurm_s3_root = cc.slurm_s3_root
    # Fall back to the stock Amazon Linux AMI when no compute AMI is set.
    kw['image_id'] = (
        cc.compute_ami if cc.compute_ami is not None
        else amazon_linux_ami[region])
    if cc.instance_profile is not None:
        # The profile may be given either as a full ARN or by name.
        if cc.instance_profile.startswith("arn:"):
            kw['instance_profile_arn'] = cc.instance_profile
        else:
            kw['instance_profile_name'] = cc.instance_profile
    kw['key_name'] = cc.key_name
    kw['instance_type'] = cc.compute_instance_type
    if cc.compute_bid_price is not None:
        # Spot request: bid is valid for 24 hours.
        end = time() + 24 * 60 * 60  # FIXME: Don't hardcode this.
        kw['price'] = cc.compute_bid_price
        kw['valid_until'] = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime(end))
    node_address = cc.get_address_for_nodename(nodename)
    node_subnet = cc.get_subnet_for_address(node_address)
    # Render the instance bootstrap script with cluster-specific values.
    user_data = init_script % {
        "region": region,
        "nodename": nodename,
        "os_packages": " ".join(
            cc.compute_os_packages
            if cc.compute_os_packages is not None else []),
        "external_packages": " ".join(
            cc.compute_external_packages
            if cc.compute_external_packages is not None else []),
        "slurm_ec2_conf": cc.slurm_ec2_configuration,
        "slurm_s3_root": slurm_s3_root,
    }
    user_data = b64encode(user_data)
    kw['user_data'] = user_data

    # Map the ethernet interface to the correct IP address
    eth0 = NetworkInterfaceSpecification(
        associate_public_ip_address=True,
        delete_on_termination=True,
        device_index=0,
        groups=cc.security_groups,
        private_ip_address=str(node_address),
        subnet_id=node_subnet.id)
    kw['network_interfaces'] = NetworkInterfaceCollection(eth0)

    # Attach any ephemeral storage devices
    block_device_map = BlockDeviceMapping()
    block_device_map['/dev/xvda'] = BlockDeviceType(size=32, volume_type="gp2")
    devices = cc.ephemeral_stores[cc.compute_instance_type]
    for i, device in enumerate(devices):
        # Ephemeral disks are mapped to /dev/sdb, /dev/sdc, ...
        drive = "/dev/sd" + chr(ord('b') + i)
        block_device_map[drive] = BlockDeviceType(
            ephemeral_name="ephemeral%d" % i)
    kw['block_device_map'] = block_device_map

    if cc.compute_bid_price is None:
        # On-demand launch.
        print("run_instances: %r" % kw)
        reservation = ec2.run_instances(**kw)
        tags = {
            'SLURMHostname': nodename,
            'SLURMS3Root': slurm_s3_root,
            'Name': "SLURM Computation Node %s" % nodename,
        }
        print("instances: %s" %
              " ".join([instance.id for instance in reservation.instances]))
        # create-tags can fail at times since the tag resource database is
        # a bit behind EC2's actual state.
        for i in xrange(10):
            try:
                ec2.create_tags([
                    instance.id for instance in reservation.instances], tags)
                break
            except Exception as e:
                print("Failed to tag instance: %s" % e, file=sys.stderr)
                sleep(0.5 * i)
    else:
        # Spot launch; spot requests cannot be tagged the same way here.
        print("request_spot_instances: %r" % kw, file=sys.stderr)
        requests = ec2.request_spot_instances(**kw)
        print("requests: %s" % " ".join([request.id for request in requests]))

    return 0
def get_ebs_volumes(ebs_vol_list, volcount, volsize, volume_type='standard'):
    """Work with Amazon to create EBS volumes, tag them and attach them to
    the local host.

    ebs_vol_list: candidate local device names (e.g. ['/dev/xvdf', ...])
    volcount:     number of volumes to create
    volsize:      total size in GB, split evenly across volcount volumes
    volume_type:  EBS volume type; 'io1' requests provisioned IOPS

    Returns the list of device names that ended up attached.
    """
    # How large will each volume be?
    individual_vol_size = int(volsize / volcount)

    # Some local instance ID info (queried from the EC2 metadata service)..
    zone = commands.getoutput("wget -q -O - http://169.254.169.254/latest/meta-data/placement/availability-zone")
    region = zone[:-1]  # region is the AZ string minus its trailing letter
    instanceid = commands.getoutput("wget -q -O - http://169.254.169.254/latest/meta-data/instance-id")
    available_ebs_vol_list = []
    attached_ebs_vol_list = []

    # Open our EC2 connection
    print("INFO: Connecting to Amazon...")
    ec2 = boto.ec2.connect_to_region(region)

    # Make sure that the device list we got is good. If a device exists already,
    # remove it from the potential 'device targets'
    for potential_volume in ebs_vol_list:
        if os.path.exists(potential_volume):
            print("INFO: (%s) is already an attached EBS volume." % (potential_volume))
            attached_ebs_vol_list.append(potential_volume)
        else:
            print("INFO: (%s) is available as a disk target." % (potential_volume))
            available_ebs_vol_list.append(potential_volume)

    # Reverse our available_ebs_vol_list so that we can 'pop' from the beginning
    available_ebs_vol_list.reverse()

    # If we have any EBS volumes already mapped, then just pass them back. Do not create new ones,
    # and do not do anything with them. This script does not support handling multiple sets of EBS
    # volumes.
    # FIX: use len(x) instead of calling x.__len__() directly.
    if len(attached_ebs_vol_list) > 0:
        print("WARNING: EBS volumes are already attached to this host. Passing them back and not touching them.")
        return attached_ebs_vol_list

    # Make sure we have enough target devices available
    if volcount > len(available_ebs_vol_list):
        print("ERROR: Do not have enough local volume targets available to attach the drives.")
        sys.exit(1)

    # For each volume..
    for i in range(0, volcount):
        print("INFO: Requesting EBS volume creation (%s gb)..." % (individual_vol_size))
        # 30:1 GB:IOP ratio, with a max of 4000
        iops = individual_vol_size * 30
        if iops > 4000:
            iops = 4000
        if volume_type == 'io1':
            print("INFO: Requesting %s provisioned IOPS..." % iops)
            vol = ec2.create_volume(individual_vol_size, zone, volume_type=volume_type, iops=iops)
        else:
            vol = ec2.create_volume(individual_vol_size, zone, volume_type=volume_type)

        # Wait until the volume is 'available' before attaching
        while vol.status != u'available':
            time.sleep(1)
            print("INFO: Waiting for %s to become available..." % vol)
            vol.update()
        print("INFO: Volume %s status is now: %s..." % (vol, vol.status))

        # Grab a volume off of our stack of available vols..
        dest = available_ebs_vol_list.pop()

        # Attach the volume and wait for it to fully attach
        print("INFO: (%s) Attaching EBS volume to our instance ID (%s) to %s" % (vol.id, instanceid, dest))
        try:
            vol.attach(instanceid, dest.replace('xvd', 'sd'))
        except Exception:
            # FIX: was a bare 'except:' — keep the single retry, but don't
            # swallow KeyboardInterrupt/SystemExit.
            time.sleep(5)
            vol.attach(instanceid, dest.replace('xvd', 'sd'))

        while not hasattr(vol.attach_data, 'instance_id'):
            time.sleep(1)
            vol.update()
        # FIX: simplified the awkward 'not x == y' / '== True' comparisons.
        while str(vol.attach_data.instance_id) != instanceid or not os.path.exists(dest):
            print("INFO: (%s) Volume attaching..." % (vol.id))
            time.sleep(1)
            vol.update()

        # Sleep a few more seconds just to make sure the OS has seen the volume
        time.sleep(1)

        # Add the volume to our list of volumes that were created
        attached_ebs_vol_list.append(dest)
        print("INFO: (%s) Volume attached!" % (vol.id))

        # Now, tag the volumes and move on
        tags = {}
        tags["Name"] = "%s:%s" % (socket.gethostname(), dest)
        # FIX: 'Taggin' typo in the log message.
        print("INFO: (%s) Tagging EBS volume with these tags: %s" % (vol.id, tags))
        # FIX: boto's create_tags expects a *list* of resource ids; a bare
        # string would be iterated character-by-character.
        ec2.create_tags([str(vol.id)], tags)

    # All done. Return whatever volumes were created and attached.
    return attached_ebs_vol_list
# waiting took too long module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) #We do this after the loop ends so that we end up with one list for res in res_list: running_instances.extend(res.instances) # Enabled by default by Amazon if not source_dest_check: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: ec2.create_tags(instids, instance_tags) except boto.exception.EC2ResponseError, e: module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) instance_dict_array = [] created_instance_ids = [] for inst in running_instances: d = get_instance_info(inst) created_instance_ids.append(inst.id) instance_dict_array.append(d) return (instance_dict_array, created_instance_ids, changed) def terminate_instances(module, ec2, instance_ids): """
            # (tail of a create_image wait loop — the enclosing function
            # begins above this chunk; indentation partially inferred)
            module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")
        else:
            module.fail_json(msg="timed out waiting for image to be recognized")

    # wait here until the image is created
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
        img = ec2.get_image(image_id)
        time.sleep(3)

    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "timed out waiting for image to be created")

    if tags:
        try:
            # NOTE(review): boto's create_tags expects a *list* of resource
            # ids; a bare image_id string looks wrong — confirm vs boto 2 docs.
            ec2.create_tags(image_id, tags)
        except boto.exception.EC2ResponseError, e:
            module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))

    module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)


def deregister_image(module, ec2):
    """
    Deregisters AMI
    """

    image_id = module.params.get('image_id')
    delete_snapshot = module.params.get('delete_snapshot')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
        })  # tail of a call started above this chunk
    # Inspect the snapshot's existing tags to decide whether it still needs
    # the legacy lincx_* tags. (Enclosing per-snapshot loop begins above —
    # the 'continue' below implies it.)
    # NOTE(review): is_named is set but never read.
    is_named = False
    snap_name = "unnamed"
    is_tagged = False
    lincx_env = None
    lincx_type = None
    for tag in snap_tags:
        if tag.name == "Name":
            snap_name = tag.value
            is_named = True
        elif tag.name == "lincx_environment":
            lincx_env = tag.value
            is_tagged = True
        elif tag.name == "lincx_type":
            lincx_type = tag.value
            is_tagged = True
    if is_tagged:
        print "SNAPSHOT TAGGED:", snapshot.id, snap_name, lincx_env, lincx_type
        # Skip already-tagged snapshots unless force_tagging was requested.
        if not force_tagging:
            continue
    print "SNAPSHOT TAGGING:", snapshot.id, snap_name
    ec2.create_tags([snapshot.id], tags={
        'lincx_environment': 'legacy',
        'lincx_type': ''
    })

# vim: set tabstop=4 shiftwidth=4 softtabstop=0 textwidth=0 expandtab :
def register_tags_if_any(module, ec2, tags, image_id):
    """Apply *tags* to *image_id* when any were supplied; no-op otherwise.

    Any tagging failure is reported through module.fail_json.
    """
    if not tags:
        return
    try:
        ec2.create_tags([image_id], tags)
    except Exception as e:
        module.fail_json(msg=str(e))
def create_instances(module, ec2, vpc, override_count=None): """ Creates new instances module : AnsibleModule object ec2: authenticated ec2 connection object Returns: A list of dictionaries with instance information about the instances that were launched """ key_name = module.params.get('key_name') id = module.params.get('id') group_name = module.params.get('group') group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') spot_type = module.params.get('spot_type') image = module.params.get('image') if override_count: count = override_count else: count = module.params.get('count') monitoring = module.params.get('monitoring') kernel = module.params.get('kernel') ramdisk = module.params.get('ramdisk') wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) spot_wait_timeout = int(module.params.get('spot_wait_timeout')) placement_group = module.params.get('placement_group') user_data = module.params.get('user_data') instance_tags = module.params.get('instance_tags') vpc_subnet_id = module.params.get('vpc_subnet_id') assign_public_ip = module.boolean(module.params.get('assign_public_ip')) private_ip = module.params.get('private_ip') instance_profile_name = module.params.get('instance_profile_name') volumes = module.params.get('volumes') ebs_optimized = module.params.get('ebs_optimized') exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) termination_protection = module.boolean( module.params.get('termination_protection')) network_interfaces = module.params.get('network_interfaces') spot_launch_group = module.params.get('spot_launch_group') instance_initiated_shutdown_behavior = module.params.get( 'instance_initiated_shutdown_behavior') # group_id and group_name are exclusive of 
each other if group_id and group_name: module.fail_json(msg=str( "Use only one type of parameter (group_name) or (group_id)")) vpc_id = None if vpc_subnet_id: if not vpc: module.fail_json(msg="region must be specified") else: vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id else: vpc_id = None try: # Here we try to lookup the group id from the security group name - if group is set. if group_name: if vpc_id: grp_details = ec2.get_all_security_groups( filters={'vpc_id': vpc_id}) else: grp_details = ec2.get_all_security_groups() if isinstance(group_name, basestring): group_name = [group_name] unmatched = set(group_name).difference( str(grp.name) for grp in grp_details) if len(unmatched) > 0: module.fail_json( msg="The following group names are not valid: %s" % ', '.join(unmatched)) group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: #wrap the group_id in a list if it's not one already if isinstance(group_id, basestring): group_id = [group_id] grp_details = ec2.get_all_security_groups(group_ids=group_id) group_name = [grp_item.name for grp_item in grp_details] except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) # Lookup any instances that much our run id. running_instances = [] count_remaining = int(count) if id != None: filter_dict = {'client-token': id, 'instance-state-name': 'running'} previous_reservations = ec2.get_all_instances(None, filter_dict) for res in previous_reservations: for prev_instance in res.instances: running_instances.append(prev_instance) count_remaining = count_remaining - len(running_instances) # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want. 
if count_remaining == 0: changed = False else: changed = True try: params = { 'image_id': image, 'key_name': key_name, 'monitoring_enabled': monitoring, 'placement': zone, 'instance_type': instance_type, 'kernel_id': kernel, 'ramdisk_id': ramdisk, 'user_data': user_data } if ebs_optimized: params['ebs_optimized'] = ebs_optimized # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request if not spot_price: params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): params['instance_profile_name'] = instance_profile_name else: if instance_profile_name is not None: module.fail_json( msg= "instance_profile_name parameter requires Boto version 2.5.0 or higher" ) if assign_public_ip: if not boto_supports_associate_public_ip_address(ec2): module.fail_json( msg= "assign_public_ip parameter requires Boto version 2.13.0 or higher." ) elif not vpc_subnet_id: module.fail_json( msg="assign_public_ip only available with vpc_subnet_id" ) else: if private_ip: interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id=vpc_subnet_id, private_ip_address=private_ip, groups=group_id, associate_public_ip_address=assign_public_ip) else: interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id=vpc_subnet_id, groups=group_id, associate_public_ip_address=assign_public_ip) interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( interface) params['network_interfaces'] = interfaces else: if network_interfaces: if isinstance(network_interfaces, basestring): network_interfaces = [network_interfaces] interfaces = [] for i, network_interface_id in enumerate( network_interfaces): interface = boto.ec2.networkinterface.NetworkInterfaceSpecification( network_interface_id=network_interface_id, device_index=i) interfaces.append(interface) params['network_interfaces'] = \ boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) else: params['subnet_id'] = vpc_subnet_id if vpc_subnet_id: 
params['security_group_ids'] = group_id else: params['security_groups'] = group_name if volumes: bdm = BlockDeviceMapping() for volume in volumes: if 'device_name' not in volume: module.fail_json( msg='Device name must be set for volume') # Minimum volume size is 1GB. We'll use volume size explicitly set to 0 # to be a signal not to create this volume if 'volume_size' not in volume or int( volume['volume_size']) > 0: bdm[volume['device_name']] = create_block_device( module, ec2, volume) params['block_device_map'] = bdm # check to see if we're using spot pricing first before starting instances if not spot_price: if assign_public_ip and private_ip: params.update( dict( min_count=count_remaining, max_count=count_remaining, client_token=id, placement_group=placement_group, )) else: params.update( dict( min_count=count_remaining, max_count=count_remaining, client_token=id, placement_group=placement_group, private_ip_address=private_ip, )) # For ordinary (not spot) instances, we can select 'stop' # (the default) or 'terminate' here. params[ 'instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop' res = ec2.run_instances(**params) instids = [i.id for i in res.instances] while True: try: ec2.get_all_instances(instids) break except boto.exception.EC2ResponseError as e: if "<Code>InvalidInstanceID.NotFound</Code>" in str(e): # there's a race between start and get an instance continue else: module.fail_json(msg=str(e)) # The instances returned through ec2.run_instances above can be in # terminated state due to idempotency. See commit 7f11c3d for a complete # explanation. 
terminated_instances = [ str(instance.id) for instance in res.instances if instance.state == 'terminated' ] if terminated_instances: module.fail_json( msg="Instances with id(s) %s " % terminated_instances + "were created previously but have since been terminated - " + "use a (possibly different) 'instanceid' parameter") else: if private_ip: module.fail_json( msg= 'private_ip only available with on-demand (non-spot) instances' ) if boto_supports_param_in_spot_request(ec2, 'placement_group'): params['placement_group'] = placement_group elif placement_group: module.fail_json( msg= "placement_group parameter requires Boto version 2.3.0 or higher." ) # You can't tell spot instances to 'stop'; they will always be # 'terminate'd. For convenience, we'll ignore the latter value. if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate': module.fail_json( msg= "instance_initiated_shutdown_behavior=stop is not supported for spot instances." ) if spot_launch_group and isinstance(spot_launch_group, basestring): params['launch_group'] = spot_launch_group params.update(dict( count=count_remaining, type=spot_type, )) res = ec2.request_spot_instances(spot_price, **params) # Now we have to do the intermediate waiting if wait: instids = await_spot_requests(module, ec2, res, count) except boto.exception.BotoServerError as e: module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message)) # wait here until the instances are up num_running = 0 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and num_running < len(instids): try: res_list = ec2.get_all_instances(instids) except boto.exception.BotoServerError as e: if e.error_code == 'InvalidInstanceID.NotFound': time.sleep(1) continue else: raise num_running = 0 for res in res_list: num_running += len( [i for i in res.instances if i.state == 'running']) if len(res_list) <= 0: # got a bad response of some sort, possibly due to # stale/cached data. 
Wait a second and then try again time.sleep(1) continue if wait and num_running < len(instids): time.sleep(5) else: break if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg="wait for instances running timeout on %s" % time.asctime()) #We do this after the loop ends so that we end up with one list for res in res_list: running_instances.extend(res.instances) # Enabled by default by AWS if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) # Disabled by default by AWS if termination_protection is True: for inst in res.instances: inst.modify_attribute('disableApiTermination', True) # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: ec2.create_tags(instids, instance_tags) except boto.exception.EC2ResponseError as e: module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) instance_dict_array = [] created_instance_ids = [] for inst in running_instances: inst.update() d = get_instance_info(inst) created_instance_ids.append(inst.id) instance_dict_array.append(d) return (instance_dict_array, created_instance_ids, changed)
import json instance_data = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document") response_json = instance_data.json() region = response_json.get('region') instance_id = response_json.get('instanceId') ec2 = boto3.client('ec2', region_name=region) try: allocate_eip = ec2.associate_address(AllocationId='eipalloc-0f7526dce6e0f1db8', InstanceId=instance_id) except: print("Associate IP failed") try: create_tag = ec2.create_tags(Resources=[instance_id], Tags=[{'Key':'ElasticIp', 'Value':'eipalloc-0f7526dce6e0f1db8'}]) except: print("Create tag failed") runcmd: - [ pip, install, boto3 ] - [ python, /usr/bin/rename_instance.py ] - [ sleep, 15 ] - [ python, /usr/bin/allocate_eip.py ] - [ service, sensu-client, stop ] - [ salt-call, saltutil.sync_all ] - [ salt-call, write_ec2tags.write_to_disk ] - [ salt-call, write_ec2tags.write_minion_id ] - [ salt-call, saltutil.revoke_auth ] - [ service, salt-minion, stop ] - [ rm, -rf, /etc/salt/pki/minion ]
def main():
    """Ansible entry point for managing EC2 resource tags (present/absent/list)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            resource=dict(required=True),
            tags=dict(type='dict'),
            state=dict(default='present', choices=['present', 'absent', 'list']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')
    ec2 = ec2_connect(module)

    # Snapshot the tags currently on the resource so changed status can be
    # reported accurately, then tag or detag as appropriate.
    filters = {'resource-id': resource}
    tagdict = {t.name: t.value for t in ec2.get_all_tags(filters=filters)}
    existing = set(tagdict.items())

    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        wanted = set(tags.items())
        if wanted.issubset(existing):
            module.exit_json(msg="Tags already exists in %s." % resource, changed=False)
        # Only the key/value pairs that are not already on the resource.
        dictadd = {key: value for (key, value) in wanted if (key, value) not in existing}
        ec2.create_tags(resource, dictadd)
        ec2.get_all_tags(filters=filters)  # refresh, as the original flow did
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd, resource), changed=True)

    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        wanted = set(tags.items())
        # Pairs that are not on the resource at all — nothing to delete there.
        baddict = {key: value for (key, value) in wanted if (key, value) not in existing}
        if set(baddict) == set(tags):
            module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        dictremove = {key: value for (key, value) in wanted if (key, value) in existing}
        ec2.delete_tags(resource, dictremove)
        ec2.get_all_tags(filters=filters)  # refresh, as the original flow did
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove, resource), changed=True)

    if state == 'list':
        module.exit_json(changed=False, tags=tagdict)
def tag_resource(ec2, resource, tags):
    """Apply *tags* (a dict) to *resource* via the boto EC2 connection.

    Logs the intent to stdout before calling the API.  The original used the
    Python-2-only ``print`` statement, which is a syntax error on Python 3;
    the parenthesized form below behaves identically on both.
    """
    print('Tagging %s with %s.' % (resource, tags))
    ec2.create_tags(resource, tags)
def register_tags_if_any(module, ec2, tags, image_id):
    """Attach *tags* to the image *image_id*; no-op when *tags* is falsy.

    Any tagging failure is reported to Ansible through module.fail_json.
    """
    if not tags:
        return
    try:
        ec2.create_tags([image_id], tags)
    except Exception as exc:
        module.fail_json(msg=str(exc))
def tag_resource(ec2, resource_id, tags):
    """Attach *tags* to a single EC2 resource and return the API result."""
    # create_tags takes a list of resource ids, so wrap the single id.
    resource_ids = [resource_id]
    return ec2.create_tags(resource_ids, tags)
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.") else: module.fail_json(msg="timed out waiting for image to be recognized") # wait here until the image is created wait_timeout = time.time() + wait_timeout while wait and wait_timeout > time.time() and (img is None or img.state != 'available'): img = ec2.get_image(image_id) time.sleep(3) if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "timed out waiting for image to be created") if tags: try: ec2.create_tags(image_id, tags) except boto.exception.EC2ResponseError, e: module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message)) if launch_permissions: try: img = ec2.get_image(image_id) img.set_launch_permissions(**launch_permissions) except boto.exception.BotoServerError, e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id) module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) def deregister_image(module, ec2): """ Deregisters AMI
ZONE = 'us-east-1a' DEVICE = '/dev/xvdcz' MOUNT = '/tmp/xvdcz' DISK_SZ = 10 instance_id = boto.utils.get_instance_metadata()['instance-id'] print("running on instance " + instance_id) ec2 = boto.ec2.connect_to_region(REGION) print("creating volume...") vol = ec2.create_volume(DISK_SZ, ZONE, volume_type='gp2') _status_err_exit(vol, 'available', 'creating volume') print("created volume " + vol.id) print("adding tags...") ec2.create_tags([vol.id], {"Name": 'jbox_user_disk_template'}) print("attaching at " + DEVICE + " ...") ec2.attach_volume(vol.id, instance_id, DEVICE) if (not _wait_for_status(vol, 'in-use')) or (not _wait_for_device(DEVICE)): _err_exit("attaching at " + DEVICE) _sh_err_exit(lambda: sh.sudo.mkfs(DEVICE, t="ext4"), 'making ext4 file system') if not os.path.exists(MOUNT): os.makedirs(MOUNT) _sh_err_exit(lambda: sh.sudo.mount(DEVICE, MOUNT), 'mounting device at ' + MOUNT) _sh_err_exit( lambda: sh.sudo.chown('-R',
running_instances.extend(res.instances) # Enabled by default by AWS if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) # Disabled by default by AWS if termination_protection is True: for inst in res.instances: inst.modify_attribute('disableApiTermination', True) # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: ec2.create_tags(instids, instance_tags) except boto.exception.EC2ResponseError, e: module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message)) instance_dict_array = [] created_instance_ids = [] for inst in running_instances: inst.update() d = get_instance_info(inst) created_instance_ids.append(inst.id) instance_dict_array.append(d) return (instance_dict_array, created_instance_ids, changed) def terminate_instances(module, ec2, instance_ids):
auth_url=options.os_auth_url, username=options.os_username, api_key=options.os_api_key, project_id=options.os_project_id) # AWS if AWS: filtering = {'tag:hostname': options.prefix + '*'} if options.overflow_aws_zone is not None: filtering['availability_zone'] = options.overflow_aws_zone res = ec2.get_all_instances(filters=filtering) for r in res: for i in r.instances: print "Doing", i.tags[ 'hostname'], "tag:", options.tag_key, "=>", options.tag_value ec2.create_tags([i.id], {options.tag_key: options.tag_value}) # OS # XXX: filtering on metadata in OpenStack is currently not working. Needs further investigation. if NOVA: res = nova.servers.list( search_opts={ 'metadata': u'{"hostname":"%s"}' % options.prefix + '*', 'all_tenants': 0 }) for i in res: if 'hostname' in i.metadata and options.prefix in i.metadata[ 'hostname']: print "Doing", i.name, "tag:", options.tag_key, "=>", options.tag_value nova.servers.set_meta(i.id, {options.tag_key: options.tag_value})
def create_image(module, ec2):
    """
    Creates new AMI, either from a running instance (CreateImage) or by
    registering an image from scratch (RegisterImage).

    module : AnsibleModule object
    ec2: authenticated ec2 connection object

    Terminates via module.exit_json on success or module.fail_json on error.
    """
    instance_id = module.params.get('instance_id')
    name = module.params.get('name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    description = module.params.get('description')
    architecture = module.params.get('architecture')
    kernel_id = module.params.get('kernel_id')
    root_device_name = module.params.get('root_device_name')
    virtualization_type = module.params.get('virtualization_type')
    no_reboot = module.params.get('no_reboot')
    device_mapping = module.params.get('device_mapping')
    tags = module.params.get('tags')
    launch_permissions = module.params.get('launch_permissions')

    try:
        params = {'name': name, 'description': description}

        # Idempotency: if an image with this name already exists, just bring
        # its launch permissions up to date (update_image exits the module).
        images = ec2.get_all_images(filters={'name': name})
        if images and images[0]:
            update_image(module, ec2, images[0].id)

        bdm = None
        if device_mapping:
            bdm = BlockDeviceMapping()
            for device in device_mapping:
                if 'device_name' not in device:
                    module.fail_json(msg='Device name must be set for volume')
                device_name = device['device_name']
                del device['device_name']
                bd = BlockDeviceType(**device)
                bdm[device_name] = bd

        if instance_id:
            # Snapshot an existing instance into a new AMI.
            params['instance_id'] = instance_id
            params['no_reboot'] = no_reboot
            if bdm:
                params['block_device_mapping'] = bdm
            image_id = ec2.create_image(**params)
        else:
            # Register an image with no source instance.
            params['architecture'] = architecture
            params['virtualization_type'] = virtualization_type
            if kernel_id:
                params['kernel_id'] = kernel_id
            if root_device_name:
                params['root_device_name'] = root_device_name
            if bdm:
                params['block_device_map'] = bdm
            image_id = ec2.register_image(**params)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Wait until the image is recognized. EC2 API has eventual consistency,
    # such that a successful CreateImage API call doesn't guarantee the success
    # of subsequent DescribeImages API call using the new image id returned.
    img = None  # fix: previously unbound (NameError) if every get_image raised
    for i in range(wait_timeout):
        try:
            img = ec2.get_image(image_id)
            # get_image can return None while the image propagates; only
            # inspect state once we actually have an image object.
            if img is not None:
                if img.state == 'available':
                    break
                elif img.state == 'failed':
                    module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
        except boto.exception.EC2ResponseError as e:
            # NotFound/Unavailable are expected during propagation; anything
            # else is fatal once we are out of retries.
            if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
                module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer "
                                     "wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
        finally:
            time.sleep(1)

    if img is None or img.state != 'available':
        module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")

    if tags:
        try:
            # fix: create_tags expects a list of resource ids, not a bare
            # string (a string would be iterated character by character).
            ec2.create_tags([image_id], tags)
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg="Image tagging failed => %s: %s" % (e.error_code, e.error_message))

    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

    module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
instance_type=instance_type, user_data=user_data, )[0] while True: eprint("Waiting. spot request status: '%s', state: '%s'" % (spot_request.state, spot_request.status.code)) if spot_request.state == 'active' and spot_request.status.code == 'fulfilled': break time.sleep(10) spot_request = ec2.get_all_spot_instance_requests( request_ids=[spot_request.id])[0] while True: instance = ec2.get_all_instances( instance_ids=[spot_request.instance_id])[0].instances[0] eprint("Waiting. spot instance state: '%s'" % instance.state) if instance.state == 'running': break time.sleep(10) ec2.create_tags([instance.id], {tag: ""}) global host instance = ec2.get_all_instances( instance_ids=[spot_request.instance_id])[0].instances[0] host = instance.ip_address print("%s" % host) if __name__ == "__main__": auto_vpn()
def tag_resource(ec2, resource_id, tags):
    """Tag one EC2 resource.

    Thin wrapper around ec2.create_tags, which requires a list of resource
    ids; the API call's result is handed straight back to the caller.
    """
    result = ec2.create_tags([resource_id], tags)
    return result
vol_tags = ec2.get_all_tags(filters={'resource-id': volume.id, 'resource-type': 'volume'}) is_archived = False is_named = False vol_name = 'unnamed' for tag in vol_tags: if tag.name == 'Name': vol_name = tag.value is_named = True elif tag.name == 'archived' and tag.value == 'yes': is_archived = True if is_archived: print "SKIPPING: Already archived:", volume.id, vol_name continue if do_archive: print "ARCHIVING:",volume.id, vol_name snap_description = "{0} - {1}".format(vol_name, snapshot_description_archive) else: snap_description = "{0} - created by robb@pandastrike".format(vol_name) # do snapshot print "Creating snapshot for volume {0}: '{1}'.".format(volume.id,snap_description) snapshot = volume.create_snapshot(description=snap_description) if do_archive: ec2.create_tags([volume.id, snapshot.id], tags={'archived':'yes'}) # vim: set tabstop=4 shiftwidth=4 softtabstop=0 textwidth=0 expandtab :