def destroy(args):
    """
    Main entry point for the destroy utility.
    """
    start = time.time()

    with shell.Step(1):
        print "Cloud setup validation:"

        if args.clean:
            raise shell.Step.Skipped('No need to parse a configuration file')

        # Load master configuration
        print "* Parsing the master XML definition file"
        config = etree.parse(args.setup)
        assets = config.getroot().attrib
        machines = config.xpath('/cloud/setup/machine/@instance-id')

        # Validate the configuration file
        print "* Validating the master XML definition against the XML schema"
        conf.schema('cloud-instance').assertValid(config)
        #raise shell.Step.Skipped("Cloud XML instance definition schema not yet implemented")

    # Instantiate connections
    with shell.Step(2):
        print "Instantiation of the cloud manager connection:"

        print "* Connecting to the VPC manager"
        c = boto.VPCConnection(args.access_key_id, args.secret_key)

    if args.clean:
        with shell.Step(3):
            print "Deleting all security groups:"
            grpcnt = 0
            for group in c.get_all_security_groups():
                if group.name.startswith('pop-'):
                    print "* Deleting", group.name
                    group.delete()
                    grpcnt += 1
                else:
                    print "* Skipping", group.name

        with shell.Step(4):
            print "Deleting all key pairs:"
            kpcnt = 0
            for key in c.get_all_key_pairs():
                if key.name.startswith('pop-'):
                    print "* Deleting", key.name
                    key.delete()
                    kpcnt += 1
                else:
                    print "* Skipping", key.name

        duration = int(time.time() - start)
        duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)

        with shell.Wrapper(72):
            print
            print "Destroyed {0} groups and {1} key pairs in {2}.\n".format(
                grpcnt, kpcnt, duration)

        return

    with shell.Step(3):
        print "Stopping all instances:"

        terminating = []

        for instance in machines:
            print "* Terminating instance with ID {0}".format(instance)
            try:
                instance = c.terminate_instances(instance)[0]
            except boto.ResponseError as e:
                if boto.AWSException(e).code == 'InvalidInstanceID.NotFound':
                    print " └", shell.hilite("Instance not found", shell.YELLOW)
                else:
                    raise
            else:
                if instance.state == 'terminated':
                    print " └ Instance already terminated"
                else:
                    terminating.append(instance)

        if terminating:
            print "* Waiting for {0} instances to terminate".format(
                len(terminating))

            for instance in terminating:
                print " └ Waiting for instance with ID {0} to terminate".format(
                    instance.id),
                shell.wait(instance, 'terminated', interval=.5,
                           valid=('shutting-down',))

    with shell.Step(4):
        print "Destroying security assets:"

        print "* Destroying security group"
        try:
            time.sleep(8)  # Give AWS some time to disassociate the machines
            c.delete_security_group(assets['security-group'])
        except boto.ResponseError as e:
            if boto.AWSException(e).code == 'InvalidGroup.NotFound':
                print " └ Group already deleted"
            else:
                raise

        print "* Destroying key pair"
        c.delete_key_pair(assets['key-pair'])

        if os.path.exists(assets['key-filename']):
            print " └ Deleting local private key '{0}'".format(
                assets['key-filename'])
            try:
                os.remove(assets['key-filename'])
                print " └ Local private key deleted"
            except OSError:
                print " └ Unable to delete local private key"

    with shell.Step(5):
        print "Freeing unused resources:"

        print "* Releasing elastic IP address"
        try:
            address = c.get_all_addresses([assets['public-address']])[0]
        except boto.ResponseError as e:
            if boto.AWSException(e).code == 'InvalidAddress.NotFound':
                print " └ Address already released"
            else:
                raise
        else:
            address.release()

        print "* Deleting subnet"
        try:
            c.delete_subnet(assets['subnet'])
        except boto.ResponseError as e:
            if boto.AWSException(e).code == 'InvalidSubnetID.NotFound':
                print " └ Subnet already deleted"
            else:
                raise

        print
        print shell.hilite("The internet gateway and the VPC will not be " \
                           "deleted as they could still be needed by other " \
                           "applications and don't generate additional " \
                           "costs.", shell.MAGENTA)
        print

    duration = int(time.time() - start)
    duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)

    with shell.Wrapper(72):
        print
        print "Cloud destroyed in {0}; all resources were correctly " \
              "torn down.\n".format(duration)
def master(args):
    """
    Main entry point for the master utility.
    """
    start = time.time()

    with shell.Step(1):
        print "Cloud setup validation:"

        # Load master configuration
        print "* Parsing the master XML definition file"
        config = etree.parse(args.setup)

        # Validate the configuration file
        print "* Validating the master XML definition against the XML schema"
        conf.schema('master-image').assertValid(config)

        master = config.getroot().attrib
        manifests = config.xpath('/master/apply/@manifest')
        uploads = config.xpath('/master/upload/@asset')

    # Instantiate connections
    with shell.Step(2):
        print "Instantiation of the cloud manager connection:"

        print "* Choosing region"
        try:
            region = master['region']
        except KeyError:
            region = DEFAULT_REGION
        print " └ Selected region '{0}'".format(region)

        print "* Connecting to the EC2 manager"
        c = ec2.connect_to_region(
            aws_access_key_id=args.access_key_id,
            aws_secret_access_key=args.secret_key,
            region_name=region,
        )

    with shell.Step(3):
        print "Virtual setup initialization:"

        print "* Checking for duplicate image names"
        try:
            image = c.get_all_images(filters={'name': master['name']})[0]
        except IndexError:
            print " └ Name '{0}' not used yet".format(master['name'])
        else:
            print " └ Name '{0}' already used".format(master['name'])
            print " └ Checking for different user"
            iam_c = iam.IAMConnection(
                aws_access_key_id=args.access_key_id,
                aws_secret_access_key=args.secret_key,
            )
            uid = iam_c.get_user()['GetUserResponse']['GetUserResult']['User']['UserId']

            if image.ownerId == uid:
                if not args.force:
                    raise ValueError("The name '{0}' is already taken by the " \
                                     "image '{1}'.".format(master['name'], image.id))
                else:
                    print " └ Same user but --force flag set, deregistering image"
                    image.deregister()
                    print
                    print shell.hilite("Note that only the AMI was deregistered, " \
                                       "the associated snapshot was left in place. " \
                                       "Remove it manually if desired.", shell.MAGENTA)
                    print

        print "* Creating temporary security group"
        group = c.create_security_group(
            'pop-' + random_string(16),
            'Temporary security group for POP master image creation')
        print " └ New security group named '{0.name}'".format(group)
        print " └ Authorizing external SSH access"
        group.authorize('tcp', 22, 22, "0.0.0.0/0")
        #group.authorize('tcp', 80, 80, "0.0.0.0/0")

        print "* Creating key pair"
        key = c.create_key_pair('pop-' + random_string(16))
        print " └ New key pair named '{0.name}'".format(key)

        key_filename = 'pop-master-pk-' + random_string(8) + '.pem'
        with open(key_filename, 'w') as fh:
            fh.write(key.material)
        os.chmod(key_filename, stat.S_IRUSR | stat.S_IWUSR)
        print " └ Private key written to '{0}'".format(key_filename)

        print "* Getting base image"
        image = c.get_image(master['base'])

        print "* Launching new instance"
        res = image.run(
            key_name=key.name,
            security_groups=[group],
            instance_type=TYPES[image.architecture],
        )
        print " └ New reservation with ID '{0}'".format(res.id)
        instance = res.instances[0]

        print "* Waiting for machine to boot",
        instance = shell.wait(instance, 'running', interval=.5)
        address = instance.dns_name
        print shell.nowrap(" └ Public address is '{0}'".format(address))

        print "* Waiting for instance to come online",
        shell.wait(ConnectionAttempt(address, 22), 'connected', interval=.8)

        print
        print "Instance online; you can manually connect using this command:\n"
        print shell.nowrap(shell.hilite(
            "ssh -i {0} {1}@{2}".format(key_filename, USER, address),
            shell.MAGENTA))

        if args.clean:
            print
            print "Note that the machine will be available only until the master " \
                  "image creation process successfully completes. If an error " \
                  "happens before completion, the availability of the instance " \
                  "will depend on the stage at which the error happened."
            print "If you want to access the machine after the image creation " \
                  "process completes, use the --no-clean flag."
            print
        else:
            print
            print "The --no-clean flag is set, the instance will remain " \
                  "available after the image creation process completes."
            print "Remember to terminate it manually once done with it."
            print

    with shell.Step(4):
        print "Instance customization:"

        with remote_machine(USER, address, key_filename, args.debug):
            print "* Configuring sources for VPC deployment"
            fab.use_vpc_sources()

            print "* Installing puppet"
            sudo('apt-get -y install puppet')
            sudo('update-rc.d -f puppet remove')

            base = os.path.dirname(os.path.realpath(args.setup.name))
            upload_files(base, uploads, '/var/uploads')
            upload_files(base, manifests, '/var/manifests')

            print "* Applying manifests"
            for manifest in manifests:
                print " └ Applying '{0}'".format(manifest),
                shell.wait(Manifest(os.path.join('/var/manifests', manifest),
                                    args.debug), 'applied', interval=.8)

            if args.manual:
                print
                print "Base setup done, manual setup requested."
                op = confirm('Open an SSH connection now?', default=True)
                if op:
                    stdout = sys.stdout
                    while hasattr(sys.stdout, 'stdout'):
                        sys.stdout = sys.stdout.stdout
                    print
                    print "-" * shell.size()[0]
                    open_shell()
                    print "-" * shell.size()[0]
                    sys.stdout = stdout
                    print
                    sys.stdout.write("Connection closed, press the return key once done.")
                    sys.stdout.flush()
                    raw_input()
                    print
                else:
                    print "Please manually set up the image and press the return" \
                          " key once done."
                    raw_input()
                    print

            print "* Cleaning up"
            sudo('rm -rf /var/manifests /var/uploads')

    with shell.Step(5):
        print "Image creation:"

        print "* Creating image from running instance"
        ami = c.create_image(instance.id, master['name'], master['description'])
        while True:
            try:
                image = c.get_image(ami)
                print " └ New AMI created with ID '{0}'".format(ami)
                break
            except Exception:
                print " └ AMI not found, trying again"
                time.sleep(1)  # Avoid a tight retry loop while the AMI registers

        print "* Waiting for image creation to complete",
        shell.wait(image, 'available', interval=.5)

        if 'public' in master:
            print "* Making image public"
            image.set_launch_permissions(group_names=['all'])

    with shell.Step(6):
        print "Resources cleanup:"

        if args.clean:
            print "* Terminating instance"
            instance.terminate()

            print "* Deleting key pair"
            c.delete_key_pair(key.name)
            os.remove(key_filename)

            print "* Deleting security group"
            group.delete()
        else:
            print "* The --no-clean flag is set, skipping cleanup"
            raise shell.Step.Skipped()

    duration = int(time.time() - start)
    duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)

    with shell.Wrapper(72):
        print
        print "Master image creation completed in {0}; you can launch new " \
              "instances of the just created image by specifying the " \
              "following AMI ID:\n".format(duration)
        print shell.hilite(" {0}".format(ami), shell.MAGENTA)
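# Note: `random_string` is used above to derive unique names for the
# temporary 'pop-*' key pairs and security groups. Its real implementation
# lives elsewhere in this package; the commented sketch below only illustrates
# the behaviour this module assumes (a hypothetical, minimal version, not the
# shipped helper):
#
#   import random
#   import string
#
#   def random_string(length):
#       """Return `length` random lowercase alphanumeric characters."""
#       alphabet = string.ascii_lowercase + string.digits
#       return ''.join(random.choice(alphabet) for _ in range(length))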
def setup(args):
    """
    Main entry point for the setup utility.
    """
    start = time.time()

    SETUP_SUMMARY_FILE = os.path.splitext(
        os.path.basename(args.setup.name))[0] + '.cid'
    print SETUP_SUMMARY_FILE

    with shell.Step(1):
        print "Cloud setup validation:"

        # Load cloud configuration
        print "* Parsing the cloud XML definition file"
        config = etree.parse(args.setup)

        # Validate the configuration file
        print "* Validating the cloud XML definition against the XML schema"
        conf.schema('cloud-setup').assertValid(config)

        cloud = config.getroot().attrib

        # Raise an error if an unmanaged cloud is requested
        print "* Checking for supported setup type"
        if 'manager' not in cloud:
            raise NotImplementedError("Unmanaged clouds are not yet supported")

    # Instantiate connections
    with shell.Step(2):
        print "Instantiation of the cloud manager connection:"

        print "* Connecting to the VPC manager"
        c = boto.VPCConnection(args.access_key_id, args.secret_key)

    with shell.Step(3):
        print "Creation and setup of the virtual private cloud:"

        # Get max VPC size (/16) using the cloud subnet IP range
        print "* Getting or creating the VPC"
        vpc, created = c.get_or_create(str(cidr.CIDR(cloud['cidr'], 16)))
        subnet_cidr = cidr.CIDR(cloud['cidr'])

        if created:
            print " └ New VPC created with ID '{0}'".format(vpc.id)
            print "* Waiting for VPC creation to complete",
            vpc = shell.wait(vpc, 'available', interval=0)
        else:
            print " └ Using existing VPC with ID '{0}'".format(vpc.id)

            print "* Checking for valid CIDR block of the existing VPC"
            vpc_cidr = cidr.CIDR(vpc.cidr_block)

            if subnet_cidr.base not in vpc_cidr:
                raise ValueError("The requested subnet CIDR block base " \
                                 "address ({0}) falls outside the VPC CIDR " \
                                 "block ({1!s}).\nAcceptable values are in " \
                                 "the range {1.base} - {1.last}.".format(
                                     subnet_cidr.base, vpc_cidr))

            if subnet_cidr.size > vpc_cidr.size:
                raise ValueError("The requested subnet CIDR size (/{0.block}," \
                                 " {0.size} IPs) is too big for the " \
                                 "existing VPC CIDR size (/{1.block}, {1.size}" \
                                 " IPs).".format(subnet_cidr, vpc_cidr))

    with shell.Step(4):
        print "Subnet, gateway, addressing and routing setup:"

        print "* Getting or creating subnet"
        subnet, created = vpc.get_or_create_subnet(str(subnet_cidr))
        if created:
            print " └ New subnet created with ID '{0}'".format(subnet.id)
        else:
            print " └ Using existing subnet with ID '{0}'".format(subnet.id)

        print "* Getting or creating internet gateway"
        gateway, created = vpc.get_or_create_gateway()
        if created:
            print " └ New gateway created with ID '{0}'".format(gateway.id)
        else:
            print " └ Using existing gateway with ID '{0}'".format(gateway.id)

        print "* Getting public IP address"
        address, created = c.get_or_create_address()
        if created:
            print " └ New address created with IP '{0.public_ip}'".format(address)
        else:
            print " └ Using existing address with IP '{0.public_ip}'".format(address)

        print "* Setting up routing"
        print " └ Getting route table"
        route_table = c.get_all_route_tables()[0]
        print " └ Associating route table with subnet"
        route_table.associate(subnet)
        print " └ Creating route to internet gateway"
        route_table.route('0.0.0.0/0', gateway=gateway)

    with shell.Step(5):
        print "Security resources setup:"

        print "* Creating temporary security group"
        group = vpc.create_security_group(
            'pop-' + random_string(16),
            'Temporary security group for a POP application')
        print " └ New security group created with ID '{0.id}'".format(group)

        print "* Authorizing all internal traffic"
        group.authorize(-1, 0, 65535, src_group=group)

        print "* Authorizing external SSH access"
        group.authorize('tcp', 22, 22, "0.0.0.0/0")

        print "* Creating key pair"
        key = c.create_key_pair('pop-' + random_string(16))
        print " └ New key pair created with name '{0.name}'".format(key)

    with shell.Step(6):
        print "Virtual machines boot process:"

        print "* Getting needed images"
        images = c.get_all_images(config.xpath('//setup/machine/@image'))
        images = dict([(image.id, image) for image in images])

        print "* Launching instances"
        reservations = {}
        for machine in config.xpath('//setup/machine'):
            machine = machine.attrib
            image = images[machine['image']]
            res = image.run(
                key_name=key.name,
                security_groups=[group.id],
                instance_type=machine.get('type', DEFAULT_MACHINE_TYPE),
                subnet_id=subnet.id,
                private_ip_address=machine['ip'],
            )
            print " └ New reservation (ID: {0}, IP: {1})".format(
                res.id, machine['ip'])
            reservations[machine['ip']] = machine, res.instances[0]

        print "* Waiting for machines to boot"
        for ip, (machine, instance) in reservations.iteritems():
            print " └ Waiting for machine @ {0} to boot".format(ip),
            shell.wait(instance, 'running', interval=.5)

        print "* Associating public IP address to POP application manager"
        address.associate(reservations[cloud['manager']][1])

        print "* Waiting for manager to come online",
        shell.wait(ConnectionAttempt(address.public_ip, 22), 'connected',
                   interval=.8)

    with shell.Step(7):
        print "Local environment setup:"

        print "* Saving private key to disk"
        with open(KEY_FILENAME, 'w') as fh:
            fh.write(key.material)
        os.chmod(KEY_FILENAME, stat.S_IRUSR | stat.S_IWUSR)
        print " └ Private key written to '{0}'".format(KEY_FILENAME)

        print "* Generating local fabfile"
        local = os.path.join(os.path.dirname(fabfiles.__file__), 'local.pyt')
        with open(local, 'r') as rfh:
            with open('fabfile.py', 'w') as wfh:
                wfh.write(rfh.read().format(**{
                    'gendate': datetime.today(),
                    'mgraddress': address.public_ip,
                    'remoteuser': USER,
                    'cloudsetup': SETUP_SUMMARY_FILE,
                    'keyfilename': KEY_FILENAME,
                }))

        with open('cloud.i.xml', 'w') as fh:
            fh.write(xml.format_document(config))

        print "* Saving cloud setup to XML file"
        cloud.update({
            'vpc': vpc.id,
            'subnet': subnet.id,
            'gateway': gateway.id,
            'security-group': group.id,
            'key-pair': key.name,
            'public-address': address.public_ip,
            'key-filename': KEY_FILENAME,
        })
        for machine, instance in reservations.itervalues():
            machine['instance-id'] = instance.id
            machine['launch-time'] = instance.launch_time
        with open(SETUP_SUMMARY_FILE, 'w') as fh:
            fh.write(xml.format_document(config))

        print "* Removing old public key from known hosts (if present)"
        try:
            with open(KNOWN_HOSTS, 'r') as fh:
                known_hosts = fh.read()
        except IOError:
            print " └ Could not read {0}".format(KNOWN_HOSTS)
        else:
            known_hosts, count = re.subn(
                '\n{0} .*'.format(re.escape(address.public_ip)), '', known_hosts)
            if count:
                try:
                    with open(KNOWN_HOSTS, 'w') as fh:
                        fh.write(known_hosts)
                except IOError:
                    print " └ Could not write changes back to {0}".format(
                        KNOWN_HOSTS)
                else:
                    print " └ Public key for IP {0} removed".format(
                        address.public_ip)
            else:
                print " └ No public key matching IP {0} found".format(
                    address.public_ip)

    duration = int(time.time() - start)
    duration = '{0:.0f}m {1:.0f}s'.format(duration // 60, duration % 60)

    with shell.Wrapper(72):
        print
        print "Cloud setup completed in {0}; you can manually connect to the " \
              "manager using the following command:\n".format(duration)
        print shell.hilite(
            " ssh -i {0} {1}@{2}".format(KEY_FILENAME, USER, address.public_ip),
            shell.MAGENTA)

    with shell.Wrapper(72):
        print
        print "Alternatively, you can use the commands already provided by " \
              "the generated fabfile. To rapidly obtain some help about them," \
              " execute the following command in the directory where the " \
              "fabfile is located (make sure you have a recent fabric " \
              "installation):\n"
        print shell.hilite(" fab --list", shell.MAGENTA)
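# Note: `ConnectionAttempt` is polled by `shell.wait` until its state reaches
# 'connected'; it is used above to detect when SSH on a freshly booted
# instance becomes reachable. Assuming the same polling protocol as boto
# instances (an `update()` method refreshing a `state` attribute), a minimal
# hypothetical sketch could look like the commented code below; the real
# class ships with the package:
#
#   import socket
#
#   class ConnectionAttempt(object):
#       """Repeatedly try to open a TCP connection to host:port."""
#
#       def __init__(self, host, port, timeout=2):
#           self.host = host
#           self.port = port
#           self.timeout = timeout
#           self.state = 'connecting'
#
#       def update(self):
#           try:
#               socket.create_connection((self.host, self.port),
#                                        self.timeout).close()
#               self.state = 'connected'
#           except socket.error:
#               self.state = 'connecting'
#           return self.state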