def backup_done(instance_id): ec2() conn = boto.ec2.connect_to_region( env.backup_ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] volume_ids = [] for k in instance.block_device_mapping: volume_ids.append(instance.block_device_mapping[k].volume_id) volumes = conn.get_all_volumes(volume_ids=volume_ids) print "Terminating the temporary EC2 backup instance" instance.terminate() time.sleep(5) reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] while instance.state != 'terminated': time.sleep(1) print '.', reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] print "Deleting temporary EBS volumes made from the AMI/snapshots" for v in volumes: v.delete() print "----------" print "EC2 cleanup done. The AMI and snapshots have *not* been deleted, as they're good to keep around." print "Consider manually cleaning out old AMIs and snapshots every once and a while!"
def backup_done(instance_id): ec2() conn = boto.ec2.connect_to_region(env.backup_ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] volume_ids = [] for k in instance.block_device_mapping: volume_ids.append(instance.block_device_mapping[k].volume_id) volumes = conn.get_all_volumes(volume_ids=volume_ids) print "Terminating the temporary EC2 backup instance" instance.terminate() time.sleep(5) reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] while instance.state != 'terminated': time.sleep(1) print '.', reservation = conn.get_all_instances(instance_ids=[instance_id])[0] instance = reservation.instances[0] print "Deleting temporary EBS volumes made from the AMI/snapshots" for v in volumes: v.delete() print "----------" print "EC2 cleanup done. The AMI and snapshots have *not* been deleted, as they're good to keep around." print "Consider manually cleaning out old AMIs and snapshots every once and a while!"
def create_ami(): """ Creates an AMI from our primary instance. Includes the boot EBS volume as well as the data EBS volume. """ ec2() conn = boto.ec2.connect_to_region( env.ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) ami_id = conn.create_image(env.ec2_instance_id, 'localwiki-main-%s' % (int(time.time())), no_reboot=True) ami = conn.get_all_images(image_ids=[ami_id])[0] print "Waiting for instance AMI to be created" while ami.state != 'available': print '.', sys.stdout.flush() time.sleep(5) ami = conn.get_all_images(image_ids=[ami_id])[0] return ami
def create_ec2(ami_id=None, instance_type='m1.medium'): ec2() if not ami_id: ami_id = get_ec2_ami(env.ec2_region) conn = boto.ec2.connect_to_region(env.ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) # Don't delete root EBS volume on termination root_device = boto.ec2.blockdevicemapping.BlockDeviceType( delete_on_termination=False, ) block_device_map = boto.ec2.blockdevicemapping.BlockDeviceMapping() block_device_map['/dev/sda1'] = root_device res = conn.run_instances(ami_id, key_name=env.ec2_key_name, instance_type=instance_type, block_device_map=block_device_map, security_groups=[env.ec2_security_group] ) instance = res.instances[0] exact_region = instance.placement # Create EBS volume for data storage print "Waiting for EBS volume to be created.." data_vol = conn.create_volume(300, exact_region) cur_vol = conn.get_all_volumes([data_vol.id])[0] while cur_vol.status != 'available': time.sleep(1) print ".", sys.stdout.flush() cur_vol = conn.get_all_volumes([data_vol.id])[0] print "Spinning up instance. Waiting for it to start. " while instance.state != 'running': time.sleep(1) instance.update() print ".", sys.stdout.flush() print "Instance running." print "Hostname: %s" % instance.public_dns_name print "Attaching EBS volume to instance at AWS level.." conn.attach_volume (data_vol.id, instance.id, "/dev/sdh") print "Waiting for instance to finish booting up. " time.sleep(20) print "Instance ready to receive connections. " env.hosts = [instance.public_dns_name]
def create_ec2(ami_id=None, instance_type='m1.medium'): ec2() if not ami_id: ami_id = get_ec2_ami(env.ec2_region) conn = boto.ec2.connect_to_region( env.ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) # Don't delete root EBS volume on termination root_device = boto.ec2.blockdevicemapping.BlockDeviceType( delete_on_termination=False, ) block_device_map = boto.ec2.blockdevicemapping.BlockDeviceMapping() block_device_map['/dev/sda1'] = root_device res = conn.run_instances(ami_id, key_name=env.ec2_key_name, instance_type=instance_type, block_device_map=block_device_map, security_groups=[env.ec2_security_group]) instance = res.instances[0] exact_region = instance.placement # Create EBS volume for data storage print "Waiting for EBS volume to be created.." data_vol = conn.create_volume(300, exact_region) cur_vol = conn.get_all_volumes([data_vol.id])[0] while cur_vol.status != 'available': time.sleep(1) print ".", sys.stdout.flush() cur_vol = conn.get_all_volumes([data_vol.id])[0] print "Spinning up instance. Waiting for it to start. " while instance.state != 'running': time.sleep(1) instance.update() print ".", sys.stdout.flush() print "Instance running." print "Hostname: %s" % instance.public_dns_name print "Attaching EBS volume to instance at AWS level.." conn.attach_volume(data_vol.id, instance.id, "/dev/sdh") print "Waiting for instance to finish booting up. " time.sleep(20) print "Instance ready to receive connections. " env.hosts = [instance.public_dns_name]
def run_backup(instance_type='m1.medium'): """ You'll need to update the `ec2_instance_id` in your secrets.json for this to work. """ ec2() ami = create_ami_and_move_to_backup_region() conn = boto.ec2.connect_to_region(env.backup_ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) res = conn.run_instances(ami.id, placement=env.backup_ec2_region + 'a', key_name=env.ec2_key_name, instance_type=instance_type, security_groups=[env.ec2_security_group] ) instance = res.instances[0] exact_region = instance.placement print "Spinning up backup instance. Waiting for it to start. " while instance.state != 'running': time.sleep(1) instance.update() print ".", sys.stdout.flush() print "Instance running." print "Hostname: %s" % instance.public_dns_name print "Waiting for instance to finish booting up. " time.sleep(20) print "Instance ready to receive connections. " env.roledefs['backup_host'] = ['ubuntu@' + instance.public_dns_name] env.host_string = 'ubuntu@' + instance.public_dns_name collect_backup(instance)
def run_backup(instance_type='m1.medium'): """ You'll need to update the `ec2_instance_id` in your secrets.json for this to work. """ ec2() ami = create_ami_and_move_to_backup_region() conn = boto.ec2.connect_to_region( env.backup_ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) res = conn.run_instances(ami.id, placement=env.backup_ec2_region + 'a', key_name=env.ec2_key_name, instance_type=instance_type, security_groups=[env.ec2_security_group]) instance = res.instances[0] exact_region = instance.placement print "Spinning up backup instance. Waiting for it to start. " while instance.state != 'running': time.sleep(1) instance.update() print ".", sys.stdout.flush() print "Instance running." print "Hostname: %s" % instance.public_dns_name print "Waiting for instance to finish booting up. " time.sleep(20) print "Instance ready to receive connections. " env.roledefs['backup_host'] = ['ubuntu@' + instance.public_dns_name] env.host_string = 'ubuntu@' + instance.public_dns_name collect_backup(instance)
def create_ami(): """ Creates an AMI from our primary instance. Includes the boot EBS volume as well as the data EBS volume. """ ec2() conn = boto.ec2.connect_to_region(env.ec2_region, aws_access_key_id=env.aws_access_key_id, aws_secret_access_key=env.aws_secret_access_key, security_token=env.aws_security_token, ) ami_id = conn.create_image(env.ec2_instance_id, 'localwiki-main-%s' % (int(time.time())), no_reboot=True) ami = conn.get_all_images(image_ids=[ami_id])[0] print "Waiting for instance AMI to be created" while ami.state != 'available': print '.', sys.stdout.flush() time.sleep(5) ami = conn.get_all_images(image_ids=[ami_id])[0] return ami
a = ['ec2-xxxxxx.ap-northeast-1.compute.amazonaws.com'] for x in a: print x db.public_dns_name.insert({ 'public_dns_name': x }) #if res: # for inst in res: # name = inst.instances[0].tags['Name'] # logging.warning(name) # if name == 'ClientMaster2': # if inst.instances[0].public_dns_name != None: # logging.warning(name) # print inst.instances[0].public_dns_name # db.public_dns_name.insert({ # 'public_dns_name': inst.instances[0].public_dns_name # }) con.close() def run(): aws_access_key_id = '' aws_secret_access_key = '' kw_params = {} kw_params.update({'aws_access_key_id':aws_access_key_id}) kw_params.update({'aws_secret_access_key':aws_secret_access_key}) conn = boto.ec2.connect_to_region('ap-northeast-1', **kw_params) conn.run_instances('ami-xxxxxxxx', key_name='', instance_type='m1.medium', security_groups=['default', 'quick-start-1'], max_count='20') ec2()
#if res:
#    for inst in res:
#        name = inst.instances[0].tags['Name']
#        logging.warning(name)
#        if name == 'ClientMaster2':
#            if inst.instances[0].public_dns_name != None:
#                logging.warning(name)
#                print inst.instances[0].public_dns_name
#                db.public_dns_name.insert({
#                    'public_dns_name': inst.instances[0].public_dns_name
#                })
con.close()


def run():
    """Start up to 20 m1.medium instances in the ap-northeast-1 region."""
    credentials = {
        'aws_access_key_id': '',
        'aws_secret_access_key': '',
    }
    conn = boto.ec2.connect_to_region('ap-northeast-1', **credentials)
    conn.run_instances('ami-xxxxxxxx',
                       key_name='',
                       instance_type='m1.medium',
                       security_groups=['default', 'quick-start-1'],
                       max_count='20')


# NOTE(review): the collapsed original makes it unclear whether ec2()
# belongs inside run() or at module level -- verify before relying on it.
ec2()