def new_security_group(region, name=None, description=None):
    """Create a Security Group that allows inbound SSH from anywhere."""
    conn = get_region_conn(region.name)
    group_name = name if name else INST_SPECIFIC_SG_PREFIX + timestamp()
    group_descr = (description if description
                   else 'Created for using with specific instance')
    group = conn.create_security_group(group_name, group_descr)
    # Open TCP port 22 to the world so the instance is reachable via SSH.
    group.authorize('tcp', 22, 22, '0.0.0.0/0')
    return group
def new_security_group(region, name=None, description=None):
    """Create a Security Group pre-authorized for SSH access."""
    if not name:
        name = INST_SPECIFIC_SG_PREFIX + timestamp()
    if not description:
        description = "Created for using with specific instance"
    s_g = get_region_conn(region.name).create_security_group(name, description)
    # Authorize inbound SSH (TCP/22) from any address.
    s_g.authorize("tcp", 22, 22, "0.0.0.0/0")
    return s_g
def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously
        wait for successful completion;
    description
        description for snapshot. Will be compiled from instance
        parameters by default;
    tags
        tags to be added to snapshot. Will be cloned from volume and
        from instance by default;
    consistent
        if True, the script will try to freeze the FS mountpoint and
        create the snapshot while it is frozen with all buffers dumped
        to disk.
    """
    if vol.attach_data:
        inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    else:
        inst = None
    if not description and inst:
        # Encode instance parameters into the description so the
        # instance can later be restored from this snapshot alone.
        description = dumps({
            'Volume': vol.id,
            'Region': vol.region.name,
            'Device': vol.attach_data.device,
            'Instance': inst.id,
            'Type': inst.instance_type,
            'Arch': inst.architecture,
            'Root_dev_name': inst.root_device_name,
            'Time': timestamp(),
        })

    def freeze_volume():
        # Flush filesystem buffers over SSH for a consistent snapshot.
        key_filename = config.get(inst.region.name, 'KEY_FILENAME')
        try:
            _user = config.get('SYNC', 'USERNAME')
        except Exception:
            # Narrowed from bare `except:` - fall back to default user
            # on any config lookup failure, but let KeyboardInterrupt
            # and SystemExit propagate.
            _user = USERNAME
        with settings(host_string=inst.public_dns_name,
                      key_filename=key_filename, user=_user):
            run('sync', shell=False)
            # Keep syncing in background while the snapshot is created.
            run('for i in {1..20}; do sync; sleep 1; done &')

    def initiate_snapshot():
        if consistent:
            if inst.state == 'running':
                try:
                    freeze_volume()
                except Exception:
                    # Best effort: proceed with an unfrozen FS rather
                    # than failing the snapshot (was a bare `except:`).
                    logger.info('FS NOT FREEZED! '
                                'Do you have access to this server?')
        snapshot = vol.create_snapshot(description)
        if tags:
            add_tags(snapshot, tags)
        else:
            add_tags(snapshot, vol.tags)
            if inst:
                add_tags(snapshot, inst.tags)
        logger.info('{0} started from {1} in {0.region}'.format(snapshot, vol))
        return snapshot

    if synchronously:
        while True:     # Iterate unless success and delete failed snapshots.
            snapshot = initiate_snapshot()
            try:
                wait_for(snapshot, '100%', limit=SNAP_TIME)
                assert snapshot.status == 'completed', (
                    'completed with wrong status {0}'.format(snapshot.status))
            except (StateNotChangedError, AssertionError) as err:
                logger.error(str(err) + ' - deleting')
                snapshot.delete()
            else:
                break
    else:
        snapshot = initiate_snapshot()
    return snapshot
def create_encrypted_instance(
        region_name, release='lucid', volume_size='8', architecture=None,
        type='t1.micro', name='encr_root', pw1=None, pw2=None,
        security_groups=''):
    """
    Create Ubuntu instance with LUKS-encrypted root volume.

    region_name
        region where you want to create the instance;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        size of volume in GiB (always remember that the script creates
        a boot volume with size 1 GiB, so the minimal size of the whole
        volume is 3 GiB - 1 GiB for /boot and 2 GiB for /). 8 by
        default;
    architecture
        "i386" or "x86_64";
    type
        type of instance. 't1.micro' by default;
    name
        name of LUKS-encrypted volume. 'encr_root' by default;
    pw1, pw2
        you can specify passwords in parameters to suppress the
        password prompt;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'.

    To unlock go to https://ip_address_of_instance (only after reboot
    or shutdown). You can set up to 8 passwords. Default boot.key and
    boot.crt created for .amazonaws.com so must work for all instances.
    Process of creation is about 20 minutes long.
    """
    # Coerce before comparing: the default is the string '8', and a
    # str >= int comparison is always True in Python 2 (dead guard) and
    # a TypeError in Python 3.
    volume_size = int(volume_size)
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)
    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename,
                                  'ubuntu', ubuntu_arch, dev, name,
                                  release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name, snap.id, 'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n https://{0}\n'
                        .format(new_instance.public_dns_name))
            # The AMI and snapshot were only needed to boot the new
            # instance - clean them up.
            img.deregister()
            snap.delete()
def create_ami(region, snap_id, force=None, root_dev='/dev/sda1',
               zone_name=None, default_arch=None, default_type='t1.micro',
               security_groups=''):
    """
    Create AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created AMI image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automatically process snapshots for same instance with
        near time (10 minutes or shorter), but for other devices
        (/dev/sdb, /dev/sdc, etc);
    force
        run instance from AMI after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    # Sibling snapshots: same instance, different device, taken within
    # 10 minutes of `snap`, newest first.
    related_snaps = sorted(
        [snp for snp in snaps
         if get_snap_instance(snp) == instance_id
         and get_snap_device(snp) != _device
         and abs(get_snap_time(snap) - get_snap_time(snp)) <=
             timedelta(minutes=10)],
        key=get_snap_time, reverse=True)
    # Setup for building an EBS boot snapshot.
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    # A whole-disk root device marks an encrypted instance - it needs
    # the dedicated kernel. Plain equality replaces the former
    # re.match(r'^/dev/sda$', ...) which was a regex used for an exact
    # string comparison.
    if _device == '/dev/sda':
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb
    for snp in related_snaps:
        s_ebs = EBSBlockDeviceType()
        s_ebs.delete_on_termination = True
        s_ebs.snapshot_id = snp.id
        block_map[get_snap_device(snp)] = s_ebs
    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    # AMI names may not contain ':' or spaces.
    name = name.replace(":", ".").replace(" ", "_")
    # Create the new AMI with all options from snap JSON description.
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map, kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result, ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)
    logger.info('The new AMI ID = {0}'.format(result))
    new_instance = None
    if force == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region, image.id, inst_type=instance_type,
            security_groups=security_groups, zone_name=zone_name)
    return image, new_instance
def replicate_security_groups(filters=None):
    """
    Replicate updates of Security Groups among regions.

    :param filters: restrict replication to subset of Security Groups,
                    see available options at
                    http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSecurityGroups.html.
                    Not available while running as Fabric task because
                    it should be of `dict` type.
    :type filters: dict

    Per-instance Security Groups without additional rules won't be
    replicated. Raises warnings about synchronization issues that
    require manual resolution.
    """
    HASH, TIMESTAMP = "Hash", "Version"  # Tag names.

    def get_hash(s_g):
        """
        Return unique hash for Security Group rules.

        Granted Security Groups are considered identical if they belong
        to an identical owner and are named identically, irrespective
        of region.
        """
        return sha256(str(regroup_rules(s_g).items())).hexdigest()

    def was_updated(s_g):
        """Returns True if Security Group was modified or just created."""
        return HASH not in s_g.tags or get_hash(s_g) != s_g.tags[HASH]

    regions = get_region_conn().get_all_regions()
    # Blank per-instance group used as a "no extra rules" reference.
    blank_group = new_security_group(regions[0])
    security_groups = []
    for reg in regions:
        conn = get_region_conn(reg.name)
        security_groups.extend(conn.get_all_security_groups(filters=filters))
    name = attrgetter("name")
    grp_by_name = groupby(sorted(security_groups, key=name), key=name)
    for name, grp_in_regions in grp_by_name:
        grp_in_regions = list(grp_in_regions)
        versions = set(get_hash(s_g) for s_g in grp_in_regions)
        old_vers = [s_g for s_g in grp_in_regions if not was_updated(s_g)]
        if len(set(s_g.tags[HASH] for s_g in old_vers)) > 1:
            warn("Old versions of {0} should be synced manually".format(name))
            continue
        if len(versions) == 2 and old_vers:  # Update olds to new version.
            new = [grp for grp in grp_in_regions if was_updated(grp)][0]
            for prev in old_vers:
                sync_rules(new, prev)
        elif len(versions) != 1:  # Was `not len(versions) == 1`.
            warn("More than 1 new versions of {0} found. Synchronization "
                 "can't be applied.".format(name))
            continue
        # Clone to all regions if not yet cloned. Per-instance groups
        # identical to the blank reference are skipped.
        if len(grp_in_regions) < len(regions) and not (
                name.startswith(INST_SPECIFIC_SG_PREFIX) and
                get_hash(grp_in_regions[0]) == get_hash(blank_group)):
            s_g_regions = set(s_g.region.name for s_g in grp_in_regions)
            for reg_name in set(reg.name for reg in regions) - s_g_regions:
                region = get_region_conn(reg_name).region
                sync_rules(grp_in_regions[0], dst_region=region)
        # Update tags.
        mark = timestamp()
        for s_g in grp_in_regions:
            s_g.add_tag(HASH, get_hash(s_g))
            s_g.add_tag(TIMESTAMP, mark)
    blank_group.delete()
def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously
        wait for successful completion;
    description
        description for snapshot. Will be compiled from instance
        parameters by default;
    tags
        tags to be added to snapshot. Will be cloned from volume and
        from instance by default;
    consistent
        if True, the script will try to freeze the FS mountpoint and
        create the snapshot while it is frozen with all buffers dumped
        to disk.
    """
    if vol.attach_data:
        inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    else:
        inst = None
    if not description and inst:
        # Encode instance parameters into the description so the
        # instance can later be restored from this snapshot alone.
        description = dumps({
            'Volume': vol.id,
            'Region': vol.region.name,
            'Device': vol.attach_data.device,
            'Instance': inst.id,
            'Type': inst.instance_type,
            'Arch': inst.architecture,
            'Root_dev_name': inst.root_device_name,
            'Time': timestamp(),
        })

    def freeze_volume():
        # Flush filesystem buffers over SSH for a consistent snapshot.
        key_filename = config.get(inst.region.name, 'KEY_FILENAME')
        try:
            _user = config.get('SYNC', 'USERNAME')
        except Exception:
            # Narrowed from bare `except:` - fall back to default user
            # on any config lookup failure, but let KeyboardInterrupt
            # and SystemExit propagate.
            _user = USERNAME
        with settings(host_string=inst.public_dns_name,
                      key_filename=key_filename, user=_user):
            wait_for_sudo('sync', shell=False)
            # Keep syncing in background while the snapshot is created.
            run('for i in {1..20}; do sudo sync; sleep 1; done &')

    def initiate_snapshot():
        if consistent:
            if inst.state == 'running':
                try:
                    freeze_volume()
                except Exception:
                    # Best effort: proceed with an unfrozen FS rather
                    # than failing the snapshot (was a bare `except:`).
                    logger.info('FS NOT FREEZED! '
                                'Do you have access to this server?')
        snapshot = vol.create_snapshot(description)
        if tags:
            add_tags(snapshot, tags)
        else:
            add_tags(snapshot, vol.tags)
            if inst:
                add_tags(snapshot, inst.tags)
        logger.info('{0} started from {1} in {0.region}'.format(snapshot, vol))
        return snapshot

    if synchronously:
        while True:     # Iterate unless success and delete failed snapshots.
            snapshot = initiate_snapshot()
            try:
                wait_for(snapshot, '100%', limit=SNAP_TIME)
                assert snapshot.status == 'completed', (
                    'completed with wrong status {0}'.format(snapshot.status))
            except (StateNotChangedError, AssertionError) as err:
                logger.error(str(err) + ' - deleting')
                snapshot.delete()
            else:
                break
    else:
        snapshot = initiate_snapshot()
    return snapshot
def replicate_security_groups(filters=None):
    """
    Replicate updates of Security Groups among regions.

    :param filters: restrict replication to subset of Security Groups,
                    see available options at
                    http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSecurityGroups.html.
                    Not available while running as Fabric task because
                    it should be of `dict` type.
    :type filters: dict

    Per-instance Security Groups without additional rules won't be
    replicated. Raises warnings about synchronization issues that
    require manual resolution.
    """
    HASH, TIMESTAMP = 'Hash', 'Version'  # Tag names.

    def get_hash(s_g):
        """
        Return unique hash for Security Group rules.

        Granted Security Groups are considered identical if they belong
        to an identical owner and are named identically, irrespective
        of region.
        """
        return sha256(str(regroup_rules(s_g).items())).hexdigest()

    def was_updated(s_g):
        """Returns True if Security Group was modified or just created."""
        return HASH not in s_g.tags or get_hash(s_g) != s_g.tags[HASH]

    regions = get_region_conn().get_all_regions()
    # Blank per-instance group used as a "no extra rules" reference.
    blank_group = new_security_group(regions[0])
    security_groups = []
    for reg in regions:
        conn = get_region_conn(reg.name)
        security_groups.extend(conn.get_all_security_groups(filters=filters))
    name = attrgetter('name')
    grp_by_name = groupby(sorted(security_groups, key=name), key=name)
    for name, grp_in_regions in grp_by_name:
        grp_in_regions = list(grp_in_regions)
        versions = set(get_hash(s_g) for s_g in grp_in_regions)
        old_vers = [s_g for s_g in grp_in_regions if not was_updated(s_g)]
        if len(set(s_g.tags[HASH] for s_g in old_vers)) > 1:
            warn('Old versions of {0} should be synced manually'.format(name))
            continue
        if len(versions) == 2 and old_vers:  # Update olds to new version.
            new = [grp for grp in grp_in_regions if was_updated(grp)][0]
            for prev in old_vers:
                sync_rules(new, prev)
        elif len(versions) != 1:  # Was `not len(versions) == 1`.
            warn('More than 1 new versions of {0} found. Synchronization '
                 'can\'t be applied.'.format(name))
            continue
        # Clone to all regions if not yet cloned. Per-instance groups
        # identical to the blank reference are skipped.
        if (len(grp_in_regions) < len(regions) and
                not (name.startswith(INST_SPECIFIC_SG_PREFIX) and get_hash(
                     grp_in_regions[0]) == get_hash(blank_group))):
            s_g_regions = set(s_g.region.name for s_g in grp_in_regions)
            for reg_name in set(reg.name for reg in regions) - s_g_regions:
                region = get_region_conn(reg_name).region
                sync_rules(grp_in_regions[0], dst_region=region)
        # Update tags.
        mark = timestamp()
        for s_g in grp_in_regions:
            s_g.add_tag(HASH, get_hash(s_g))
            s_g.add_tag(TIMESTAMP, mark)
    blank_group.delete()
def create_encrypted_instance(region_name, release='lucid', volume_size='8',
                              architecture=None, type='t1.micro',
                              name='encr_root', pw1=None, pw2=None,
                              security_groups=''):
    """
    Create Ubuntu instance with LUKS-encrypted root volume.

    region_name
        region where you want to create the instance;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        size of volume in GiB (always remember that the script creates
        a boot volume with size 1 GiB, so the minimal size of the whole
        volume is 3 GiB - 1 GiB for /boot and 2 GiB for /). 8 by
        default;
    architecture
        "i386" or "x86_64";
    type
        type of instance. 't1.micro' by default;
    name
        name of LUKS-encrypted volume. 'encr_root' by default;
    pw1, pw2
        you can specify passwords in parameters to suppress the
        password prompt;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'.

    To unlock go to https://ip_address_of_instance (only after reboot
    or shutdown). You can set up to 8 passwords. Default boot.key and
    boot.crt created for .amazonaws.com so must work for all instances.
    Process of creation is about 20 minutes long.
    """
    # Coerce before comparing: the default is the string '8', and a
    # str >= int comparison is always True in Python 2 (dead guard) and
    # a TypeError in Python 3.
    volume_size = int(volume_size)
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)
    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename,
                                  'ubuntu', ubuntu_arch, dev, name,
                                  release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name, snap.id, 'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n https://{0}\n'.format(
                new_instance.public_dns_name))
            # The AMI and snapshot were only needed to boot the new
            # instance - clean them up.
            img.deregister()
            snap.delete()
def create_ami(region, snap_id, force=None, root_dev='/dev/sda1',
               zone_name=None, default_arch=None, default_type='t1.micro',
               security_groups=''):
    """
    Create AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created AMI image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automatically process snapshots for same instance with
        near time (10 minutes or shorter), but for other devices
        (/dev/sdb, /dev/sdc, etc);
    force
        run instance from AMI after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    # Sibling snapshots: same instance, different device, taken within
    # 10 minutes of `snap`, newest first.
    related_snaps = sorted(
        [snp for snp in snaps
         if get_snap_instance(snp) == instance_id
         and get_snap_device(snp) != _device
         and abs(get_snap_time(snap) - get_snap_time(snp)) <=
             timedelta(minutes=10)],
        key=get_snap_time, reverse=True)
    # Setup for building an EBS boot snapshot.
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    # A whole-disk root device marks an encrypted instance - it needs
    # the dedicated kernel. Plain equality replaces the former
    # re.match(r'^/dev/sda$', ...) which was a regex used for an exact
    # string comparison.
    if _device == '/dev/sda':
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb
    for snp in related_snaps:
        s_ebs = EBSBlockDeviceType()
        s_ebs.delete_on_termination = True
        s_ebs.snapshot_id = snp.id
        block_map[get_snap_device(snp)] = s_ebs
    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    # AMI names may not contain ':' or spaces.
    name = name.replace(":", ".").replace(" ", "_")
    # Create the new AMI with all options from snap JSON description.
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map, kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result, ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)
    logger.info('The new AMI ID = {0}'.format(result))
    info = ('\nEnter RUN if you want to launch instance using '
            'just created {0}: '.format(image))
    new_instance = None
    if force == 'RUN' or raw_input(info).strip() == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region, image.id, inst_type=instance_type,
            security_groups=security_groups, zone_name=zone_name)
    return image, new_instance