def mount_snapshot(region_name, snap_id, inst_id=None):
    """Mount snapshot to a temporarily created instance or to `inst_id`.

    region_name, snap_id
        specify the snapshot;
    inst_id
        attach to an existing instance. A temporary instance will be
        created if None."""
    conn = get_region_conn(region_name)
    inst = get_inst_by_id(conn.region.name, inst_id) if inst_id else None
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]

    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    with attach_snapshot(snap, inst=inst) as (vol, mountpoint):
        if mountpoint:
            info += ('\nand browse the snapshot, mounted at {mountpoint}.')
        else:
            info += ('\nand mount {device}. NOTE: the device name may be '
                     'altered by the system.')
        key_file = config.get(conn.region.name, 'KEY_FILENAME')
        inst = get_inst_by_id(conn.region.name, vol.attach_data.instance_id)
        assert inst
        logger.info(info.format(inst=inst, user=env.user, key=key_file,
                                device=vol.attach_data.device,
                                mountpoint=mountpoint))
        info = ('\nEnter FINISHED if you are finished looking at the '
                'backup and would like to clean up: ')
        while raw_input(info).strip() != 'FINISHED':
            pass

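# Usage sketch for mount_snapshot (the region and ids below are hypothetical
# placeholders, not values taken from this code). Mounts the snapshot on a
# temporary instance and waits until you type FINISHED before cleanup:
#
#     mount_snapshot('us-east-1', 'snap-12345678')
#
# or against an already running instance:
#
#     mount_snapshot('us-east-1', 'snap-12345678', inst_id='i-87654321')
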
def update_snap(src_vol, src_mnt, dst_vol, dst_mnt, encr, delete_old=False):
    """Update destination region from `src_vol`.

    Create new snapshot with same description and tags. Delete previous
    snapshot (if exists) of the same volume in destination region if
    ``delete_old`` is True."""
    src_inst = get_inst_by_id(src_vol.region.name,
                              src_vol.attach_data.instance_id)
    dst_inst = get_inst_by_id(dst_vol.region.name,
                              dst_vol.attach_data.instance_id)
    rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_vol, dst_mnt,
                      encr)
    if dst_vol.snapshot_id:
        old_snap = dst_vol.connection.get_all_snapshots(
            [dst_vol.snapshot_id])[0]
    else:
        old_snap = None
    src_snap = src_vol.connection.get_all_snapshots([src_vol.snapshot_id])[0]
    create_snapshot(dst_vol, description=src_snap.description,
                    tags=src_snap.tags, synchronously=False)
    if old_snap and delete_old:
        logger.info('Deleting previous {0} in {1}'.format(old_snap,
                                                          dst_vol.region))
        old_snap.delete()

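# Usage sketch for update_snap (hypothetical volume ids and mountpoints; in
# practice both volumes are attached and mounted by the surrounding
# replication code before this is called):
#
#     src = get_region_conn('us-east-1').get_all_volumes(['vol-11111111'])[0]
#     dst = get_region_conn('eu-west-1').get_all_volumes(['vol-22222222'])[0]
#     update_snap(src, '/media/xvdf', dst, '/media/xvdf', encr=False,
#                 delete_old=True)
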
def backup_instance(region_name, instance_id=None, instance=None,
                    synchronously=False, consistent=False):
    """Return list of created snapshots for specified instance.

    region_name
        instance location;
    instance, instance_id
        either `instance_id` or `instance` argument should be specified;
    synchronously
        wait for successful completion. False by default;
    consistent
        if True, then FS mountpoint will be frozen before snapshotting.
        False by default.
    """
    assert bool(instance_id) ^ bool(instance), ('Either instance_id or '
                                                'instance should be specified')
    conn = get_region_conn(region_name)
    if instance_id:
        instance = get_inst_by_id(conn.region.name, instance_id)
    snapshots = []
    for dev in instance.block_device_mapping:
        vol_id = instance.block_device_mapping[dev].volume_id
        vol = conn.get_all_volumes([vol_id])[0]
        snapshots.append(create_snapshot(vol, synchronously=synchronously,
                                         consistent=consistent))
    return snapshots

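# Usage sketch for backup_instance (hypothetical region and instance id).
# Snapshots every EBS volume attached to the instance and returns the list
# of created snapshots:
#
#     snaps = backup_instance('us-east-1', instance_id='i-12345678',
#                             synchronously=True, consistent=True)
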
def deluser(name, region=None, instance_ids=None):
    """
    Remove user <name> with deluser from the "host1;host2" list in <region>.

    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                              'KEY_FILENAME')
                    env.update({'key_filename': key_filename,
                                'warn_only': True})
                env.update({'host_string': _instance.public_dns_name})
                env.username = name
                _sudo('deluser %(username)s' % env)
    else:
        env.update({'warn_only': True})
        env.username = name
        _sudo('deluser %(username)s' % env)

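# Usage sketch for deluser as a Fabric task (hypothetical user name, region
# and instance ids):
#
#     fab deluser:olduser,us-east-1,"i-11111111;i-22222222"
#
# or, without AWS credentials configured, against hosts given with -H:
#
#     fab -H host1,host2 -u admin deluser:olduser
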
def modify_kernel(region, instance_id):
    """
    Modify the old kernel of a stopped instance (needed to make pv-grub work).

    .. note:: install grub-legacy-ec2 and upgrades before running this.

    region
        specify instance region;
    instance_id
        specify instance id for kernel change.

    Kernels list:

        ap-southeast-1  x86_64  aki-11d5aa43
        ap-southeast-1  i386    aki-13d5aa41
        eu-west-1       x86_64  aki-4feec43b
        eu-west-1       i386    aki-4deec439
        us-east-1       x86_64  aki-427d952b
        us-east-1       i386    aki-407d9529
        us-west-1       x86_64  aki-9ba0f1de
        us-west-1       i386    aki-99a0f1dc"""
    key_filename = config.get(region, 'KEY_FILENAME')
    conn = get_region_conn(region)
    instance = get_inst_by_id(conn.region.name, instance_id)
    env.update({
        'host_string': instance.public_dns_name,
        'key_filename': key_filename,
    })
    sudo('env DEBIAN_FRONTEND=noninteractive apt-get update && '
         'env DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade && '
         'env DEBIAN_FRONTEND=noninteractive apt-get install grub-legacy-ec2')
    kernel = config.get(conn.region.name,
                        'KERNEL' + instance.architecture.upper())
    instance.stop()
    wait_for(instance, 'stopped')
    instance.modify_attribute('kernel', kernel)
    instance.start()

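# Usage sketch for modify_kernel (hypothetical region and instance id). The
# matching AKI is read from the config option 'KERNEL' + the instance
# architecture (uppercased) in the region's section:
#
#     modify_kernel('us-east-1', 'i-12345678')
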
def adduser(username, region=None, instance_ids=None, passwordless=None,
            sudo=None):
    """
    Create new <username> with public SSH key on the "host1;host2" list in
    <region>.

    If you want to create a passwordless account, set any value to
    <passwordless>; if you want sudo rights, set any value to <sudo>. The
    file with the public key must be in the same directory.

    If region and instance_ids are not set, the script takes hosts and key
    values from the command line (-H and -i).

    Usage:

    1. Without AWS API keys and config present:
       :<username>,<passwordless=1>,<sudo=1> - in this case you have to
       specify the hosts list in -H and your own account in -u fabric
       parameters.
    2. With AWS API keys and config entries:
       :<username>,<region>,"instance1;instance2",<passwordless>,<sudo>
       Extracts IPs from the instance description.
    """
    if instance_ids and region:
        instances_ids = list(unicode(instance_ids).split(';'))
        for inst in instances_ids:
            if inst:
                _instance = get_inst_by_id(region, inst)
                if not env.key_filename:
                    key_filename = config.get(_instance.region.name,
                                              'KEY_FILENAME')
                    env.update({'key_filename': key_filename})
                env.update({'host_string': _instance.public_dns_name})
                _create_account(username, region, instance_ids, passwordless,
                                sudo)
    else:
        _create_account(username, region, instance_ids, passwordless, sudo)

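# Usage sketch for adduser as a Fabric task, following the two forms from
# the docstring (hypothetical user name, region and instance ids):
#
#     fab -H host1,host2 -u admin adduser:newuser,passwordless=1,sudo=1
#     fab adduser:newuser,us-east-1,"i-11111111;i-22222222",1,1
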
def get_vol_dev(vol):
    """Return OS-specific volume representation as attached device."""
    assert vol.attach_data.instance_id
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    assert inst.public_dns_name, 'Instance is down'
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    attached_dev = vol.attach_data.device
    natty_dev = attached_dev.replace('sd', 'xvd')
    with settings(host_string=inst.public_dns_name,
                  key_filename=key_filename):
        logger.debug(env, output)
        for dev in [attached_dev, natty_dev]:
            if wait_for_exists(dev):
                return dev

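# Usage sketch for get_vol_dev (hypothetical volume id). Boto reports the
# attachment point as e.g. /dev/sdf, while newer Ubuntu kernels expose it as
# /dev/xvdf, so both names are probed over SSH:
#
#     vol = get_region_conn('us-east-1').get_all_volumes(['vol-11111111'])[0]
#     dev = get_vol_dev(vol)    # e.g. '/dev/xvdf'
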
def update_snap(src_vol, src_mnt, dst_vol, dst_mnt, encr, delete_old=False):
    """Update destination region from `src_vol`.

    Create new snapshot with same description and tags. Delete previous
    snapshot (if exists) of the same volume in destination region if
    ``delete_old`` is True."""
    src_inst = get_inst_by_id(src_vol.region.name,
                              src_vol.attach_data.instance_id)
    dst_inst = get_inst_by_id(dst_vol.region.name,
                              dst_vol.attach_data.instance_id)
    rsync_mountpoints(src_inst, src_vol, src_mnt, dst_inst, dst_vol, dst_mnt,
                      encr)
    src_snap = src_vol.connection.get_all_snapshots([src_vol.snapshot_id])[0]
    create_snapshot(dst_vol, description=src_snap.description,
                    tags=src_snap.tags, synchronously=False)
    if delete_old and dst_vol.snapshot_id:
        old_snap = dst_vol.connection.get_all_snapshots(
            [dst_vol.snapshot_id])[0]
        logger.info('Deleting previous {0} in {1}'.format(old_snap,
                                                          dst_vol.region))
        old_snap.delete()

def modify_instance_termination(region, instance_id):
    """Mark production instances as ineligible for termination.

    region
        name of the region where the instance is located;
    instance_id
        instance to be updated.

    You must change the value of the preconfigured tag_name and run this
    command before terminating a production instance via the API."""
    conn = get_region_conn(region)
    inst = get_inst_by_id(conn.region.name, instance_id)
    prod_tag = config.get('DEFAULT', 'TAG_NAME')
    prod_val = config.get('DEFAULT', 'TAG_VALUE')
    inst_tag_val = inst.tags.get(prod_tag)
    inst.modify_attribute('disableApiTermination', inst_tag_val == prod_val)

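# Usage sketch for modify_instance_termination (hypothetical region and
# instance id). Sets disableApiTermination according to whether the instance
# carries the production tag configured in the DEFAULT section:
#
#     modify_instance_termination('us-east-1', 'i-12345678')
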
def get_vol_dev(vol):
    """Return OS-specific volume representation as attached device."""
    assert vol.attach_data.instance_id
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    assert inst.public_dns_name, 'Instance is down'
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    attached_dev = vol.attach_data.device
    natty_dev = attached_dev.replace('sd', 'xvd')
    representations = [attached_dev, natty_dev]
    with settings(host_string=inst.public_dns_name,
                  key_filename=key_filename):
        logger.debug(env, output)
        for dev in representations:
            if wait_for_exists(dev):
                return dev
        raise NoDevFoundError(
            'Nothing from {variants} was located at {host.state} {host} for '
            '{vol} with {vol.attach_data.__dict__}'.format(
                host=inst, vol=vol, variants=representations))

def mount_volume(vol, mkfs=False):
    """Mount the device by SSH. Return mountpoint on success.

    vol
        volume to be mounted on the instance it is attached to."""
    wait_for(vol, 'attached', ['attach_data', 'status'])
    inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    key_filename = config.get(vol.region.name, 'KEY_FILENAME')
    with settings(host_string=inst.public_dns_name,
                  key_filename=key_filename):
        dev = get_vol_dev(vol)
        mountpoint = dev.replace('/dev/', '/media/')
        wait_for_sudo('mkdir -p {0}'.format(mountpoint))
        if mkfs:
            sudo('mkfs.ext3 {dev}'.format(dev=dev))
        sudo('mount {dev} {mnt}'.format(dev=dev, mnt=mountpoint))
        if mkfs:
            sudo('chown -R {user}:{user} {mnt}'.format(user=env.user,
                                                       mnt=mountpoint))
    logger.debug('Mounted {0} to {1} at {2}'.format(vol, inst, mountpoint))
    return mountpoint

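# Usage sketch for mount_volume (hypothetical volume id). The volume must
# already be attached to a running instance; pass mkfs=True only for a blank
# volume, since it reformats the device with ext3:
#
#     vol = get_region_conn('us-east-1').get_all_volumes(['vol-11111111'])[0]
#     mnt = mount_volume(vol)    # e.g. '/media/xvdf'
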
def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously
        wait for successful completion;
    description
        description for snapshot. Will be compiled from instance
        parameters by default;
    tags
        tags to be added to snapshot. Will be cloned from volume and from
        instance by default;
    consistent
        if True, the script will try to freeze the FS mountpoint and create
        the snapshot while it is frozen, with all buffers flushed to disk.
    """
    if vol.attach_data:
        inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    else:
        inst = None
    if not description and inst:
        description = dumps({
            'Volume': vol.id,
            'Region': vol.region.name,
            'Device': vol.attach_data.device,
            'Instance': inst.id,
            'Type': inst.instance_type,
            'Arch': inst.architecture,
            'Root_dev_name': inst.root_device_name,
            'Time': timestamp(),
        })

    def freeze_volume():
        key_filename = config.get(inst.region.name, 'KEY_FILENAME')
        try:
            _user = config.get('SYNC', 'USERNAME')
        except:
            _user = USERNAME
        with settings(host_string=inst.public_dns_name,
                      key_filename=key_filename, user=_user):
            run('sync', shell=False)
            run('for i in {1..20}; do sync; sleep 1; done &')

    def initiate_snapshot():
        if consistent:
            if inst.state == 'running':
                try:
                    freeze_volume()
                except:
                    logger.info('FS NOT FROZEN! '
                                'Do you have access to this server?')
        snapshot = vol.create_snapshot(description)
        if tags:
            add_tags(snapshot, tags)
        else:
            add_tags(snapshot, vol.tags)
            if inst:
                add_tags(snapshot, inst.tags)
        logger.info('{0} started from {1} in {0.region}'.format(snapshot,
                                                                vol))
        return snapshot

    if synchronously:
        while True:    # Retry until success, deleting failed snapshots.
            snapshot = initiate_snapshot()
            try:
                wait_for(snapshot, '100%', limit=SNAP_TIME)
                assert snapshot.status == 'completed', (
                    'completed with wrong status {0}'.format(snapshot.status))
            except (StateNotChangedError, AssertionError) as err:
                logger.error(str(err) + ' - deleting')
                snapshot.delete()
            else:
                break
    else:
        snapshot = initiate_snapshot()
    return snapshot

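# Usage sketch for create_snapshot (hypothetical volume id). With
# consistent=True the filesystem is synced over SSH right before the
# snapshot is started; with synchronously=True failed snapshots are deleted
# and the snapshot is retried until one completes:
#
#     vol = get_region_conn('us-east-1').get_all_volumes(['vol-11111111'])[0]
#     snap = create_snapshot(vol, synchronously=True, consistent=True)
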
def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously
        wait for successful completion;
    description
        description for snapshot. Will be compiled from instance
        parameters by default;
    tags
        tags to be added to snapshot. Will be cloned from volume and from
        instance by default;
    consistent
        if True, the script will try to freeze the FS mountpoint and create
        the snapshot while it is frozen, with all buffers flushed to disk.
    """
    if vol.attach_data:
        inst = get_inst_by_id(vol.region.name, vol.attach_data.instance_id)
    else:
        inst = None
    if not description and inst:
        description = dumps({
            'Volume': vol.id,
            'Region': vol.region.name,
            'Device': vol.attach_data.device,
            'Instance': inst.id,
            'Type': inst.instance_type,
            'Arch': inst.architecture,
            'Root_dev_name': inst.root_device_name,
            'Time': timestamp(),
        })

    def freeze_volume():
        key_filename = config.get(inst.region.name, 'KEY_FILENAME')
        try:
            _user = config.get('SYNC', 'USERNAME')
        except:
            _user = USERNAME
        with settings(host_string=inst.public_dns_name,
                      key_filename=key_filename, user=_user):
            wait_for_sudo('sync', shell=False)
            run('for i in {1..20}; do sudo sync; sleep 1; done &')

    def initiate_snapshot():
        if consistent:
            if inst.state == 'running':
                try:
                    freeze_volume()
                except:
                    logger.info('FS NOT FROZEN! '
                                'Do you have access to this server?')
        snapshot = vol.create_snapshot(description)
        if tags:
            add_tags(snapshot, tags)
        else:
            add_tags(snapshot, vol.tags)
            if inst:
                add_tags(snapshot, inst.tags)
        logger.info('{0} started from {1} in {0.region}'.format(snapshot,
                                                                vol))
        return snapshot

    if synchronously:
        while True:    # Retry until success, deleting failed snapshots.
            snapshot = initiate_snapshot()
            try:
                wait_for(snapshot, '100%', limit=SNAP_TIME)
                assert snapshot.status == 'completed', (
                    'completed with wrong status {0}'.format(snapshot.status))
            except (StateNotChangedError, AssertionError) as err:
                logger.error(str(err) + ' - deleting')
                snapshot.delete()
            else:
                break
    else:
        snapshot = initiate_snapshot()
    return snapshot
