def is_group_in(region, group_name):
    try:
        get_region_conn(region.name).get_all_security_groups([group_name])
    except EC2ResponseError:
        return False
    else:
        return True
Example #2
def cleanup_security_groups(delete=False):
    """
    Delete unused AWS Security Groups.

    :type delete: boolean
    :param delete: notify only (i.e. False) by default.

    If a security group with the same name is used in at least one
    region, it is treated as used.
    """
    groups = defaultdict(lambda: {})
    used_groups = set(["default", config.get("DEFAULT", "HTTPS_SECURITY_GROUP")])
    regions = get_region_conn().get_all_regions()
    for reg in regions:
        for s_g in get_region_conn(reg.name).get_all_security_groups():
            groups[s_g.name][reg] = s_g
            if s_g.instances():  # Security Group is used by instance.
                used_groups.add(s_g.name)
            for rule in s_g.rules:
                for grant in rule.grants:
                    if grant.name and grant.owner_id == s_g.owner_id:
                        used_groups.add(grant.name)  # SG is used by group.
    for grp in used_groups:
        del groups[grp]

    for grp in sorted(groups):
        if delete:
            for reg in groups[grp]:
                s_g = groups[grp][reg]
                logger.info("Deleting {0} in {1}".format(s_g, reg))
                s_g.delete()
        else:
            msg = '"SecurityGroup:{grp}" should be removed from {regs}'
            logger.info(msg.format(grp=grp, regs=groups[grp].keys()))
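A minimal usage sketch, assuming the task above is importable from the project's fabfile module (a hypothetical path used here only for illustration):

from fabfile import cleanup_security_groups  # hypothetical import path

cleanup_security_groups()             # dry run: only logs unused groups
cleanup_security_groups(delete=True)  # actually delete them in every region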
Example #3
def rsync_region(
        src_region_name, dst_region_name, tag_name=DEFAULT_TAG_NAME,
        tag_value=DEFAULT_TAG_VALUE, native_only=True):
    """Duplicates latest snapshots with given tag into dst_region.

    src_region_name, dst_region_name
        every latest volume snapshot from src_region will be rsynced
        to the dst_region;
    tag_name, tag_value
        snapshots will be filtered by tag. Tag will be fetched from
        config by default;
    native_only
        sync only snapshots created in src_region_name. True by
        default."""
    src_conn = get_region_conn(src_region_name)
    dst_conn = get_region_conn(dst_region_name)
    snaps = get_relevant_snapshots(src_conn, tag_name, tag_value, native_only)
    if not snaps:
        return
    with nested(create_temp_inst(src_conn.region),
                create_temp_inst(dst_conn.region)) as (src_inst, dst_inst):
        snaps = sorted(snaps, key=get_snap_vol)    # Prepare for grouping.
        for vol, vol_snaps in groupby(snaps, get_snap_vol):
            latest_snap = sorted(vol_snaps, key=get_snap_time)[-1]
            for inst in src_inst, dst_inst:
                logger.debug('Rebooting {0} in {0.region} '
                             'to refresh attachments'.format(inst))
                inst.reboot()
            args = (src_region_name, latest_snap.id, dst_region_name, src_inst,
                    dst_inst)
            try:
                rsync_snapshot(*args)
            except Exception:
                logger.exception('rsync of {1} from {0} to {2} failed'.format(
                    *args))
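A hedged usage sketch of the task above (the import path is an assumption):

from fabfile import rsync_region  # hypothetical import path

# Replicate the latest tagged snapshot of every volume from us-east-1
# into eu-west-1; native_only=True skips snapshots that are themselves
# replicas.
rsync_region('us-east-1', 'eu-west-1')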
Example #4
def backup_instances_by_tag(
        region_name=None, tag_name=DEFAULT_TAG_NAME,
        tag_value=DEFAULT_TAG_VALUE, synchronously=False, consistent=False):
    """Creates backup for all instances with given tag in region.

    region_name
        by default the task is applied across all regions;
    tag_name, tag_value
        will be fetched from config by default;
    synchronously
        wait for successful completion. False by default;
    consistent
        if True, then FS mountpoint will be frozen before snapshotting.

    .. note:: when the ``create_ami`` task compiles an AMI from several
              snapshots, it requires their start_time values to differ
              by at most 10 minutes. Snapshot completion may take much
              longer, so only asynchronously generated snapshots can be
              assembled reliably."""
    if region_name:
        regions = [get_region_conn(region_name).region]
    else:
        regions = get_region_conn().get_all_regions()
    for reg in regions:
        conn = get_region_conn(reg.name)
        filters = {'resource-type': 'instance', 'key': tag_name,
                   'tag-value': tag_value}
        for tag in conn.get_all_tags(filters=filters):
            backup_instance(reg.name, instance_id=tag.res_id,
                            synchronously=synchronously, consistent=consistent)
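A short usage sketch under the same assumption about the import path:

from fabfile import backup_instances_by_tag  # hypothetical import path

# Snapshot every tagged instance in a single region, waiting for the
# snapshots to complete and freezing filesystems first.
backup_instances_by_tag('us-east-1', synchronously=True, consistent=True)
# With no region_name the task sweeps all regions, taking the tag
# name/value from the config.
backup_instances_by_tag()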
Example #5
def rsync_region(
        src_region_name, dst_region_name, tag_name=DEFAULT_TAG_NAME,
        tag_value=DEFAULT_TAG_VALUE, native_only=True):
    """Duplicates latest snapshots with given tag into dst_region.

    src_region_name, dst_region_name
        every latest volume snapshot from src_region will be rsynced
        to the dst_region;
    tag_name, tag_value
        snapshots will be filtered by tag. Tag will be fetched from
        config by default;
    native_only
        sync only snapshots created in src_region_name. True by
        default."""
    src_conn = get_region_conn(src_region_name)
    dst_conn = get_region_conn(dst_region_name)
    snaps = get_relevant_snapshots(src_conn, tag_name, tag_value, native_only)
    if not snaps:
        return
    with nested(create_temp_inst(src_conn.region),
                create_temp_inst(dst_conn.region)) as (src_inst, dst_inst):
        snaps = sorted(snaps, key=get_snap_vol)    # Prepare for grouping.
        for vol, vol_snaps in groupby(snaps, get_snap_vol):
            latest_snap = sorted(vol_snaps, key=get_snap_time)[-1]
            for inst in src_inst, dst_inst:
                logger.debug('Rebooting {0} in {0.region} '
                             'to refresh attachments'.format(inst))
                inst.reboot()
            args = (src_region_name, latest_snap.id, dst_region_name, src_inst,
                    dst_inst)
            try:
                rsync_snapshot(*args)
            except Exception:
                logger.exception('rsync of {1} from {0} to {2} failed'.format(
                    *args))
Example #6
def create_temp_inst(region=None, zone=None, key_pair=None, security_groups='',
                     synchronously=False):
    if region and zone:
        assert zone in get_region_conn(region.name).get_all_zones(), (
            '{0} doesn\'t belong to {1}'.format(zone, region))

    def create_inst_in_zone(zone, key_pair, sec_grps):
        inst = create_instance(zone.region.name, zone.name, key_pair=key_pair,
                               security_groups=sec_grps)
        inst.add_tag(config.get('DEFAULT', 'TAG_NAME'), 'temporary')
        return inst

    if zone:
        inst = create_inst_in_zone(zone, key_pair, security_groups)
    else:
        for zone in get_region_conn(region.name).get_all_zones():
            try:
                inst = create_inst_in_zone(zone, key_pair, security_groups)
            except BotoServerError as err:
                logging.debug(format_exc())
                logging.error('{0} in {1}'.format(err, zone))
                continue
            else:
                break
    try:
        yield inst
    finally:
        logger.info('Terminating the {0} in {0.region}...'.format(inst))
        inst.terminate()
        if synchronously:
            wait_for(inst, 'terminated')
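Since the function yields an instance and cleans it up in a finally block, it is presumably decorated with contextlib.contextmanager upstream; a hedged usage sketch (import paths are assumptions):

from fabfile import create_temp_inst, get_region_conn  # hypothetical paths

region = get_region_conn('us-east-1').region
with create_temp_inst(region, synchronously=True) as inst:
    # The temporary instance is tagged 'temporary' and terminated
    # (and waited on, because synchronously=True) when the block exits.
    print('Temporary instance {0} is running'.format(inst))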
Example #7
def cleanup_security_groups(delete=False):
    """
    Delete unused AWS Security Groups.

    :type delete: boolean
    :param delete: notify only (i.e. False) by default.

    If a security group with the same name is used in at least one
    region, it is treated as used.
    """
    groups = defaultdict(lambda: {})
    used_groups = set(
        ['default', config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')])
    regions = get_region_conn().get_all_regions()
    for reg in regions:
        for s_g in get_region_conn(reg.name).get_all_security_groups():
            groups[s_g.name][reg] = s_g
            if s_g.instances():  # Security Group is used by instance.
                used_groups.add(s_g.name)
            for rule in s_g.rules:
                for grant in rule.grants:
                    if grant.name and grant.owner_id == s_g.owner_id:
                        used_groups.add(grant.name)  # SG is used by group.
    for grp in used_groups:
        del groups[grp]

    for grp in sorted(groups):
        if delete:
            for reg in groups[grp]:
                s_g = groups[grp][reg]
                logger.info('Deleting {0} in {1}'.format(s_g, reg))
                s_g.delete()
        else:
            msg = '"SecurityGroup:{grp}" should be removed from {regs}'
            logger.info(msg.format(grp=grp, regs=groups[grp].keys()))
Example #8
def is_group_in(region, group_name):
    try:
        get_region_conn(region.name).get_all_security_groups([group_name])
    except EC2ResponseError:
        return False
    else:
        return True
Example #9
def backup_instances_by_tag(
        region_name=None, tag_name=DEFAULT_TAG_NAME,
        tag_value=DEFAULT_TAG_VALUE, synchronously=False, consistent=False):
    """Creates backup for all instances with given tag in region.

    region_name
        by default the task is applied across all regions;
    tag_name, tag_value
        will be fetched from config by default;
    synchronously
        wait for successful completion. False by default;
    consistent
        if True, then FS mountpoint will be frozen before snapshotting.

    .. note:: when the ``create_ami`` task compiles an AMI from several
              snapshots, it requires their start_time values to differ
              by at most 10 minutes. Snapshot completion may take much
              longer, so only asynchronously generated snapshots can be
              assembled reliably."""
    if region_name:
        regions = [get_region_conn(region_name).region]
    else:
        regions = get_region_conn().get_all_regions()
    for reg in regions:
        conn = get_region_conn(reg.name)
        filters = {'resource-type': 'instance', 'key': tag_name,
                   'tag-value': tag_value}
        for tag in conn.get_all_tags(filters=filters):
            backup_instance(reg.name, instance_id=tag.res_id,
                            synchronously=synchronously, consistent=consistent)
Example #10
def delete_broken_snapshots():
    """Delete snapshots with status 'error'."""
    for region in get_region_conn().get_all_regions():
        conn = get_region_conn(region.name)
        filters = {'status': 'error'}
        snaps = conn.get_all_snapshots(owner='self', filters=filters)
        for snp in snaps:
            logger.info('Deleting broken {0}'.format(snp))
            snp.delete()
Example #11
def delete_broken_snapshots():
    """Delete snapshots with status 'error'."""
    for region in get_region_conn().get_all_regions():
        conn = get_region_conn(region.name)
        filters = {'status': 'error'}
        snaps = conn.get_all_snapshots(owner='self', filters=filters)
        for snp in snaps:
            logger.info('Deleting broken {0}'.format(snp))
            snp.delete()
Example #12
def rsync_snapshot(src_region_name, snapshot_id, dst_region_name,
                   src_inst=None, dst_inst=None):

    """Duplicate the snapshot into dst_region.

    src_region_name, dst_region_name
        Amazon region names. Allowed to be contracted, e.g.
        `ap-southeast-1` will be recognized in `ap-south` or even
        `ap-s`;
    snapshot_id
        snapshot to duplicate;
    src_inst, dst_inst
        if provided, will be used instead of creating new temporary
        instances.

    You'll need to open port 60000 for encrypted instances replication."""
    src_conn = get_region_conn(src_region_name)
    src_snap = src_conn.get_all_snapshots([snapshot_id])[0]
    dst_conn = get_region_conn(dst_region_name)
    _src_device = get_snap_device(src_snap)
    _src_dev = re.match(r'^/dev/sda$', _src_device)  # check for encryption
    if _src_dev:
        encr = True
        logger.info('Found traces of encryption')
    else:
        encr = None

    info = 'Going to transmit {snap.volume_size} GiB {snap} {snap.description}'
    if src_snap.tags.get('Name'):
        info += ' of {name}'
    info += ' from {snap.region} to {dst}'
    logger.info(info.format(snap=src_snap, dst=dst_conn.region,
                            name=src_snap.tags.get('Name')))

    dst_snaps = dst_conn.get_all_snapshots(owner='self')
    dst_snaps = [snp for snp in dst_snaps if not snp.status == 'error']
    src_vol = get_snap_vol(src_snap)
    vol_snaps = [snp for snp in dst_snaps if get_snap_vol(snp) == src_vol]

    if vol_snaps:
        dst_snap = sorted(vol_snaps, key=get_snap_time)[-1]
        if get_snap_time(dst_snap) >= get_snap_time(src_snap):
            kwargs = dict(src=src_snap, dst=dst_snap, dst_reg=dst_conn.region)
            logger.info('Stepping over {src} - it\'s not newer than {dst} '
                        '{dst.description} in {dst_reg}'.format(**kwargs))
            return
    else:
        dst_snap = create_empty_snapshot(dst_conn.region, src_snap.volume_size)

    with nested(attach_snapshot(src_snap, inst=src_inst, encr=encr),
                attach_snapshot(dst_snap, inst=dst_inst, encr=encr)) as (
                (src_vol, src_mnt), (dst_vol, dst_mnt)):
        update_snap(src_vol, src_mnt, dst_vol, dst_mnt, encr,
                    delete_old=not vol_snaps)  # Delete only empty snapshots.
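A hedged usage sketch (import path and snapshot ID are placeholders):

from fabfile import rsync_snapshot  # hypothetical import path

# Duplicate one snapshot into another region; temporary instances are
# created on both sides because src_inst/dst_inst are not passed.
rsync_snapshot('us-east-1', 'snap-12345678', 'eu-west-1')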
Example #13
def trim_snapshots(region_name=None, dry_run=False):
    """Delete old snapshots logarithmically back in time.

    region_name
        by default process all regions;
    dry_run
        boolean, only print info about old snapshots to be deleted."""
    delete_broken_snapshots()
    if region_name:
        regions = [get_region_conn(region_name).region]
    else:
        regions = get_region_conn().get_all_regions()
    for reg in regions:
        logger.info('Processing {0}'.format(reg))
        _trim_snapshots(reg, dry_run=dry_run)
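Usage sketch, with the same caveat about the import path:

from fabfile import trim_snapshots  # hypothetical import path

trim_snapshots(dry_run=True)   # report what would be deleted, all regions
trim_snapshots('us-east-1')    # actually trim a single region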
Example #14
def trim_snapshots(region_name=None, dry_run=False):
    """Delete old snapshots logarithmically back in time.

    region_name
        by default process all regions;
    dry_run
        boolean, only print info about old snapshots to be deleted."""
    delete_broken_snapshots()
    if region_name:
        regions = [get_region_conn(region_name).region]
    else:
        regions = get_region_conn().get_all_regions()
    for reg in regions:
        logger.info('Processing {0}'.format(reg))
        _trim_snapshots(reg, dry_run=dry_run)
Example #15
def modify_kernel(region, instance_id):
    """
    Modify the kernel of a stopped instance (needed to make pv-grub work).

    .. note:: install grub-legacy-ec2 and apply upgrades before running
              this.

    region
        specify instance region;
    instance_id
        specify instance id for kernel change
    Kernels list:
        ap-southeast-1      x86_64  aki-11d5aa43
        ap-southeast-1  i386    aki-13d5aa41
        eu-west-1       x86_64  aki-4feec43b
        eu-west-1       i386    aki-4deec439
        us-east-1       x86_64  aki-427d952b
        us-east-1       i386    aki-407d9529
        us-west-1       x86_64  aki-9ba0f1de
        us-west-1       i386    aki-99a0f1dc"""
    key_filename = config.get(region, 'KEY_FILENAME')
    conn = get_region_conn(region)
    instance = get_inst_by_id(conn.region.name, instance_id)
    env.update({
        'host_string': instance.public_dns_name,
        'key_filename': key_filename,
    })
    sudo('env DEBIAN_FRONTEND=noninteractive apt-get update && '
         'env DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade && '
         'env DEBIAN_FRONTEND=noninteractive apt-get install grub-legacy-ec2')
    kernel = config.get(conn.region.name,
                        'KERNEL' + instance.architecture.upper())
    instance.stop()
    wait_for(instance, 'stopped')
    instance.modify_attribute('kernel', kernel)
    instance.start()
Example #16
def new_security_group(region, name=None, description=None):
    """Create Security Groups with SSH access."""
    s_g = get_region_conn(region.name).create_security_group(
        name or INST_SPECIFIC_SG_PREFIX + timestamp(), description or "Created for using with specific instance"
    )
    s_g.authorize("tcp", 22, 22, "0.0.0.0/0")
    return s_g
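A usage sketch; note the function expects a region object rather than a region name (import paths are assumptions):

from fabfile import get_region_conn, new_security_group  # hypothetical

region = get_region_conn('us-east-1').region
s_g = new_security_group(region)  # auto-named, SSH open to 0.0.0.0/0
print(s_g.name)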
Example #17
def mount_snapshot(region_name, snap_id, inst_id=None):

    """Mount snapshot to temporary created instance or inst_id.

    region_name, snap_id
        specify snapshot.
    inst_id
        attach to existing instance. Will be created temporary if
        None."""

    conn = get_region_conn(region_name)
    inst = get_inst_by_id(conn.region.name, inst_id) if inst_id else None
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]

    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    with attach_snapshot(snap, inst=inst) as (vol, mountpoint):
        if mountpoint:
            info += ('\nand browse snapshot, mounted at {mountpoint}.')
        else:
            info += ('\nand mount {device}. NOTE: device name may be '
                     'altered by system.')
        key_file = config.get(conn.region.name, 'KEY_FILENAME')
        inst = get_inst_by_id(conn.region.name, vol.attach_data.instance_id)
        assert inst
        logger.info(info.format(inst=inst, user=env.user, key=key_file,
            device=vol.attach_data.device, mountpoint=mountpoint))

        info = ('\nEnter FINISHED if you are finished looking at the '
                'backup and would like to cleanup: ')
        while raw_input(info).strip() != 'FINISHED':
            pass
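Usage sketch under the same import-path assumption:

from fabfile import mount_snapshot  # hypothetical import path

# Attach the snapshot to a freshly created temporary instance and keep
# it mounted until FINISHED is typed at the prompt.
mount_snapshot('us-east-1', 'snap-12345678')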
Example #18
def backup_instance(region_name, instance_id=None, instance=None,
                    synchronously=False, consistent=False):
    """
    Return list of created snapshots for specified instance.

    region_name
        instance location;
    instance, instance_id
        either `instance_id` or `instance` argument should be specified;
    synchronously
        wait for successful completion. False by default.
    consistent
        if True, then FS mountpoint will be frozen before snapshotting.
        False by default.
    """
    assert bool(instance_id) ^ bool(instance), ('Either instance_id or '
        'instance should be specified')
    conn = get_region_conn(region_name)
    if instance_id:
        instance = get_inst_by_id(conn.region.name, instance_id)
    snapshots = []
    for dev in instance.block_device_mapping:
        vol_id = instance.block_device_mapping[dev].volume_id
        vol = conn.get_all_volumes([vol_id])[0]
        snapshots.append(create_snapshot(vol, synchronously=synchronously,
                         consistent=consistent))
    return snapshots
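A usage sketch (import path and instance ID are placeholders); exactly one of instance_id or instance must be supplied:

from fabfile import backup_instance  # hypothetical import path

snaps = backup_instance('us-east-1', instance_id='i-12345678',
                        synchronously=True)
for snap in snaps:
    print('{0} {1}'.format(snap.id, snap.status))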
Example #19
def new_security_group(region, name=None, description=None):
    """Create Security Groups with SSH access."""
    s_g = get_region_conn(region.name).create_security_group(
        name or INST_SPECIFIC_SG_PREFIX + timestamp(), description
        or 'Created for using with specific instance')
    s_g.authorize('tcp', 22, 22, '0.0.0.0/0')
    return s_g
Example #20
def backup_instance(region_name, instance_id=None, instance=None,
                    synchronously=False, consistent=False):
    """
    Return list of created snapshots for specified instance.

    region_name
        instance location;
    instance, instance_id
        either `instance_id` or `instance` argument should be specified;
    synchronously
        wait for successful completion. False by default.
    consistent
        if True, then FS mountpoint will be frozen before snapshotting.
        False by default.
    """
    assert bool(instance_id) ^ bool(instance), ('Either instance_id or '
        'instance should be specified')
    conn = get_region_conn(region_name)
    if instance_id:
        instance = get_inst_by_id(conn.region.name, instance_id)
    snapshots = []
    for dev in instance.block_device_mapping:
        vol_id = instance.block_device_mapping[dev].volume_id
        vol = conn.get_all_volumes([vol_id])[0]
        snapshots.append(create_snapshot(vol, synchronously=synchronously,
                         consistent=consistent))
    return snapshots
Example #21
def modify_kernel(region, instance_id):
    """
    Modify the kernel of a stopped instance (needed to make pv-grub work).

    .. note:: install grub-legacy-ec2 and apply upgrades before running
              this.

    region
        specify instance region;
    instance_id
        specify instance id for kernel change
    Kernels list:
        ap-southeast-1      x86_64  aki-11d5aa43
        ap-southeast-1  i386    aki-13d5aa41
        eu-west-1       x86_64  aki-4feec43b
        eu-west-1       i386    aki-4deec439
        us-east-1       x86_64  aki-427d952b
        us-east-1       i386    aki-407d9529
        us-west-1       x86_64  aki-9ba0f1de
        us-west-1       i386    aki-99a0f1dc"""
    key_filename = config.get(region, 'KEY_FILENAME')
    conn = get_region_conn(region)
    instance = get_inst_by_id(conn.region.name, instance_id)
    env.update({
        'host_string': instance.public_dns_name,
        'key_filename': key_filename,
    })
    sudo('env DEBIAN_FRONTEND=noninteractive apt-get update && '
         'env DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade && '
         'env DEBIAN_FRONTEND=noninteractive apt-get install grub-legacy-ec2')
    kernel = config.get(conn.region.name,
                        'KERNEL' + instance.architecture.upper())
    instance.stop()
    wait_for(instance, 'stopped')
    instance.modify_attribute('kernel', kernel)
    instance.start()
Example #22
def create_instance(
        region_name='us-east-1', zone_name=None, key_pair=None,
        security_groups='', architecture=None, user_data=None, inst_type=None):
    """
    Create AWS EC2 instance.

    Return created instance.

    region_name
        by default will be created in the us-east-1 region;
    zone_name
        string-formatted name, may be omitted;
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';';
    architecture
        "i386" or "x86_64";
    inst_type
        by default will be fetched from AMI description or used
        't1.micro' if not mentioned in the description.
    """
    conn = get_region_conn(region_name)

    ami_ptrn = config.get(conn.region.name, 'AMI_PTRN')
    architecture = architecture or config.get('DEFAULT', 'ARCHITECTURE')
    ubuntu_aws_account = config.get('DEFAULT', 'UBUNTU_AWS_ACCOUNT')
    filters = {'owner_id': ubuntu_aws_account, 'architecture': architecture,
             'name': ami_ptrn, 'image_type': 'machine',
             'root_device_type': 'ebs'}
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest version.
    ptrn = re.compile(config.get(conn.region.name, 'AMI_REGEXP'))
    versions = set([ptrn.search(img.name).group('version') for img in images])

    def complement(year_month):
        return '0' + year_month if len(year_month) == 4 else year_month

    latest_version = sorted(set(filter(complement, versions)))[-1]  # XXX Y3K.
    ami_ptrn_with_version = config.get(
        conn.region.name, 'AMI_PTRN_WITH_VERSION')
    name_with_version = ami_ptrn_with_version.format(version=latest_version)
    filters.update({'name': name_with_version})
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest release date.
    dates = set([ptrn.search(img.name).group('released_at') for img in images])
    latest_date = sorted(set(dates))[-1]
    ami_ptrn_with_release_date = config.get(
        conn.region.name, 'AMI_PTRN_WITH_RELEASE_DATE')
    name_with_version_and_release = ami_ptrn_with_release_date.format(
        version=latest_version, released_at=latest_date)
    filters.update({'name': name_with_version_and_release})
    image = conn.get_all_images(filters=filters)[0]
    return launch_instance_from_ami(
        region_name, image.id, security_groups=security_groups,
        key_pair=key_pair, zone_name=zone_name, user_data=user_data,
        inst_type=inst_type)
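Usage sketch with placeholder group names (the import path is an assumption):

from fabfile import create_instance  # hypothetical import path

# Launch the newest matching Ubuntu EBS AMI in a chosen zone with two
# pre-existing security groups.
inst = create_instance('us-east-1', zone_name='us-east-1a',
                       security_groups='ssh-only;web')
print(inst.id)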
Example #23
def sync_rules_by_id(src_reg_name, src_grp_id, dst_reg_name, dst_grp_id):
    """Update Security Group rules from other Security Group.

    Works across regions as well. The sole exception is granted groups
    owned by another user: such groups can't be copied.

    :param src_reg_name: region name
    :type src_reg_name: str
    :param src_grp_id: group ID
    :type src_grp_id: str
    :param dst_reg_name: region name
    :type dst_reg_name: str
    :param dst_grp_id: group ID
    :type dst_grp_id: str"""
    src_grp = get_region_conn(src_reg_name).get_all_security_groups(filters={"group-id": src_grp_id})[0]
    dst_grp = get_region_conn(dst_reg_name).get_all_security_groups(filters={"group-id": dst_grp_id})[0]
    sync_rules(src_grp, dst_grp)
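Usage sketch with placeholder group IDs (import path assumed):

from fabfile import sync_rules_by_id  # hypothetical import path

# Mirror the rules of a group in us-east-1 onto a group in eu-west-1.
sync_rules_by_id('us-east-1', 'sg-11111111', 'eu-west-1', 'sg-22222222')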
Example #24
def rsync_region(src_region_name, dst_region_name, tag_name=None,
                 tag_value=None, native_only=True):
    """Duplicates latest snapshots with given tag into dst_region.

    src_region_name, dst_region_name
        every latest volume snapshot from src_region will be rsynced
        to the dst_region;
    tag_name, tag_value
        snapshots will be filtered by tag. Tag will be fetched from
        config by default, may be configured per region;
    native_only
        sync only snapshots created in src_region_name. True by
        default."""
    src_conn = get_region_conn(src_region_name)
    dst_conn = get_region_conn(dst_region_name)
    tag_name = tag_name or config.get(src_conn.region.name, 'TAG_NAME')
    tag_value = tag_value or config.get(src_conn.region.name, 'TAG_VALUE')
    filters = {'tag-key': tag_name, 'tag-value': tag_value}
    snaps = src_conn.get_all_snapshots(owner='self', filters=filters)
    snaps = [snp for snp in snaps if not snp.status == 'error']
    _is_described = lambda snap: get_snap_vol(snap) and get_snap_time(snap)
    snaps = [snp for snp in snaps if _is_described(snp)]
    if native_only:

        def is_native(snap, region):
            return get_descr_attr(snap, 'Region') == region.name
        snaps = [snp for snp in snaps if is_native(snp, src_conn.region)]

    with nested(create_temp_inst(src_conn.region),
                create_temp_inst(dst_conn.region)) as (src_inst, dst_inst):
        snaps = sorted(snaps, key=get_snap_vol)    # Prepare for grouping.
        for vol, vol_snaps in groupby(snaps, get_snap_vol):
            latest_snap = sorted(vol_snaps, key=get_snap_time)[-1]
            for inst in src_inst, dst_inst:
                logger.debug('Rebooting {0} in {0.region} '
                             'to refresh attachments'.format(inst))
                inst.reboot()
            args = (src_region_name, latest_snap.id, dst_region_name, src_inst,
                    dst_inst)
            try:
                rsync_snapshot(*args)
            except Exception:
                logger.exception('rsync of {1} from {0} to {2} failed'.format(
                    *args))
Example #25
def sync_rules_by_id(src_reg_name, src_grp_id, dst_reg_name, dst_grp_id):
    """Update Security Group rules from other Security Group.

    Works across regions as well. The sole exception is granted groups
    owned by another user: such groups can't be copied.

    :param src_reg_name: region name
    :type src_reg_name: str
    :param src_grp_id: group ID
    :type src_grp_id: str
    :param dst_reg_name: region name
    :type dst_reg_name: str
    :param dst_grp_id: group ID
    :type dst_grp_id: str"""
    src_grp = get_region_conn(src_reg_name).get_all_security_groups(
        filters={'group-id': src_grp_id})[0]
    dst_grp = get_region_conn(dst_reg_name).get_all_security_groups(
        filters={'group-id': dst_grp_id})[0]
    sync_rules(src_grp, dst_grp)
Example #26
def rsync_all_regions(primary_backup_region, secondary_backup_region):
    """
    Replicates snapshots across all regions.

    Snapshots from all regions except the primary one are synced into
    the primary region, and then snapshots from the primary region are
    replicated into the secondary backup region.

    :param primary_backup_region: AWS region name that keeps all
        snapshots clones;
    :type primary_backup_region: str
    :param secondary_backup_region: AWS region name that keeps clones of
        snapshots from `primary_backup_region`.
    :type secondary_backup_region: str
    """
    pri_name = get_region_conn(primary_backup_region).region.name
    all_regs = get_region_conn().get_all_regions()
    for reg in (reg for reg in all_regs if reg.name != pri_name):
        rsync_region(reg.name, primary_backup_region)
    rsync_region(primary_backup_region, secondary_backup_region)
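Usage sketch (import path assumed):

from fabfile import rsync_all_regions  # hypothetical import path

# Funnel snapshots from every other region into us-east-1, then mirror
# us-east-1 into eu-west-1 as the second copy.
rsync_all_regions('us-east-1', 'eu-west-1')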
Example #27
def rsync_all_regions(primary_backup_region, secondary_backup_region):
    """
    Replicates snapshots across all regions.

    Snapshots from all regions except the primary one are synced into
    the primary region, and then snapshots from the primary region are
    replicated into the secondary backup region.

    :param primary_backup_region: AWS region name that keeps all
        snapshots clones;
    :type primary_backup_region: str
    :param secondary_backup_region: AWS region name that keeps clones of
        snapshots from `primary_backup_region`.
    :type secondary_backup_region: str
    """
    pri_name = get_region_conn(primary_backup_region).region.name
    all_regs = get_region_conn().get_all_regions()
    for reg in (reg for reg in all_regs if reg.name != pri_name):
        rsync_region(reg.name, primary_backup_region)
    rsync_region(primary_backup_region, secondary_backup_region)
Example #28
def launch_instance_from_ami(region_name,
                             ami_id,
                             inst_type=None,
                             security_groups='',
                             key_pair=None,
                             zone_name=None,
                             user_data=None):
    """Create instance from specified AMI.

    region_name
        location of the AMI and new instance;
    ami_id
        "ami-..."
    inst_type
        by default will be fetched from AMI description or used
        't1.micro' if not mentioned in the description;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    zone_name
        in string format;
    user_data
        string with OS configuration commands."""
    conn = get_region_conn(region_name)
    image = conn.get_all_images([ami_id])[0]
    inst_type = inst_type or get_descr_attr(image, 'Type') or 't1.micro'
    security_groups = filter(None, security_groups.strip(';').split(';'))
    security_groups.append(new_security_group(conn.region))
    logger.info('Launching new instance in {reg} using {image}'.format(
        reg=conn.region, image=image))
    inst = image.run(key_name=key_pair
                     or config.get(conn.region.name, 'KEY_PAIR'),
                     security_groups=security_groups,
                     instance_type=inst_type,
                     user_data=user_data
                     or config.get('user_data', 'USER_DATA'),
                     placement=zone_name).instances[0]
    wait_for(inst, 'running', limit=10 * 60)
    groups = [grp.name for grp in inst.groups]
    inst.add_tag('Security Groups', dumps(groups, separators=(',', ':')))
    add_tags(inst, image.tags)
    modify_instance_termination(conn.region.name, inst.id)
    logger.info('{inst} created in {inst.placement}'.format(inst=inst))
    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    key_file = config.get(conn.region.name, 'KEY_FILENAME')
    logger.info(info.format(inst=inst, user=env.user, key=key_file))
    return inst
Example #29
def create_tmp_volume(region, size):
    """Format new filesystem."""
    with create_temp_inst(region) as inst:
        earmarking_tag = config.get(region.name, 'TAG_NAME')
        try:
            vol = get_region_conn(region.name).create_volume(size,
                                                             inst.placement)
            vol.add_tag(earmarking_tag, 'temporary')
            vol.attach(inst.id, get_avail_dev(inst))
            yield vol, mount_volume(vol, mkfs=True)
        finally:
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
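Like create_temp_inst, this function yields and cleans up in a finally block, so it is presumably used as a context manager upstream; a hedged sketch (import paths assumed):

from fabfile import create_tmp_volume, get_region_conn  # hypothetical

region = get_region_conn('us-east-1').region
with create_tmp_volume(region, size=8) as (vol, mountpoint):
    # The volume is freshly formatted and mounted; it is detached and
    # deleted when the block exits.
    print('{0} mounted at {1}'.format(vol.id, mountpoint))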
Example #30
def create_empty_snapshot(region, size):
    """Format new filesystem."""
    with create_temp_inst(region) as inst:
        vol = get_region_conn(region.name).create_volume(size, inst.placement)
        earmarking_tag = config.get(region.name, 'TAG_NAME')
        vol.add_tag(earmarking_tag, 'temporary')
        vol.attach(inst.id, get_avail_dev(inst))
        mount_volume(vol, mkfs=True)
        snap = vol.create_snapshot()
        snap.add_tag(earmarking_tag, 'temporary')
        vol.detach(True)
        wait_for(vol, 'available')
        vol.delete()
        return snap
Example #31
def create_tmp_volume(region, size):
    """Format new filesystem."""
    with create_temp_inst(region) as inst:
        earmarking_tag = config.get(region.name, 'TAG_NAME')
        try:
            vol = get_region_conn(region.name).create_volume(size,
                                                             inst.placement)
            vol.add_tag(earmarking_tag, 'temporary')
            vol.attach(inst.id, get_avail_dev(inst))
            yield vol, mount_volume(vol, mkfs=True)
        finally:
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
Example #32
def create_temp_inst(region=None,
                     zone=None,
                     key_pair=None,
                     security_groups='',
                     synchronously=False):
    if region and zone:
        assert zone in get_region_conn(
            region.name).get_all_zones(), ('{0} doesn\'t belong to {1}'.format(
                zone, region))

    def create_inst_in_zone(zone, key_pair, sec_grps):
        inst = create_instance(zone.region.name,
                               zone.name,
                               key_pair=key_pair,
                               security_groups=sec_grps)
        inst.add_tag(config.get('DEFAULT', 'TAG_NAME'), 'temporary')
        return inst

    if zone:
        inst = create_inst_in_zone(zone, key_pair, security_groups)
    else:
        for zone in get_region_conn(region.name).get_all_zones():
            try:
                inst = create_inst_in_zone(zone, key_pair, security_groups)
            except BotoServerError as err:
                logging.debug(format_exc())
                logging.error('{0} in {1}'.format(err, zone))
                continue
            else:
                break
    try:
        yield inst
    finally:
        logger.info('Terminating the {0} in {0.region}...'.format(inst))
        inst.terminate()
        if synchronously:
            wait_for(inst, 'terminated')
Example #33
def launch_instance_from_ami(
    region_name, ami_id, inst_type=None, security_groups='', key_pair=None,
    zone_name=None):
    """Create instance from specified AMI.

    region_name
        location of the AMI and new instance;
    ami_id
        "ami-..."
    inst_type
        by default will be fetched from AMI description or used
        't1.micro' if not mentioned in the description;
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    zone_name
        in string format."""
    try:
        user_data = config.get('user_data', 'USER_DATA')
    except Exception:
        user_data = None
    conn = get_region_conn(region_name)
    image = conn.get_all_images([ami_id])[0]
    inst_type = inst_type or get_descr_attr(image, 'Type') or 't1.micro'
    security_groups = filter(None, security_groups.strip(';').split(';'))
    security_groups.append(new_security_group(conn.region))
    logger.info('Launching new instance in {reg} using {image}'
                .format(reg=conn.region, image=image))
    inst = image.run(
        key_name=key_pair or config.get(conn.region.name, 'KEY_PAIR'),
        security_groups=security_groups,
        instance_type=inst_type,
        user_data=user_data,
        placement=zone_name).instances[0]
    wait_for(inst, 'running')
    groups = [grp.name for grp in inst.groups]
    inst.add_tag('Security Groups', dumps(groups, separators=(',', ':')))
    add_tags(inst, image.tags)
    modify_instance_termination(conn.region.name, inst.id)
    logger.info('{inst} created in {inst.placement}'.format(inst=inst))
    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    key_file = config.get(conn.region.name, 'KEY_FILENAME')
    logger.info(info.format(inst=inst, user=env.user, key=key_file))
    return inst
Example #34
def modify_instance_termination(region, instance_id):
    """Mark production instnaces as uneligible for termination.

    region
        name of region where instance is located;
    instance_id
        instance to be updated;

    You must change the value of the preconfigured tag_name and run
    this command before terminating a production instance via the API."""
    conn = get_region_conn(region)
    inst = get_inst_by_id(conn.region.name, instance_id)
    prod_tag = config.get('DEFAULT', 'TAG_NAME')
    prod_val = config.get('DEFAULT', 'TAG_VALUE')
    inst_tag_val = inst.tags.get(prod_tag)
    inst.modify_attribute('disableApiTermination', inst_tag_val == prod_val)
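Usage sketch with a placeholder instance ID (import path assumed):

from fabfile import modify_instance_termination  # hypothetical path

# Re-evaluate the disableApiTermination flag after editing the
# instance's production tag.
modify_instance_termination('us-east-1', 'i-12345678')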
Example #35
def modify_instance_termination(region, instance_id):
    """Mark production instnaces as uneligible for termination.

    region
        name of region where instance is located;
    instance_id
        instance to be updated;

    You must change the value of the preconfigured tag_name and run
    this command before terminating a production instance via the API."""
    conn = get_region_conn(region)
    inst = get_inst_by_id(conn.region.name, instance_id)
    prod_tag = config.get('DEFAULT', 'TAG_NAME')
    prod_val = config.get('DEFAULT', 'TAG_VALUE')
    inst_tag_val = inst.tags.get(prod_tag)
    inst.modify_attribute('disableApiTermination', inst_tag_val == prod_val)
Example #36
def mount_snapshot(region_name, snap_id, inst_id=None):
    """Mount snapshot to temporary created instance or inst_id.

    region_name, snap_id
        specify snapshot.
    inst_id
        attach to existing instance. Will be created temporary if
        None."""

    conn = get_region_conn(region_name)
    inst = get_inst_by_id(conn.region.name, inst_id) if inst_id else None
    snap = conn.get_all_snapshots(snapshot_ids=[
        snap_id,
    ])[0]

    info = ('\nYou may now SSH into the {inst} server, using:'
            '\n ssh -i {key} {user}@{inst.public_dns_name}')
    with attach_snapshot(snap, inst=inst) as (vol, mountpoint):
        if mountpoint:
            info += ('\nand browse snapshot, mounted at {mountpoint}.')
        else:
            info += ('\nand mount {device}. NOTE: device name may be '
                     'altered by system.')
        key_file = config.get(conn.region.name, 'KEY_FILENAME')
        inst = get_inst_by_id(conn.region.name, vol.attach_data.instance_id)
        assert inst
        logger.info(
            info.format(inst=inst,
                        user=env.user,
                        key=key_file,
                        device=vol.attach_data.device,
                        mountpoint=mountpoint))

        info = ('\nEnter FINISHED if you are finished looking at the '
                'backup and would like to cleanup: ')
        while raw_input(info).strip() != 'FINISHED':
            pass
Example #37
def _trim_snapshots(region, dry_run=False):

    """Delete snapshots back in time in logarithmic manner.

    dry_run
        just print snapshot to be deleted.

    Modified version of the `boto.ec2.connection.trim_snapshots
    <http://pypi.python.org/pypi/boto/2.0>`_. Licensed under MIT license
    by Mitch Garnaat, 2011."""
    hourly_backups = config.getint('purge_backups', 'HOURLY_BACKUPS')
    daily_backups = config.getint('purge_backups', 'DAILY_BACKUPS')
    weekly_backups = config.getint('purge_backups', 'WEEKLY_BACKUPS')
    monthly_backups = config.getint('purge_backups', 'MONTHLY_BACKUPS')
    quarterly_backups = config.getint('purge_backups', 'QUARTERLY_BACKUPS')
    yearly_backups = config.getint('purge_backups', 'YEARLY_BACKUPS')

    # work with UTC time, which is what the snapshot start time is reported in
    now = datetime.utcnow()
    last_hour = datetime(now.year, now.month, now.day, now.hour)
    last_midnight = datetime(now.year, now.month, now.day)
    last_sunday = datetime(now.year, now.month,
                           now.day) - timedelta(days=(now.weekday() + 1) % 7)
    last_month = datetime.now() - relativedelta(months=1)
    last_year = datetime.now() - relativedelta(years=1)
    other_years = datetime.now() - relativedelta(years=2)
    start_of_month = datetime(now.year, now.month, 1)

    target_backup_times = []
    # there are no snapshots older than 1/1/2000
    oldest_snapshot_date = datetime(2000, 1, 1)

    for hour in range(0, hourly_backups):
        target_backup_times.append(last_hour - timedelta(hours=hour))

    for day in range(0, daily_backups):
        target_backup_times.append(last_midnight - timedelta(days=day))

    for week in range(0, weekly_backups):
        target_backup_times.append(last_sunday - timedelta(weeks=week))

    for month in range(0, monthly_backups):
        target_backup_times.append(last_month - relativedelta(months=month))

    for quart in range(0, quarterly_backups):
        target_backup_times.append(last_year - relativedelta(months=4 * quart))

    for year in range(0, yearly_backups):
        target_backup_times.append(other_years - relativedelta(years=year))

    one_day = timedelta(days=1)
    while start_of_month > oldest_snapshot_date:
        # append the start of the month to the list of snapshot dates to save:
        target_backup_times.append(start_of_month)
        # there's no timedelta setting for one month, so instead:
        # decrement the day by one, so we go to the final day of the
        # previous month...
        start_of_month -= one_day
        # ... and then go to the first day of that previous month:
        start_of_month = datetime(start_of_month.year,
                                  start_of_month.month, 1)

    temp = []

    for t in target_backup_times:
        if t not in temp:
            temp.append(t)

    target_backup_times = temp
    target_backup_times.reverse()  # make the oldest date first

    # get all the snapshots, sort them by date and time,
    #and organize them into one array for each volume:
    conn = get_region_conn(region.name)
    all_snapshots = conn.get_all_snapshots(owner='self')
    # oldest first
    all_snapshots.sort(cmp=lambda x, y: cmp(x.start_time, y.start_time))

    snaps_for_each_volume = {}
    for snap in all_snapshots:
        # the snapshot name and the volume name are the same.
        # The snapshot name is set from the volume
        # name at the time the snapshot is taken
        volume_name = get_snap_vol(snap)

        if volume_name:
            # only examine snapshots that have a volume name
            snaps_for_volume = snaps_for_each_volume.get(volume_name)

            if not snaps_for_volume:
                snaps_for_volume = []
                snaps_for_each_volume[volume_name] = snaps_for_volume
            snaps_for_volume.append(snap)

    # Do a running comparison of snapshot dates to desired time periods,
    # keeping the oldest snapshot in each
    # time period and deleting the rest:
    for volume_name in snaps_for_each_volume:
        snaps = snaps_for_each_volume[volume_name]
        snaps = snaps[:-1]
        # never delete the newest snapshot, so remove it from consideration

        time_period_num = 0
        snap_found_for_this_time_period = False
        for snap in snaps:
            check_this_snap = True

            while (check_this_snap and
                   time_period_num < len(target_backup_times)):

                if get_snap_time(snap) < target_backup_times[time_period_num]:
                    # the snap date is before the cutoff date.
                    # Figure out if it's the first snap in this
                    # date range and act accordingly
                    # (since both the date ranges and the snapshots
                    # are sorted chronologically, we know this
                    # snapshot isn't in an earlier date range):
                    if snap_found_for_this_time_period:
                        if not snap.tags.get('preserve_snapshot'):
                            if dry_run:
                                logger.info('Dry-trimmed {0} {1} from {2}'
                                    .format(snap, snap.description,
                                    snap.start_time))
                            else:
                                # as long as the snapshot wasn't marked with
                                # the 'preserve_snapshot' tag, delete it:
                                try:
                                    conn.delete_snapshot(snap.id)
                                except EC2ResponseError as err:
                                    logger.exception(str(err))
                                else:
                                    logger.info('Trimmed {0} {1} from {2}'
                                        .format(snap, snap.description,
                                        snap.start_time))
                        # go on and look at the next snapshot,
                        # leaving the time period alone
                    else:
                        # this was the first snapshot found for this time
                        # period. Leave it alone and look at the next snapshot:
                        snap_found_for_this_time_period = True
                    check_this_snap = False
                else:
                    # the snap is after the cutoff date.
                    # Check it against the next cutoff date
                    time_period_num += 1
                    snap_found_for_this_time_period = False
Example #38
def create_ami(region,
               snap_id,
               force=None,
               root_dev='/dev/sda1',
               zone_name=None,
               default_arch=None,
               default_type='t1.micro',
               security_groups=''):
    """
    Creates AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created ami image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore an instance with the same
        parameters. Snapshots of the same instance taken close in time
        (within 10 minutes) but for other devices (/dev/sdb, /dev/sdc,
        etc.) will be processed automatically;
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[
        snap_id,
    ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [
        snp for snp in snaps if get_snap_instance(snp) == instance_id
        and get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)
    ]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # check whether instance is encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map,
        kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[
        result,
    ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    info = ('\nEnter RUN if you want to launch instance using '
            'just created {0}: '.format(image))
    new_instance = None
    if force == 'RUN' or raw_input(info).strip() == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region,
            image.id,
            inst_type=instance_type,
            security_groups=security_groups,
            zone_name=zone_name)
    return image, new_instance
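Usage sketch (snapshot ID is a placeholder, import path assumed):

from fabfile import create_ami  # hypothetical import path

# Register an AMI from the snapshot and, thanks to force='RUN', boot an
# instance from it immediately without the interactive prompt.
image, new_inst = create_ami('us-east-1', 'snap-12345678', force='RUN')
print(image.id)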
Example #39
def create_encrypted_instance(region_name,
                              release='lucid',
                              volume_size='8',
                              architecture=None,
                              type='t1.micro',
                              name='encr_root',
                              pw1=None,
                              pw2=None,
                              security_groups=''):
    """
    Creates an Ubuntu instance with a LUKS-encrypted root volume.

    region_name
        Region where you want to create instance;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        Size of volume in GB (note that the script creates a 1 GB boot
        volume, so the minimal size of the whole volume is 3 GB: 1 GB
        for /boot and 2 GB for /). 8 by default;
    architecture
        "i386" or "x86_64".
    type
        Type of instance. 't1.micro' by default;
    name
        Name of luks encrypted volume. 'encr_root' by default;
    pw1, pw2
        You can specify passwords in parameters to suppress password prompt;
    security_groups
        List of AWS Security Groups names formatted as string separated
        with semicolon ';'.

    To unlock, go to https://ip_address_of_instance (only after reboot
    or shutdown).
    You can set up to 8 passwords. The default boot.key and boot.crt are
    created for .amazonaws.com and so should work for all instances. The
    creation process takes about 20 minutes."""
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)

    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename, 'ubuntu',
                                  ubuntu_arch, dev, name, release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name,
                                           snap.id,
                                           'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n   https://{0}\n'.format(
                new_instance.public_dns_name))
            img.deregister()
            snap.delete()
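Usage sketch; passwords will be prompted for since pw1/pw2 are omitted (import path assumed):

from fabfile import create_encrypted_instance  # hypothetical path

# Build a LUKS-encrypted Lucid instance with an 8 GB root volume.
create_encrypted_instance('us-east-1', release='lucid', volume_size='8')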
Example #40
def sync_rules(src_grp, dst_grp=None, dst_region=None):
    """
    Copy Security Group rules.

    Works across regions as well. The sole exception that won't be
    synced is granted groups owned by another user: such groups can't
    be copied recursively.
    """
    assert bool(dst_grp) ^ bool(dst_region), "Only dst_grp or dst_region " "should be provided"
    if dst_region:
        dst_grp = new_security_group(dst_region, src_grp.name, src_grp.description)

    def is_group_in(region, group_name):
        try:
            get_region_conn(region.name).get_all_security_groups([group_name])
        except EC2ResponseError:
            return False
        else:
            return True

    src_rules = regroup_rules(src_grp)
    # Assure granted group represented in destination region.
    src_grants = chain(*src_rules.values())
    for grant in dict((grant.name, grant) for grant in src_grants).values():
        if grant.name and grant.owner_id == src_grp.owner_id and not is_group_in(dst_grp.region, grant.name):
            src_conn = get_region_conn(src_grp.region.name)
            grant_grp = src_conn.get_all_security_groups([grant.name])[0]
            sync_rules(grant_grp, dst_region=dst_grp.region)
    dst_rules = regroup_rules(dst_grp)
    # Remove rules absent in src_grp.
    for ports in set(dst_rules.keys()) - set(src_rules.keys()):
        for grant in dst_rules[ports]:
            args = ports + ((None, grant) if grant.name else (grant, None))
            dst_grp.revoke(*args)
    # Add rules absent in dst_grp.
    for ports in set(src_rules.keys()) - set(dst_rules.keys()):
        for grant in src_rules[ports]:
            if grant.name and not is_group_in(dst_grp.region, grant.name):
                continue  # Absent other's granted group.
            args = ports + ((None, grant) if grant.name else (grant, None))
            dst_grp.authorize(*args)
    # Refresh `dst_rules` from updated `dst_grp`.
    dst_rules = regroup_rules(dst_grp)

    @contextmanager
    def patch_grouporcidr():
        """XXX Patching `boto.ec2.securitygroup.GroupOrCIDR` cmp and hash."""
        from boto.ec2.securitygroup import GroupOrCIDR

        original_cmp = getattr(GroupOrCIDR, "__cmp__", None)
        GroupOrCIDR.__cmp__ = lambda self, other: cmp(str(self), str(other))
        original_hash = GroupOrCIDR.__hash__
        GroupOrCIDR.__hash__ = lambda self: hash(str(self))
        try:
            yield
        finally:
            if original_cmp:
                GroupOrCIDR.__cmp__ = original_cmp
            else:
                del GroupOrCIDR.__cmp__
            GroupOrCIDR.__hash__ = original_hash

    # Sync grants in common rules.
    with patch_grouporcidr():
        for ports in src_rules:
            # Remove grants absent in src_grp rules.
            for grant in set(dst_rules[ports]) - set(src_rules[ports]):
                args = ports + ((None, grant) if grant.name else (grant, None))
                dst_grp.revoke(*args)
            # Add grants absent in dst_grp rules.
            for grant in set(src_rules[ports]) - set(dst_rules[ports]):
                if grant.name and not is_group_in(dst_grp.region, grant.name):
                    continue  # Absent other's granted group.
                args = ports + ((None, grant) if grant.name else (grant, None))
                dst_grp.authorize(*args)
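
# A minimal usage sketch for sync_rules; the group name and regions below are
# hypothetical. Pass either dst_grp or dst_region, never both: with dst_region
# the destination group is created in the target region first.
src_grp_example = get_region_conn('us-east-1').get_all_security_groups(['web'])[0]
sync_rules(src_grp_example, dst_region=get_region_conn('eu-west-1').region)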
Example #41
def create_instance(region_name='us-east-1',
                    zone_name=None,
                    key_pair=None,
                    security_groups='',
                    architecture=None,
                    user_data=None,
                    inst_type=None):
    """
    Create AWS EC2 instance.

    Return created instance.

    region_name
        by default will be created in the us-east-1 region;
    zone_name
        string-formatted name, may be omitted;
    key_pair
        name of key_pair to be granted access. Will be fetched from
        config by default, may be configured per region;
    security_groups
        names of AWS Security Groups as a single string, separated by
        semicolon ';';
    architecture
        "i386" or "x86_64";
    user_data
        user data to pass to the instance, may be omitted;
    inst_type
        will be fetched from the AMI description by default, falling
        back to 't1.micro' if not mentioned there.
    """
    conn = get_region_conn(region_name)

    ami_ptrn = config.get(conn.region.name, 'AMI_PTRN')
    architecture = architecture or config.get('DEFAULT', 'ARCHITECTURE')
    ubuntu_aws_account = config.get('DEFAULT', 'UBUNTU_AWS_ACCOUNT')
    filters = {
        'owner_id': ubuntu_aws_account,
        'architecture': architecture,
        'name': ami_ptrn,
        'image_type': 'machine',
        'root_device_type': 'ebs'
    }
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest version.
    ptrn = re.compile(config.get(conn.region.name, 'AMI_REGEXP'))
    versions = set([ptrn.search(img.name).group('version') for img in images])

    def complement(year_month):
        # Zero-pad 'Y.MM' versions so they sort correctly as strings.
        return '0' + year_month if len(year_month) == 4 else year_month

    latest_version = sorted(versions, key=complement)[-1]  # XXX Y3K.
    ami_ptrn_with_version = config.get(conn.region.name,
                                       'AMI_PTRN_WITH_VERSION')
    name_with_version = ami_ptrn_with_version.format(version=latest_version)
    filters.update({'name': name_with_version})
    images = conn.get_all_images(filters=filters)
    # Filter AMI by latest release date.
    dates = set([ptrn.search(img.name).group('released_at') for img in images])
    latest_date = sorted(set(dates))[-1]
    ami_ptrn_with_release_date = config.get(conn.region.name,
                                            'AMI_PTRN_WITH_RELEASE_DATE')
    name_with_version_and_release = ami_ptrn_with_release_date.format(
        version=latest_version, released_at=latest_date)
    filters.update({'name': name_with_version_and_release})
    image = conn.get_all_images(filters=filters)[0]
    return launch_instance_from_ami(region_name,
                                    image.id,
                                    security_groups=security_groups,
                                    key_pair=key_pair,
                                    zone_name=zone_name,
                                    user_data=user_data,
                                    inst_type=inst_type)
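

# A hedged example of calling create_instance; the key pair and group names
# are hypothetical. Security Groups are passed as one semicolon-separated
# string, as the docstring above describes.
inst = create_instance('eu-west-1', key_pair='deploy-key',
                       security_groups='default;webserver',
                       architecture='x86_64', inst_type='t1.micro')
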
def replicate_security_groups(filters=None):
    """
    Replicate updates of Security Groups among regions.

    :param filters: restrict replication to subset of Security Groups,
        see available options at
        http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSecurityGroups.html.
        Not available while running as Fabric task because it should be
        of `dict` type.
    :type filters: dict


    Per-instance Security Groups without additional rules won't be
    replicated.

    Emits warnings about synchronization issues that require manual
    resolution.
    """
    HASH, TIMESTAMP = 'Hash', 'Version'  # Tag names.

    def get_hash(s_g):
        """
        Return unique hash for Security Group rules.

        Granted Security Groups are considered identical if they belong
        to the same owner and share the same name, irrespective of
        region.
        """
        return sha256(str(regroup_rules(s_g).items())).hexdigest()

    def was_updated(s_g):
        """Returns True if Security Group was modified or just created."""
        return HASH not in s_g.tags or get_hash(s_g) != s_g.tags[HASH]

    regions = get_region_conn().get_all_regions()
    blank_group = new_security_group(regions[0])
    security_groups = []
    for reg in regions:
        for s_g in get_region_conn(
                reg.name).get_all_security_groups(filters=filters):
            security_groups.append(s_g)
    name = attrgetter('name')
    grp_by_name = groupby(sorted(security_groups, key=name), key=name)
    for name, grp_in_regions in grp_by_name:
        grp_in_regions = list(grp_in_regions)
        versions = set(get_hash(s_g) for s_g in grp_in_regions)
        old_vers = [s_g for s_g in grp_in_regions if not was_updated(s_g)]
        if len(set(s_g.tags[HASH] for s_g in old_vers)) > 1:
            warn('Old versions of {0} should be synced manually'.format(name))
            continue
        if len(versions) == 2 and old_vers:  # Update olds to new version.
            new = [grp for grp in grp_in_regions if was_updated(grp)][0]
            for prev in old_vers:
                sync_rules(new, prev)
        elif len(versions) != 1:
            warn('More than one new version of {0} found. Synchronization '
                 'can\'t be applied.'.format(name))
            continue
        # Clone to all regions if not yet cloned.
        if (len(grp_in_regions) < len(regions)
                and not (name.startswith(INST_SPECIFIC_SG_PREFIX) and get_hash(
                    grp_in_regions[0]) == get_hash(blank_group))):
            s_g_regions = set(s_g.region.name for s_g in grp_in_regions)
            for reg_name in set(reg.name for reg in regions) - s_g_regions:
                region = get_region_conn(reg_name).region
                sync_rules(grp_in_regions[0], dst_region=region)
        # Update tags.
        mark = timestamp()
        for s_g in grp_in_regions:
            s_g.add_tag(HASH, get_hash(s_g))
            s_g.add_tag(TIMESTAMP, mark)
    blank_group.delete()
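
# A sketch of replicate_security_groups usage. The filters dict takes the
# DescribeSecurityGroups API options linked in the docstring; the group name
# below is hypothetical. Filters can't be passed when running as a Fabric
# task.
replicate_security_groups(filters={'group-name': 'webserver'})
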
def sync_rules(src_grp, dst_grp=None, dst_region=None):
    """
    Copy Security Group rules.

    Works across regions as well. The sole exception is granted groups
    owned by another user - such groups can't be copied recursively,
    so their rules won't be synced.
    """
    assert bool(dst_grp) ^ bool(dst_region), ('Only dst_grp or dst_region '
                                              'should be provided')
    if dst_region:
        dst_grp = new_security_group(dst_region, src_grp.name,
                                     src_grp.description)

    def is_group_in(region, group_name):
        try:
            get_region_conn(region.name).get_all_security_groups([group_name])
        except EC2ResponseError:
            return False
        else:
            return True

    src_rules = regroup_rules(src_grp)
    # Ensure granted groups are represented in the destination region.
    src_grants = chain(*src_rules.values())
    for grant in dict((grant.name, grant) for grant in src_grants).values():
        if (grant.name and grant.owner_id == src_grp.owner_id
                and not is_group_in(dst_grp.region, grant.name)):
            src_conn = get_region_conn(src_grp.region.name)
            grant_grp = src_conn.get_all_security_groups([grant.name])[0]
            sync_rules(grant_grp, dst_region=dst_grp.region)
    dst_rules = regroup_rules(dst_grp)
    # Remove rules absent in src_grp.
    for ports in set(dst_rules.keys()) - set(src_rules.keys()):
        for grant in dst_rules[ports]:
            args = ports + ((None, grant) if grant.name else (grant, None))
            dst_grp.revoke(*args)
    # Add rules absent in dst_grp.
    for ports in set(src_rules.keys()) - set(dst_rules.keys()):
        for grant in src_rules[ports]:
            if grant.name and not is_group_in(dst_grp.region, grant.name):
                continue  # Absent other's granted group.
            args = ports + ((None, grant) if grant.name else (grant, None))
            dst_grp.authorize(*args)
    # Refresh `dst_rules` from updated `dst_grp`.
    dst_rules = regroup_rules(dst_grp)

    @contextmanager
    def patch_grouporcidr():
        """XXX Patching `boto.ec2.securitygroup.GroupOrCIDR` cmp and hash."""
        from boto.ec2.securitygroup import GroupOrCIDR
        original_cmp = getattr(GroupOrCIDR, '__cmp__', None)
        GroupOrCIDR.__cmp__ = lambda self, other: cmp(str(self), str(other))
        original_hash = GroupOrCIDR.__hash__
        GroupOrCIDR.__hash__ = lambda self: hash(str(self))
        try:
            yield
        finally:
            if original_cmp:
                GroupOrCIDR.__cmp__ = original_cmp
            else:
                del GroupOrCIDR.__cmp__
            GroupOrCIDR.__hash__ = original_hash

    # Sync grants in common rules.
    with patch_grouporcidr():
        for ports in src_rules:
            # Remove grants absent in src_grp rules.
            for grant in set(dst_rules[ports]) - set(src_rules[ports]):
                args = ports + ((None, grant) if grant.name else (grant, None))
                dst_grp.revoke(*args)
            # Add grants absent in dst_grp rules.
            for grant in set(src_rules[ports]) - set(dst_rules[ports]):
                if grant.name and not is_group_in(dst_grp.region, grant.name):
                    continue  # Absent other's granted group.
                args = ports + ((None, grant) if grant.name else (grant, None))
                dst_grp.authorize(*args)
Example #44
def _trim_snapshots(region, dry_run=False):
    """Delete snapshots back in time in a logarithmic manner.

    dry_run
        just print snapshots to be deleted.

    Modified version of `boto.ec2.connection.trim_snapshots
    <http://pypi.python.org/pypi/boto/2.0>`_. Licensed under the MIT
    license by Mitch Garnaat, 2011."""
    hourly_backups = config.getint('purge_backups', 'HOURLY_BACKUPS')
    daily_backups = config.getint('purge_backups', 'DAILY_BACKUPS')
    weekly_backups = config.getint('purge_backups', 'WEEKLY_BACKUPS')
    monthly_backups = config.getint('purge_backups', 'MONTHLY_BACKUPS')
    quarterly_backups = config.getint('purge_backups', 'QUARTERLY_BACKUPS')
    yearly_backups = config.getint('purge_backups', 'YEARLY_BACKUPS')

    # work with UTC time, which is what the snapshot start time is reported in
    now = datetime.utcnow()
    last_hour = datetime(now.year, now.month, now.day, now.hour)
    last_midnight = datetime(now.year, now.month, now.day)
    last_sunday = (datetime(now.year, now.month, now.day) -
                   timedelta(days=(now.weekday() + 1) % 7))
    last_month = datetime.now() - relativedelta(months=1)
    last_year = datetime.now() - relativedelta(years=1)
    other_years = datetime.now() - relativedelta(years=2)
    start_of_month = datetime(now.year, now.month, 1)

    target_backup_times = []
    # there are no snapshots older than 1/1/2000
    oldest_snapshot_date = datetime(2000, 1, 1)

    for hour in range(0, hourly_backups):
        target_backup_times.append(last_hour - timedelta(hours=hour))

    for day in range(0, daily_backups):
        target_backup_times.append(last_midnight - timedelta(days=day))

    for week in range(0, weekly_backups):
        target_backup_times.append(last_sunday - timedelta(weeks=week))

    for month in range(0, monthly_backups):
        target_backup_times.append(last_month - relativedelta(months=month))

    for quart in range(0, quarterly_backups):
        # NB: steps back in 4-month increments, not true 3-month quarters.
        target_backup_times.append(last_year - relativedelta(months=4 * quart))

    for year in range(0, yearly_backups):
        target_backup_times.append(other_years - relativedelta(years=year))

    one_day = timedelta(days=1)
    while start_of_month > oldest_snapshot_date:
        # append the start of the month to the list of snapshot dates to save:
        target_backup_times.append(start_of_month)
        # there's no timedelta setting for one month, so instead:
        # decrement the day by one, so we go to the final day of the
        # previous month...
        start_of_month -= one_day
        # ... and then go to the first day of that previous month:
        start_of_month = datetime(start_of_month.year,
                                  start_of_month.month, 1)

    # Deduplicate the target dates, preserving order.
    temp = []
    for t in target_backup_times:
        if t not in temp:
            temp.append(t)
    target_backup_times = temp
    target_backup_times.reverse()  # make the oldest date first

    # get all the snapshots, sort them by date and time,
    # and organize them into one array for each volume:
    conn = get_region_conn(region.name)
    all_snapshots = conn.get_all_snapshots(owner='self')
    # oldest first
    all_snapshots.sort(cmp=lambda x, y: cmp(x.start_time, y.start_time))

    snaps_for_each_volume = {}
    for snap in all_snapshots:
        # the snapshot name and the volume name are the same.
        # The snapshot name is set from the volume
        # name at the time the snapshot is taken
        volume_name = get_snap_vol(snap)

        if volume_name:
            # only examine snapshots that have a volume name
            snaps_for_volume = snaps_for_each_volume.get(volume_name)

            if not snaps_for_volume:
                snaps_for_volume = []
                snaps_for_each_volume[volume_name] = snaps_for_volume
            snaps_for_volume.append(snap)

    # Do a running comparison of snapshot dates to desired time periods,
    # keeping the oldest snapshot in each
    # time period and deleting the rest:
    for volume_name in snaps_for_each_volume:
        snaps = snaps_for_each_volume[volume_name]
        # never delete the newest snapshot, so remove it from consideration:
        snaps = snaps[:-1]

        time_period_num = 0
        snap_found_for_this_time_period = False
        for snap in snaps:
            check_this_snap = True

            while (check_this_snap and
                   time_period_num < len(target_backup_times)):

                if get_snap_time(snap) < target_backup_times[time_period_num]:
                    # The snap date is before the cutoff date. Figure out
                    # if it's the first snap in this date range and act
                    # accordingly (since both the date ranges and the
                    # snapshots are sorted chronologically, we know this
                    # snapshot isn't in an earlier date range):
                    if snap_found_for_this_time_period:
                        if not snap.tags.get('preserve_snapshot'):
                            if dry_run:
                                logger.info('Dry-trimmed {0} {1} from {2}'
                                    .format(snap, snap.description,
                                    snap.start_time))
                            else:
                                # as long as the snapshot wasn't marked with
                                # the 'preserve_snapshot' tag, delete it:
                                try:
                                    conn.delete_snapshot(snap.id)
                                except EC2ResponseError as err:
                                    logger.exception(str(err))
                                else:
                                    logger.info('Trimmed {0} {1} from {2}'
                                        .format(snap, snap.description,
                                        snap.start_time))
                        # go on and look at the next snapshot,
                        # leaving the time period alone
                    else:
                        # this was the first snapshot found for this time
                        # period. Leave it alone and look at the next snapshot:
                        snap_found_for_this_time_period = True
                    check_this_snap = False
                else:
                    # the snap is after the cutoff date.
                    # Check it against the next cutoff date
                    time_period_num += 1
                    snap_found_for_this_time_period = False
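
# _trim_snapshots reads its retention windows from the [purge_backups] config
# section; the keys below are the ones the function fetches, the values are
# illustrative only:
#
#   [purge_backups]
#   HOURLY_BACKUPS = 24
#   DAILY_BACKUPS = 7
#   WEEKLY_BACKUPS = 4
#   MONTHLY_BACKUPS = 12
#   QUARTERLY_BACKUPS = 4
#   YEARLY_BACKUPS = 10
#
# A dry run logs what would be trimmed without deleting anything:
region_example = get_region_conn('us-east-1').region
_trim_snapshots(region_example, dry_run=True)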
Example #45
def rsync_snapshot(src_region_name, snapshot_id, dst_region_name,
                   src_inst=None, dst_inst=None, force=False):
    """Duplicate the snapshot into dst_region.

    src_region_name, dst_region_name
        Amazon region names. May be contracted, e.g. `ap-south` or
        even `ap-s` will be recognized as `ap-southeast-1`;
    snapshot_id
        snapshot to duplicate;
    src_inst, dst_inst
        existing instances to use instead of creating new temporary
        ones;
    force
        rsync the snapshot even if a newer version exists.

    You'll need to open port 60000 for encrypted instances replication."""
    src_conn = get_region_conn(src_region_name)
    src_snap = src_conn.get_all_snapshots([snapshot_id])[0]
    dst_conn = get_region_conn(dst_region_name)
    _src_device = get_snap_device(src_snap)
    _src_dev = re.match(r'^/dev/sda$', _src_device)  # check for encryption
    if _src_dev:
        encr = True
        logger.info('Found traces of encryption')
    else:
        encr = None

    info = 'Going to transmit {snap.volume_size} GiB {snap} {snap.description}'
    if src_snap.tags.get('Name'):
        info += ' of {name}'
    info += ' from {snap.region} to {dst}'
    logger.info(info.format(snap=src_snap, dst=dst_conn.region,
                            name=src_snap.tags.get('Name')))

    src_vol = get_snap_vol(src_snap)
    dst_snaps = get_relevant_snapshots(dst_conn, native_only=False)
    vol_snaps = [snp for snp in dst_snaps if get_snap_vol(snp) == src_vol]

    def sync_mountpoints(src_snap, src_vol, src_mnt, dst_vol, dst_mnt):
        # Marking temporary volume with snapshot's description.
        dst_vol.add_tag(DESCRIPTION_TAG, src_snap.description)
        snaps, vols = get_replicas(src_snap.description, dst_vol.connection)
        if not force and snaps:
            raise ReplicationCollisionError(
                'Stepping over {snap} - it\'s already replicated as {snaps} '
                'in {snaps[0].region}'.format(snap=src_snap, snaps=snaps))
        if not force and len(vols) > 1:
            timeout = src_snap.volume_size / REPLICATION_SPEED
            get_vol_time = lambda vol: parse(vol.create_time)

            def not_outdated(vol, now):
                age = now - get_vol_time(vol)
                return age.days * 24 * 60 * 60 + age.seconds < timeout

            now = datetime.utcnow().replace(tzinfo=tzutc())
            actual_vols = [vol for vol in vols if not_outdated(vol, now)]
            hung_vols = set(vols) - set(actual_vols)
            if len(actual_vols) > 1:
                oldest = sorted(actual_vols, key=get_vol_time)[0]
                if dst_vol.id != oldest.id:
                    raise ReplicationCollisionError(
                        'Stepping over {snap} - it\'s already replicating to '
                        '{vol} in {vol.region}'.format(snap=src_snap,
                                                       vol=oldest))
            if hung_vols:
                logger.warn(
                    'Replication to temporary {vols} created while '
                    'transmitting {snap} to {reg} looks hung up. '
                    'Starting new replication process.'.format(
                        snap=src_snap, vols=hung_vols, reg=dst_vol.region))
        update_snap(src_vol, src_mnt, dst_vol, dst_mnt, encr)

    if vol_snaps:
        dst_snap = sorted(vol_snaps, key=get_snap_time)[-1]
        with nested(
                attach_snapshot(src_snap, inst=src_inst, encr=encr),
                attach_snapshot(dst_snap, inst=dst_inst, encr=encr)) as (
                    (src_vol, src_mnt), (dst_vol, dst_mnt)):
            sync_mountpoints(src_snap, src_vol, src_mnt, dst_vol, dst_mnt)
    else:
        with nested(
                attach_snapshot(src_snap, inst=src_inst, encr=encr),
                create_tmp_volume(dst_conn.region, src_snap.volume_size)) as (
                    (src_vol, src_mnt), (dst_vol, dst_mnt)):
            sync_mountpoints(src_snap, src_vol, src_mnt, dst_vol, dst_mnt)
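
# A hedged example: replicate a single snapshot between regions. The snapshot
# id is hypothetical; region names may be contracted as the docstring notes.
rsync_snapshot('us-east-1', 'snap-12345678', 'eu-west')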
Example #47
def create_ami(region, snap_id, force=None, root_dev='/dev/sda1', zone_name=None,
               default_arch=None, default_type='t1.micro', security_groups=''):
    """
    Creates AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created ami image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automaticaly process snapshots for same instance with near
        time (10 minutes or shorter), but for other devices (/dev/sdb,
        /dev/sdc, etc);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id, ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [snp for snp in snaps if
        get_snap_instance(snp) == instance_id and
        get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # root on /dev/sda => encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map, kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result, ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    new_instance = None
    if force == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region, image.id, inst_type=instance_type,
            security_groups=security_groups, zone_name=zone_name)
    return image, new_instance
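
# A sketch of create_ami usage; the snapshot id is hypothetical. Without
# force='RUN' no instance is launched and new_instance is returned as None.
image, new_instance = create_ami('eu-west-1', 'snap-12345678')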
Example #48
def create_encrypted_instance(
    region_name, release='lucid', volume_size='8', architecture=None,
    type='t1.micro', name='encr_root', pw1=None, pw2=None, security_groups=''):
    """
    Creates an Ubuntu instance with a LUKS-encrypted root volume.

    region_name
        region where the instance will be created;
    release
        Ubuntu release name (lucid or natty). "lucid" by default;
    volume_size
        size of the volume in GiB (note that the script creates a boot
        volume of 1 GiB, so the minimal total size is 3 GiB: 1 GiB for
        /boot and 2 GiB for /). 8 by default;
    architecture
        "i386" or "x86_64";
    type
        type of the instance. 't1.micro' by default;
    name
        name of the LUKS-encrypted volume. 'encr_root' by default;
    pw1, pw2
        you can specify passwords as parameters to suppress the
        password prompt;
    security_groups
        names of AWS Security Groups as a single string, separated by
        semicolon ';'.

    To unlock, go to https://ip_address_of_instance (only after a
    reboot or shutdown). You can set up to 8 passwords. The default
    boot.key and boot.crt are created for .amazonaws.com and so should
    work for all instances. The creation process takes about 20
    minutes."""
    volume_size = int(volume_size)  # May be passed as a string from the CLI.
    assert volume_size >= 3, '1 GiB for /boot and 2 GiB for /'
    conn = get_region_conn(region_name)

    with config_temp_ssh(conn) as key_filename:
        key_pair = os.path.splitext(os.path.split(key_filename)[1])[0]
        zn = conn.get_all_zones()[-1]
        with create_temp_inst(zone=zn, key_pair=key_pair) as inst:
            vol = conn.create_volume(size=volume_size, zone=zn)
            dev = get_avail_dev_encr(inst)
            vol.attach(inst.id, dev)
            arch = architecture or config.get('DEFAULT', 'ARCHITECTURE')
            ubuntu_arch = 'amd64' if arch == 'x86_64' else arch
            make_encrypted_ubuntu(inst.public_dns_name, key_filename, 'ubuntu',
                                  ubuntu_arch, dev, name, release, pw1, pw2)
            description = dumps({
                'Volume': vol.id,
                'Region': vol.region.name,
                'Device': '/dev/sda',
                'Type': type,
                'Arch': arch,
                'Root_dev_name': '/dev/sda1',
                'Time': timestamp(),
            })
            snap = vol.create_snapshot(description)
            wait_for(snap, '100%', limit=SNAP_TIME)
            vol.detach(force=True)
            wait_for(vol, 'available', limit=DETACH_TIME)
            vol.delete()
            HTTPS_SG = config.get('DEFAULT', 'HTTPS_SECURITY_GROUP')
            security_groups = ';'.join([security_groups, HTTPS_SG])
            img, new_instance = create_ami(region_name, snap.id, 'RUN',
                                           security_groups=security_groups)
            logger.info('\nTo unlock go to:\n   https://{0}\n'
                        .format(new_instance.public_dns_name))
            img.deregister()
            snap.delete()
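
# A hedged invocation sketch; the passwords are placeholders passed here only
# to suppress the interactive prompt.
create_encrypted_instance('eu-west-1', release='lucid', volume_size='8',
                          pw1='placeholder', pw2='placeholder')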
Example #49
def replicate_security_groups(filters=None):
    """
    Replicate updates of Security Groups among regions.

    :param filters: restrict replication to subset of Security Groups,
        see available options at
        http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSecurityGroups.html.
        Not available while running as Fabric task because it should be
        of `dict` type.
    :type filters: dict


    Per-instance Security Groups without additional rules won't be
    replicated.

    Emits warnings about synchronization issues that require manual
    resolution.
    """
    HASH, TIMESTAMP = "Hash", "Version"  # Tag names.

    def get_hash(s_g):
        """
        Return unique hash for Security Group rules.

        Granted Security Groups are considered identical if they belong
        to the same owner and share the same name, irrespective of
        region.
        """
        return sha256(str(regroup_rules(s_g).items())).hexdigest()

    def was_updated(s_g):
        """Returns True if Security Group was modified or just created."""
        return HASH not in s_g.tags or get_hash(s_g) != s_g.tags[HASH]

    regions = get_region_conn().get_all_regions()
    blank_group = new_security_group(regions[0])
    security_groups = []
    for reg in regions:
        for s_g in get_region_conn(reg.name).get_all_security_groups(filters=filters):
            security_groups.append(s_g)
    name = attrgetter("name")
    grp_by_name = groupby(sorted(security_groups, key=name), key=name)
    for name, grp_in_regions in grp_by_name:
        grp_in_regions = list(grp_in_regions)
        versions = set(get_hash(s_g) for s_g in grp_in_regions)
        old_vers = [s_g for s_g in grp_in_regions if not was_updated(s_g)]
        if len(set(s_g.tags[HASH] for s_g in old_vers)) > 1:
            warn("Old versions of {0} should be synced manually".format(name))
            continue
        if len(versions) == 2 and old_vers:  # Update olds to new version.
            new = [grp for grp in grp_in_regions if was_updated(grp)][0]
            for prev in old_vers:
                sync_rules(new, prev)
        elif len(versions) != 1:
            warn("More than one new version of {0} found. Synchronization " "can't be applied.".format(name))
            continue
        # Clone to all regions if not yet cloned.
        if len(grp_in_regions) < len(regions) and not (
            name.startswith(INST_SPECIFIC_SG_PREFIX) and get_hash(grp_in_regions[0]) == get_hash(blank_group)
        ):
            s_g_regions = set(s_g.region.name for s_g in grp_in_regions)
            for reg_name in set(reg.name for reg in regions) - s_g_regions:
                region = get_region_conn(reg_name).region
                sync_rules(grp_in_regions[0], dst_region=region)
        # Update tags.
        mark = timestamp()
        for s_g in grp_in_regions:
            s_g.add_tag(HASH, get_hash(s_g))
            s_g.add_tag(TIMESTAMP, mark)
    blank_group.delete()