Example #1
def rsync_snapshot(src_region_name, snapshot_id, dst_region_name,
                   src_inst=None, dst_inst=None):

    """Duplicate the snapshot into dst_region.

    src_region_name, dst_region_name
        Amazon region names. Allowed to be contracted, e.g.
        `ap-southeast-1` will be recognized in `ap-south` or even
        `ap-s`;
    snapshot_id
        snapshot to duplicate;
    src_inst, dst_inst
        will be used instead of creating new for temporary.

    You'll need to open port 60000 for encrypted instances replication."""
    src_conn = get_region_conn(src_region_name)
    src_snap = src_conn.get_all_snapshots([snapshot_id])[0]
    dst_conn = get_region_conn(dst_region_name)
    _src_device = get_snap_device(src_snap)
    # a bare /dev/sda root device (no partition suffix) marks encryption
    _src_dev = re.match(r'^/dev/sda$', _src_device)
    if _src_dev:
        encr = True
        logger.info('Found traces of encryption')
    else:
        encr = None

    info = 'Going to transmit {snap.volume_size} GiB {snap} {snap.description}'
    if src_snap.tags.get('Name'):
        info += ' of {name}'
    info += ' from {snap.region} to {dst}'
    logger.info(info.format(snap=src_snap, dst=dst_conn.region,
                            name=src_snap.tags.get('Name')))

    dst_snaps = dst_conn.get_all_snapshots(owner='self')
    dst_snaps = [snp for snp in dst_snaps if not snp.status == 'error']
    src_vol = get_snap_vol(src_snap)
    vol_snaps = [snp for snp in dst_snaps if get_snap_vol(snp) == src_vol]

    if vol_snaps:
        dst_snap = sorted(vol_snaps, key=get_snap_time)[-1]
        if get_snap_time(dst_snap) >= get_snap_time(src_snap):
            kwargs = dict(src=src_snap, dst=dst_snap, dst_reg=dst_conn.region)
            logger.info('Stepping over {src} - it\'s not newer than {dst} '
                        '{dst.description} in {dst_reg}'.format(**kwargs))
            return
    else:
        dst_snap = create_empty_snapshot(dst_conn.region, src_snap.volume_size)

    with nested(attach_snapshot(src_snap, inst=src_inst, encr=encr),
                attach_snapshot(dst_snap, inst=dst_inst, encr=encr)) as (
                (src_vol, src_mnt), (dst_vol, dst_mnt)):
        update_snap(src_vol, src_mnt, dst_vol, dst_mnt, encr,
                    delete_old=not vol_snaps)  # Delete only empty snapshots.
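
A minimal invocation sketch in the same style as rsync_region below (the region names and snapshot id are hypothetical placeholders):

src_conn = get_region_conn('us-east-1')
dst_conn = get_region_conn('eu-west-1')
# Reuse one pair of temporary instances instead of creating them per call:
with nested(create_temp_inst(src_conn.region),
            create_temp_inst(dst_conn.region)) as (src_inst, dst_inst):
    rsync_snapshot('us-east-1', 'snap-12345678', 'eu-west-1',
                   src_inst=src_inst, dst_inst=dst_inst)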
Example #2
def get_relevant_snapshots(
        conn, tag_name=DEFAULT_TAG_NAME, tag_value=DEFAULT_TAG_VALUE,
        native_only=True, filters=None):
    """Return snapshots with a proper description."""
    # Build filters locally: a mutable default argument here would leak
    # the tag filter added below into every subsequent call.
    filters = dict(filters or {'status': SNAP_STATUSES})
    if tag_name and tag_value:
        filters.update({'tag:{0}'.format(tag_name): tag_value})
    snaps = conn.get_all_snapshots(owner='self', filters=filters)
    is_described = lambda snap: get_snap_vol(snap) and get_snap_time(snap)
    snaps = [snp for snp in snaps if is_described(snp)]
    if native_only:
        is_native = lambda snp, reg: get_descr_attr(snp, 'Region') == reg.name
        snaps = [snp for snp in snaps if is_native(snp, conn.region)]
    return snaps
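
A short usage sketch, assuming the module-level defaults are configured (the region name is a hypothetical placeholder):

conn = get_region_conn('us-east-1')
# Completed, properly described snapshots carrying the default tag,
# restricted to snapshots created in this region:
for snap in get_relevant_snapshots(conn):
    logger.debug('{0} of {1} at {2}'.format(snap, get_snap_vol(snap),
                                            get_snap_time(snap)))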
Example #4
def rsync_region(src_region_name, dst_region_name, tag_name=None,
                 tag_value=None, native_only=True):
    """Duplicates latest snapshots with given tag into dst_region.

    src_region_name, dst_region_name
        every latest volume snapshot from src_region will be rsynced
        to the dst_region;
    tag_name, tag_value
        snapshots will be filtered by tag. Tag will be fetched from
        config by default, may be configured per region;
    native_only
        sync only snapshots, created in the src_region_name. True by
        default."""
    src_conn = get_region_conn(src_region_name)
    dst_conn = get_region_conn(dst_region_name)
    tag_name = tag_name or config.get(src_conn.region.name, 'TAG_NAME')
    tag_value = tag_value or config.get(src_conn.region.name, 'TAG_VALUE')
    filters = {'tag-key': tag_name, 'tag-value': tag_value}
    snaps = src_conn.get_all_snapshots(owner='self', filters=filters)
    snaps = [snp for snp in snaps if not snp.status == 'error']
    _is_described = lambda snap: get_snap_vol(snap) and get_snap_time(snap)
    snaps = [snp for snp in snaps if _is_described(snp)]
    if native_only:

        def is_native(snap, region):
            return get_descr_attr(snap, 'Region') == region.name
        snaps = [snp for snp in snaps if is_native(snp, src_conn.region)]

    with nested(create_temp_inst(src_conn.region),
                create_temp_inst(dst_conn.region)) as (src_inst, dst_inst):
        snaps = sorted(snaps, key=get_snap_vol)    # Prepare for grouping.
        for vol, vol_snaps in groupby(snaps, get_snap_vol):
            latest_snap = sorted(vol_snaps, key=get_snap_time)[-1]
            for inst in src_inst, dst_inst:
                logger.debug('Rebooting {0} in {0.region} '
                             'to refresh attachments'.format(inst))
                inst.reboot()
            args = (src_region_name, latest_snap.id, dst_region_name, src_inst,
                    dst_inst)
            try:
                rsync_snapshot(*args)
            except Exception:
                # keep syncing the remaining volumes even if one fails
                logger.exception('rsync of {1} from {0} to {2} failed'.format(
                    *args))
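
A hedged end-to-end sketch (region names are hypothetical; tag name and value fall back to the per-region config when omitted):

# Mirror every latest tagged snapshot from one region to another:
rsync_region('us-east-1', 'eu-west-1')
# Or filter by an explicit tag and include foreign snapshots too:
rsync_region('us-east-1', 'eu-west-1', tag_name='Backup',
             tag_value='daily', native_only=False)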
Example #5
def _trim_snapshots(region, dry_run=False):

    """Delete snapshots back in time in logarithmic manner.

    dry_run
        just print snapshot to be deleted.

    Modified version of the `boto.ec2.connection.trim_snapshots
    <http://pypi.python.org/pypi/boto/2.0>_`. Licensed under MIT license
    by Mitch Garnaat, 2011."""
    hourly_backups = config.getint('purge_backups', 'HOURLY_BACKUPS')
    daily_backups = config.getint('purge_backups', 'DAILY_BACKUPS')
    weekly_backups = config.getint('purge_backups', 'WEEKLY_BACKUPS')
    monthly_backups = config.getint('purge_backups', 'MONTHLY_BACKUPS')
    quarterly_backups = config.getint('purge_backups', 'QUARTERLY_BACKUPS')
    yearly_backups = config.getint('purge_backups', 'YEARLY_BACKUPS')

    # work with UTC time, which is what the snapshot start time is reported in
    now = datetime.utcnow()
    last_hour = datetime(now.year, now.month, now.day, now.hour)
    last_midnight = datetime(now.year, now.month, now.day)
    last_sunday = datetime(now.year, now.month,
          now.day) - timedelta(days=(now.weekday() + 1) % 7)
    last_month = datetime.now() - relativedelta(months=1)
    last_year = datetime.now() - relativedelta(years=1)
    other_years = datetime.now() - relativedelta(years=2)
    start_of_month = datetime(now.year, now.month, 1)

    target_backup_times = []
    # there are no snapshots older than 1/1/2000
    oldest_snapshot_date = datetime(2000, 1, 1)

    for hour in range(0, hourly_backups):
        target_backup_times.append(last_hour - timedelta(hours=hour))

    for day in range(0, daily_backups):
        target_backup_times.append(last_midnight - timedelta(days=day))

    for week in range(0, weekly_backups):
        target_backup_times.append(last_sunday - timedelta(weeks=week))

    for month in range(0, monthly_backups):
        target_backup_times.append(last_month - relativedelta(months=month))

    for quart in range(0, quarterly_backups):
        # quarters are three months apart
        target_backup_times.append(last_year - relativedelta(months=3 * quart))

    for year in range(0, yearly_backups):
        target_backup_times.append(other_years - relativedelta(years=year))

    one_day = timedelta(days=1)
    while start_of_month > oldest_snapshot_date:
        # append the start of the month to the list of snapshot dates to save:
        target_backup_times.append(start_of_month)
        # there's no timedelta setting for one month, so instead:
        # decrement the day by one, so we go to the final day of the
        # previous month...
        start_of_month -= one_day
        # ... and then go to the first day of that previous month:
        start_of_month = datetime(start_of_month.year,
                                  start_of_month.month, 1)

    # drop duplicate target times while preserving order
    temp = []
    for t in target_backup_times:
        if t not in temp:
            temp.append(t)
    target_backup_times = temp
    target_backup_times.reverse()  # make the oldest date first

    # get all the snapshots, sort them by date and time,
    # and organize them into one array for each volume:
    conn = get_region_conn(region.name)
    all_snapshots = conn.get_all_snapshots(owner='self')
    all_snapshots.sort(key=lambda snp: snp.start_time)  # oldest first

    snaps_for_each_volume = {}
    for snap in all_snapshots:
        # the snapshot name and the volume name are the same.
        # The snapshot name is set from the volume
        # name at the time the snapshot is taken
        volume_name = get_snap_vol(snap)

        if volume_name:
            # only examine snapshots that have a volume name
            snaps_for_volume = snaps_for_each_volume.get(volume_name)

            if not snaps_for_volume:
                snaps_for_volume = []
                snaps_for_each_volume[volume_name] = snaps_for_volume
            snaps_for_volume.append(snap)

    # Do a running comparison of snapshot dates to desired time periods,
    # keeping the oldest snapshot in each
    # time period and deleting the rest:
    for volume_name in snaps_for_each_volume:
        snaps = snaps_for_each_volume[volume_name]
        # never delete the newest snapshot, so remove it from consideration
        snaps = snaps[:-1]

        time_period_num = 0
        snap_found_for_this_time_period = False
        for snap in snaps:
            check_this_snap = True

            while (check_this_snap and
                   time_period_num < len(target_backup_times)):

                if get_snap_time(snap) < target_backup_times[time_period_num]:
                    # the snap date is before the cutoff date.
                    # Figure out if it's the first snap in this
                    # date range and act accordingly
                    # (since both the date ranges and the snapshots
                    # are sorted chronologically, we know this
                    # snapshot isn't in an earlier date range):
                    if snap_found_for_this_time_period:
                        if not snap.tags.get('preserve_snapshot'):
                            if dry_run:
                                logger.info('Dry-trimmed {0} {1} from {2}'
                                    .format(snap, snap.description,
                                    snap.start_time))
                            else:
                                # as long as the snapshot wasn't marked with
                                # the 'preserve_snapshot' tag, delete it:
                                try:
                                    conn.delete_snapshot(snap.id)
                                except EC2ResponseError as err:
                                    logger.exception(str(err))
                                else:
                                    logger.info('Trimmed {0} {1} from {2}'
                                        .format(snap, snap.description,
                                        snap.start_time))
                        # go on and look at the next snapshot,
                        # leaving the time period alone
                    else:
                        # this was the first snapshot found for this time
                        # period. Leave it alone and look at the next snapshot:
                        snap_found_for_this_time_period = True
                    check_this_snap = False
                else:
                    # the snap is after the cutoff date.
                    # Check it against the next cutoff date
                    time_period_num += 1
                    snap_found_for_this_time_period = False
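
A dry-run sketch of the purge (the region name is a hypothetical placeholder; the retention counts come from the purge_backups config section):

conn = get_region_conn('us-east-1')
# Log which snapshots would be trimmed without deleting anything:
_trim_snapshots(conn.region, dry_run=True)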
Example #6
def create_ami(region, snap_id, force=None, root_dev='/dev/sda1', zone_name=None,
               default_arch=None, default_type='t1.micro', security_groups=''):
    """
    Creates AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created ami image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automaticaly process snapshots for same instance with near
        time (10 minutes or shorter), but for other devices (/dev/sdb,
        /dev/sdc, etc);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [snp for snp in snaps if
        get_snap_instance(snp) == instance_id and
        get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # bare root device: instance is encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map, kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result, ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    new_instance = None
    if force == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region, image.id, inst_type=instance_type,
            security_groups=security_groups, zone_name=zone_name)
    return image, new_instance
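
A usage sketch (the region, snapshot id, and group names are hypothetical placeholders):

# Register the AMI and immediately launch an instance from it:
image, inst = create_ami('us-east-1', 'snap-12345678', force='RUN',
                         security_groups='default;backup')
logger.info('Registered {0}, launched {1}'.format(image.id, inst))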
Example #7
def create_ami(region,
               snap_id,
               force=None,
               root_dev='/dev/sda1',
               zone_name=None,
               default_arch=None,
               default_type='t1.micro',
               security_groups=''):
    """
    Creates AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created ami image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automaticaly process snapshots for same instance with near
        time (10 minutes or shorter), but for other devices (/dev/sdb,
        /dev/sdc, etc);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[snap_id])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [
        snp for snp in snaps if get_snap_instance(snp) == instance_id
        and get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)
    ]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # bare root device: instance is encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map,
        kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[result])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    info = ('\nEnter RUN if you want to launch instance using '
            'just created {0}: '.format(image))
    new_instance = None
    if force == 'RUN' or raw_input(info).strip() == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region,
            image.id,
            inst_type=instance_type,
            security_groups=security_groups,
            zone_name=zone_name)
    return image, new_instance
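
The only behavioral difference from Example #6 is the interactive confirmation: when force is not 'RUN', the operator is asked on stdin (via raw_input, so Python 2) before any instance launches, and the image is returned either way. A minimal sketch of the interactive path, with hypothetical identifiers:

# The operator must type RUN at the prompt for an instance to launch:
image, inst = create_ami('eu-west-1', 'snap-0abc1234')
if inst is None:
    logger.info('AMI {0} registered; no instance launched'.format(image.id))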