Example no. 1
0
def _trim_snapshots(region, dry_run=False):

    """Delete snapshots back in time in logarithmic manner.

    region
        region whose snapshots will be examined;
    dry_run
        just print snapshots to be deleted instead of deleting them.

    Modified version of the `boto.ec2.connection.trim_snapshots
    <http://pypi.python.org/pypi/boto/2.0>_`. Licensed under MIT license
    by Mitch Garnaat, 2011."""
    hourly_backups = config.getint('purge_backups', 'HOURLY_BACKUPS')
    daily_backups = config.getint('purge_backups', 'DAILY_BACKUPS')
    weekly_backups = config.getint('purge_backups', 'WEEKLY_BACKUPS')
    monthly_backups = config.getint('purge_backups', 'MONTHLY_BACKUPS')
    quarterly_backups = config.getint('purge_backups', 'QUARTERLY_BACKUPS')
    yearly_backups = config.getint('purge_backups', 'YEARLY_BACKUPS')

    # Work with UTC time, which is what the snapshot start time is
    # reported in.
    now = datetime.utcnow()
    last_hour = datetime(now.year, now.month, now.day, now.hour)
    last_midnight = datetime(now.year, now.month, now.day)
    last_sunday = datetime(now.year, now.month,
          now.day) - timedelta(days=(now.weekday() + 1) % 7)
    # FIX: these anchors previously used datetime.now() (local time),
    # silently mixing local and UTC timestamps; derive them from the UTC
    # `now` instead.
    last_month = now - relativedelta(months=1)
    last_year = now - relativedelta(years=1)
    other_years = now - relativedelta(years=2)
    start_of_month = datetime(now.year, now.month, 1)

    target_backup_times = []
    # There are no snapshots older than 1/1/2000.
    oldest_snapshot_date = datetime(2000, 1, 1)

    for hour in range(hourly_backups):
        target_backup_times.append(last_hour - timedelta(hours=hour))

    for day in range(daily_backups):
        target_backup_times.append(last_midnight - timedelta(days=day))

    for week in range(weekly_backups):
        target_backup_times.append(last_sunday - timedelta(weeks=week))

    for month in range(monthly_backups):
        target_backup_times.append(last_month - relativedelta(months=month))

    # FIX: a quarter is 3 months; the previous code stepped 4 months per
    # "quarter".
    for quart in range(quarterly_backups):
        target_backup_times.append(last_year - relativedelta(months=3 * quart))

    for year in range(yearly_backups):
        target_backup_times.append(other_years - relativedelta(years=year))

    one_day = timedelta(days=1)
    while start_of_month > oldest_snapshot_date:
        # Always keep the first snapshot of every month.
        target_backup_times.append(start_of_month)
        # There's no timedelta for "one month": step back one day to land
        # on the final day of the previous month, then snap to its first
        # day.
        start_of_month -= one_day
        start_of_month = datetime(start_of_month.year,
                               start_of_month.month, 1)

    # Deduplicate and order targets oldest-first.  FIX: the previous code
    # only reversed the (not globally monotonic) append order, so the
    # list was not actually sorted ascending, which the walk below
    # requires; upstream boto sorts here as well.
    target_backup_times = sorted(set(target_backup_times))

    # Get all our snapshots, oldest first, and organize them into one
    # list per volume.
    conn = get_region_conn(region.name)
    all_snapshots = conn.get_all_snapshots(owner='self')
    # `start_time` strings are ISO-8601, so lexicographic order is
    # chronological order (equivalent to the old cmp= sort, which is
    # Python-2-only).
    all_snapshots.sort(key=lambda snap: snap.start_time)

    snaps_for_each_volume = {}
    for snap in all_snapshots:
        # The snapshot name and the volume name are the same: the
        # snapshot name is set from the volume name at the time the
        # snapshot is taken.
        volume_name = get_snap_vol(snap)
        if volume_name:
            # Only examine snapshots that have a volume name.
            snaps_for_each_volume.setdefault(volume_name, []).append(snap)

    # Do a running comparison of snapshot dates to desired time periods,
    # keeping the oldest snapshot in each time period and deleting the
    # rest.
    for volume_name in snaps_for_each_volume:
        snaps = snaps_for_each_volume[volume_name]
        # Never delete the newest snapshot, so remove it from
        # consideration.
        snaps = snaps[:-1]

        time_period_num = 0
        snap_found_for_this_time_period = False
        for snap in snaps:
            check_this_snap = True

            while (check_this_snap and
                   time_period_num < len(target_backup_times)):

                if get_snap_time(snap) < target_backup_times[time_period_num]:
                    # The snap date is before the cutoff date, so it
                    # belongs to the current time period (since both the
                    # date ranges and the snapshots are sorted
                    # chronologically, it cannot belong to an earlier
                    # one).
                    if snap_found_for_this_time_period:
                        # Not the first (oldest) snapshot of this period:
                        # delete it unless it is explicitly preserved.
                        if not snap.tags.get('preserve_snapshot'):
                            if dry_run:
                                logger.info('Dry-trimmed {0} {1} from {2}'
                                    .format(snap, snap.description,
                                    snap.start_time))
                            else:
                                # As long as the snapshot wasn't marked
                                # with the 'preserve_snapshot' tag,
                                # delete it:
                                try:
                                    conn.delete_snapshot(snap.id)
                                except EC2ResponseError as err:
                                    logger.exception(str(err))
                                else:
                                    logger.info('Trimmed {0} {1} from {2}'
                                        .format(snap, snap.description,
                                        snap.start_time))
                       # Go on and look at the next snapshot, leaving the
                       # time period alone.
                    else:
                        # This was the first snapshot found for this time
                        # period. Leave it alone and look at the next
                        # snapshot:
                        snap_found_for_this_time_period = True
                    check_this_snap = False
                else:
                    # The snap is after the cutoff date. Check it against
                    # the next cutoff date.
                    time_period_num += 1
                    snap_found_for_this_time_period = False
Example no. 2
0
def _trim_snapshots(region, dry_run=False):

    """Delete snapshots back in time in logarithmic manner.

    region
        region whose snapshots will be examined;
    dry_run
        just print snapshots to be deleted instead of deleting them.

    Modified version of the `boto.ec2.connection.trim_snapshots
    <http://pypi.python.org/pypi/boto/2.0>_`. Licensed under MIT license
    by Mitch Garnaat, 2011."""
    hourly_backups = config.getint('purge_backups', 'HOURLY_BACKUPS')
    daily_backups = config.getint('purge_backups', 'DAILY_BACKUPS')
    weekly_backups = config.getint('purge_backups', 'WEEKLY_BACKUPS')
    monthly_backups = config.getint('purge_backups', 'MONTHLY_BACKUPS')
    quarterly_backups = config.getint('purge_backups', 'QUARTERLY_BACKUPS')
    yearly_backups = config.getint('purge_backups', 'YEARLY_BACKUPS')

    # Work with UTC time, which is what the snapshot start time is
    # reported in.
    now = datetime.utcnow()
    last_hour = datetime(now.year, now.month, now.day, now.hour)
    last_midnight = datetime(now.year, now.month, now.day)
    last_sunday = datetime(now.year, now.month,
          now.day) - timedelta(days=(now.weekday() + 1) % 7)
    # FIX: these anchors previously used datetime.now() (local time),
    # silently mixing local and UTC timestamps; derive them from the UTC
    # `now` instead.
    last_month = now - relativedelta(months=1)
    last_year = now - relativedelta(years=1)
    other_years = now - relativedelta(years=2)
    start_of_month = datetime(now.year, now.month, 1)

    target_backup_times = []
    # There are no snapshots older than 1/1/2000.
    oldest_snapshot_date = datetime(2000, 1, 1)

    for hour in range(hourly_backups):
        target_backup_times.append(last_hour - timedelta(hours=hour))

    for day in range(daily_backups):
        target_backup_times.append(last_midnight - timedelta(days=day))

    for week in range(weekly_backups):
        target_backup_times.append(last_sunday - timedelta(weeks=week))

    for month in range(monthly_backups):
        target_backup_times.append(last_month - relativedelta(months=month))

    # FIX: a quarter is 3 months; the previous code stepped 4 months per
    # "quarter".
    for quart in range(quarterly_backups):
        target_backup_times.append(last_year - relativedelta(months=3 * quart))

    for year in range(yearly_backups):
        target_backup_times.append(other_years - relativedelta(years=year))

    one_day = timedelta(days=1)
    while start_of_month > oldest_snapshot_date:
        # Always keep the first snapshot of every month.
        target_backup_times.append(start_of_month)
        # There's no timedelta for "one month": step back one day to land
        # on the final day of the previous month, then snap to its first
        # day.
        start_of_month -= one_day
        start_of_month = datetime(start_of_month.year,
                               start_of_month.month, 1)

    # Deduplicate and order targets oldest-first.  FIX: the previous code
    # only reversed the (not globally monotonic) append order, so the
    # list was not actually sorted ascending, which the walk below
    # requires; upstream boto sorts here as well.
    target_backup_times = sorted(set(target_backup_times))

    # Get all our snapshots, oldest first, and organize them into one
    # list per volume.
    conn = get_region_conn(region.name)
    all_snapshots = conn.get_all_snapshots(owner='self')
    # `start_time` strings are ISO-8601, so lexicographic order is
    # chronological order (equivalent to the old cmp= sort, which is
    # Python-2-only).
    all_snapshots.sort(key=lambda snap: snap.start_time)

    snaps_for_each_volume = {}
    for snap in all_snapshots:
        # The snapshot name and the volume name are the same: the
        # snapshot name is set from the volume name at the time the
        # snapshot is taken.
        volume_name = get_snap_vol(snap)
        if volume_name:
            # Only examine snapshots that have a volume name.
            snaps_for_each_volume.setdefault(volume_name, []).append(snap)

    # Do a running comparison of snapshot dates to desired time periods,
    # keeping the oldest snapshot in each time period and deleting the
    # rest.
    for volume_name in snaps_for_each_volume:
        snaps = snaps_for_each_volume[volume_name]
        # Never delete the newest snapshot, so remove it from
        # consideration.
        snaps = snaps[:-1]

        time_period_num = 0
        snap_found_for_this_time_period = False
        for snap in snaps:
            check_this_snap = True

            while (check_this_snap and
                   time_period_num < len(target_backup_times)):

                if get_snap_time(snap) < target_backup_times[time_period_num]:
                    # The snap date is before the cutoff date, so it
                    # belongs to the current time period (since both the
                    # date ranges and the snapshots are sorted
                    # chronologically, it cannot belong to an earlier
                    # one).
                    if snap_found_for_this_time_period:
                        # Not the first (oldest) snapshot of this period:
                        # delete it unless it is explicitly preserved.
                        if not snap.tags.get('preserve_snapshot'):
                            if dry_run:
                                logger.info('Dry-trimmed {0} {1} from {2}'
                                    .format(snap, snap.description,
                                    snap.start_time))
                            else:
                                # As long as the snapshot wasn't marked
                                # with the 'preserve_snapshot' tag,
                                # delete it:
                                try:
                                    conn.delete_snapshot(snap.id)
                                except EC2ResponseError as err:
                                    logger.exception(str(err))
                                else:
                                    logger.info('Trimmed {0} {1} from {2}'
                                        .format(snap, snap.description,
                                        snap.start_time))
                       # Go on and look at the next snapshot, leaving the
                       # time period alone.
                    else:
                        # This was the first snapshot found for this time
                        # period. Leave it alone and look at the next
                        # snapshot:
                        snap_found_for_this_time_period = True
                    check_this_snap = False
                else:
                    # The snap is after the cutoff date. Check it against
                    # the next cutoff date.
                    time_period_num += 1
                    snap_found_for_this_time_period = False
Example no. 3
0
    get_descr_attr, get_inst_by_id, get_region_conn, get_snap_device,
    get_snap_time, get_snap_vol, timestamp, wait_for, wait_for_sudo)


# Remote SSH user name read from the project configuration.
USERNAME = config.get('DEFAULT', 'USERNAME')
# Configure the shared `env` (presumably Fabric's global environment --
# TODO confirm) with the SSH user and disabled known-hosts checking.
env.update({'user': USERNAME, 'disable_known_hosts': True})

logger = logging.getLogger(__name__)


# Default tag name/value pair applied to managed EC2 resources.
DEFAULT_TAG_NAME = config.get('DEFAULT', 'TAG_NAME')
DEFAULT_TAG_VALUE = config.get('DEFAULT', 'TAG_VALUE')
# Tag key under which a human-readable description is stored.
DESCRIPTION_TAG = 'Description'
SNAP_STATUSES = ['pending', 'completed']    # All but "error".
VOL_STATUSES = ['creating', 'available', 'in-use']
# Wait timeouts converted from configured minutes to seconds.
DETACH_TIME = config.getint('DEFAULT', 'MINUTES_FOR_DETACH') * 60
SNAP_TIME = config.getint('DEFAULT', 'MINUTES_FOR_SNAP') * 60
# NOTE(review): configured replication speed; units not visible here --
# verify against the code that consumes it.
REPLICATION_SPEED = config.getfloat('DEFAULT', 'REPLICATION_SPEED')


class ReplicationCollisionError(Exception):
    """Signal a collision between concurrent replication operations."""


def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously
Example no. 4
0
    get_descr_attr, get_inst_by_id, get_region_conn, get_snap_device,
    get_snap_time, get_snap_vol, timestamp, wait_for, wait_for_sudo)


# Remote SSH user name read from the project configuration.
USERNAME = config.get('DEFAULT', 'USERNAME')
# Configure the shared `env` (presumably Fabric's global environment --
# TODO confirm) with the SSH user and disabled known-hosts checking.
env.update({'user': USERNAME, 'disable_known_hosts': True})

logger = logging.getLogger(__name__)


# Default tag name/value pair applied to managed EC2 resources.
DEFAULT_TAG_NAME = config.get('DEFAULT', 'TAG_NAME')
DEFAULT_TAG_VALUE = config.get('DEFAULT', 'TAG_VALUE')
# Tag key under which a human-readable description is stored.
DESCRIPTION_TAG = 'Description'
SNAP_STATUSES = ['pending', 'completed']    # All but "error".
VOL_STATUSES = ['creating', 'available', 'in-use']
# Wait timeouts converted from configured minutes to seconds.
DETACH_TIME = config.getint('DEFAULT', 'MINUTES_FOR_DETACH') * 60
SNAP_TIME = config.getint('DEFAULT', 'MINUTES_FOR_SNAP') * 60
# NOTE(review): configured replication speed; units not visible here --
# verify against the code that consumes it.
REPLICATION_SPEED = config.getfloat('DEFAULT', 'REPLICATION_SPEED')


class ReplicationCollisionError(Exception):
    """Signal a collision between concurrent replication operations."""


def create_snapshot(vol, description='', tags=None, synchronously=True,
                    consistent=False):
    """Return new snapshot for the volume.

    vol
        volume to snapshot;
    synchronously