Example #1
def rotate_backups(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First week day (to calculate which weekly backup to keep, saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    deleted = []

    backups = Backups.search(filename, destination, profile=profile)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    to_delete = grandfatherson.to_delete(backups_date,
                                         days=int(rotate.conf["days"]),
                                         weeks=int(rotate.conf["weeks"]),
                                         months=int(rotate.conf["months"]),
                                         firstweekday=int(rotate.conf["first_week_day"]),
                                         now=datetime.utcnow())

    for delete_date in to_delete:
        backup_date = int(delete_date.strftime("%s"))
        backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile).get()
        if backup:
            real_key = backup.stored_filename
            log.info("Deleting {0}".format(real_key))

            storage_backend.delete(real_key)
            backup.set_deleted()
            deleted.append(real_key)

    BakSyncer(conf).sync_auto()

    return deleted
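All of the examples on this page wrap the same grandfatherson call. As a minimal sketch of that API on synthetic timestamps (the dates and retention counts below are made up for illustration):

from datetime import datetime, timedelta

from grandfatherson import to_delete, SATURDAY

# Pretend we took one backup per day for the last 60 days.
now = datetime(2014, 5, 16, 3, 0, 0)
backups = [now - timedelta(days=n) for n in range(60)]

# Keep 7 dailies, 4 weeklies and 3 monthlies; to_delete() returns the rest
# as a set of datetimes falling outside the retention policy.
stale = to_delete(backups, days=7, weeks=4, months=3,
                  firstweekday=SATURDAY, now=now)

for dt in sorted(stale):
    print("would delete backup taken at", dt)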
Example #2
    def apply_retention(self, backups_by_timestamp):
        '''
        Prune backups from the given dict, according to our retention policy
        '''
        # determine the "now" for retention...
        relative_now = datetime.now()
        if self.config['retention_lag']:
            relative_now = relative_now - timedelta(**self.interval_to_units(self.config['interval']))

        # convert our string timestamps into datetime objs, before providing them to grandfatherson
        backup_datetimes = [datetime.strptime(timestamp, self.date_format) for timestamp in backups_by_timestamp.keys()]
        datetimes_to_delete = to_delete(backup_datetimes, firstweekday=SUNDAY, now=relative_now, **self.config['retention'])
        del backup_datetimes[:]

        # convert returned datetime objects back into string timestamps
        timestamps_to_delete = [dt_object.strftime(self.date_format) for dt_object in datetimes_to_delete]
        datetimes_to_delete.clear()

        # remove backups by timestamp
        for timestamp in timestamps_to_delete:
            self.announce('Removing backup %s' % timestamp)
            if self.config['method'].lower() != 'local':
                self.driver.delete_object(backups_by_timestamp[timestamp])
            else:
                os.remove('%s/%s' % (self.config['container'], backups_by_timestamp[timestamp]))
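Example #2 leans on configuration and a helper method that are not shown here. Below is a hypothetical sketch of what self.config and interval_to_units() might look like, just to make the two ** expansions above concrete; the real project may shape them differently.

from datetime import datetime, timedelta

# Hypothetical shapes; the names mirror the attributes used in apply_retention().
config = {
    'interval': 'daily',                                 # how often backups run
    'retention_lag': True,                               # shift "now" back one interval
    'retention': {'days': 7, 'weeks': 4, 'months': 6},   # passed straight to to_delete()
}

def interval_to_units(interval):
    """Map an interval name to keyword arguments usable by timedelta()."""
    return {'hourly': {'hours': 1},
            'daily': {'days': 1},
            'weekly': {'weeks': 1}}[interval]

# The "relative now" the method builds before pruning:
relative_now = datetime.now()
if config['retention_lag']:
    relative_now -= timedelta(**interval_to_units(config['interval']))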
Example #3
def filter_delete_filename_list(filename_list, keeps):
    """
    获取需要删除的
    """
    time_to_filename = dict()

    for filename in filename_list:
        slist = filename.split('.')
        # like yb.20140516_114126.tar.gz

        if len(slist) < 2:
            # not a valid backup filename
            continue

        str_datetime = slist[1]

        try:
            dt = datetime.datetime.strptime(str_datetime, constants.STRFTIME_TPL)
            time_to_filename[dt] = filename
        except Exception as e:
            logger.error('exc occur. e: %s, filename: %s', e, filename, exc_info=True)

    # timestamps to delete; if there are several on the same day, only the first one is kept
    delete_times = to_delete(list(time_to_filename.keys()), **keeps)

    return [time_to_filename[it] for it in delete_times]
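A usage sketch for the function above, assuming constants.STRFTIME_TPL is '%Y%m%d_%H%M%S' (inferred from the yb.20140516_114126.tar.gz comment, not confirmed) and that the keeps dict uses grandfatherson keyword names:

filenames = [
    'yb.20140516_114126.tar.gz',
    'yb.20140515_114126.tar.gz',
    'yb.20140401_114126.tar.gz',
    'README',                       # no timestamp component, silently skipped
]

# Keep a week of dailies and a month of weeklies; everything else is returned.
stale = filter_delete_filename_list(filenames, keeps={'days': 7, 'weeks': 4})
print(stale)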
Example #4
def rotate_backups(filename, destination=DEFAULT_DESTINATION, **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First week day (to calculate which weekly backup to keep, saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination)
    rotate = RotationConfig(kwargs)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    deleted = []

    backups = match_filename(filename, destination, conf)
    backups_date = [backup["backup_date"] for backup in backups]

    to_delete = grandfatherson.to_delete(backups_date,
                                         days=int(rotate.conf["days"]),
                                         weeks=int(rotate.conf["weeks"]),
                                         months=int(rotate.conf["months"]),
                                         firstweekday=int(rotate.conf["first_week_day"]),
                                         now=datetime.utcnow())

    for key in backups:
        if key.get("backup_date") in to_delete:
            real_key = key.get("key")
            log.info("Deleting {0}".format(real_key))
            storage_backend.delete(real_key)
            deleted.append(real_key)

    return deleted
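The bakthat variants (Examples #1 and #4 and the ones further down) convert each returned datetime back to an epoch with strftime("%s"), which is a platform-specific directive not documented by Python and which applies the local timezone, even though the rotation is computed against datetime.utcnow(). A portable sketch of the same conversion, using only the standard library:

import calendar
from datetime import datetime

def to_epoch(dt):
    """Convert a naive UTC datetime to a Unix timestamp without strftime('%s')."""
    return calendar.timegm(dt.utctimetuple())

print(to_epoch(datetime(2014, 5, 16, 11, 41, 26)))  # 1400240486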
Example #5
    def simulate(self):
        '''
        Simulate when backups would be created, retained, and deleted from one year in the past to now
        '''
        units = self.interval_to_units(self.config['interval'])
        unit_name = next(iter(units))
        self.announce('Simulating a backup every %s %s' % (units[unit_name], unit_name))

        self.announce('Retention policy:')
        for unit in ['hours', 'days', 'weeks', 'months', 'years']:
            if unit in self.config['retention'] and self.config['retention'][unit]:
                self.announce('\t%s %s' % (self.config['retention'][unit], unit))

        end_date = datetime.now()
        start_date = end_date - timedelta(days=365)

        created = dict()
        deleted = dict()
        existing = dict()
        filename = dict()

        for current_date in self.perdelta(start_date, end_date, self.config['interval']):
            # simulate backup creation
            current_date_key = str(current_date)
            created[current_date_key] = current_date
            existing[current_date_key] = current_date
            filename[current_date_key] = '%s-%s.%s' % (self.slug, current_date.strftime(self.date_format), self.backup_format)

            # simulate backup deletions
            relative_now = current_date
            if self.config['retention_lag']:
                relative_now = relative_now - timedelta(**units)
            for datestamp in to_delete(existing.values(), now=relative_now, firstweekday=SUNDAY, **self.config['retention']):
                datestamp_key = str(datestamp)
                if datestamp_key in existing:
                    del existing[datestamp_key]
                    deleted[datestamp_key] = current_date

        tabulated_results = list()

        for date_key in sorted(created.keys()):
            new_row = list()

            new_row.append(created[date_key])
            new_row.append(deleted[date_key] if date_key in deleted else None)
            new_row.append((deleted[date_key] if date_key in deleted else end_date) - created[date_key])
            new_row.append(filename[date_key])

            tabulated_results.append(new_row)

        self.announce(tabulate(tabulated_results, headers=["created on", "deleted on", "retained for", "filename"]))
Example #6
    def rotate(self, filepath, keyname, days, weeks, months):
        """
        Upload an object to the HCP, rotating old files if needed
        """
        objects = self.list()
        del_list = sorted(to_delete(objects.keys(), days=days,
                                    weeks=weeks, months=months,
                                    firstweekday=MONDAY))
        for timestamp in del_list:
            name = objects[timestamp]
            message = "removing old object: {}".format(name)
            logging.debug(message)
            self.delete(name)

        message = "adding new object: {}".format(keyname)
        logging.debug(message)
        self.upload(filepath, keyname)
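The examples disagree on firstweekday: the bakthat functions default to Saturday, Example #2 passes SUNDAY and the rotate() method above uses MONDAY. A quick sketch to compare how that choice shifts which weekly backups survive; the constants come from the calendar module, which uses the same Monday=0 numbering grandfatherson expects.

from calendar import MONDAY, SATURDAY
from datetime import datetime, timedelta

from grandfatherson import to_delete

now = datetime(2014, 5, 16)
backups = [now - timedelta(days=n) for n in range(28)]   # four weeks of dailies

for first_day, label in ((MONDAY, 'Monday'), (SATURDAY, 'Saturday')):
    stale = to_delete(backups, weeks=4, firstweekday=first_day, now=now)
    kept = sorted(set(backups) - set(stale))
    print('weeks starting %s keep: %s' % (label, [d.date().isoformat() for d in kept]))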
Example #7
File: utils.py Project: ljdog/yunbk
def filter_delete_filename_list(filename_list, keeps):
    """
    获取需要删除的
    """
    time_to_filename = dict()

    for filename in filename_list:
        slist = filename.split('.')
        # like yb.20140516_114126.tar.gz

        str_datetime = slist[1]

        dt = datetime.datetime.strptime(str_datetime, constants.STRFTIME_TPL)

        time_to_filename[dt] = filename

    # timestamps to delete; if there are several on the same day, only the first one is kept
    delete_times = to_delete(time_to_filename.keys(), **keeps)

    return [time_to_filename[it] for it in delete_times]
Example #8
File: utils.py Project: CDCml/yunbk
def filter_delete_filename_list(filename_list, keeps):
    """
    获取需要删除的
    """
    time_to_filename = dict()

    for filename in filename_list:
        slist = filename.split('.')
        if len(slist) < 2:
            # like yb.20140516_114126.tar
            continue

        str_datetime = slist[-2]

        dt = datetime.datetime.strptime(str_datetime, constants.STRFTIME_TPL)

        time_to_filename[dt] = filename

    # timestamps to delete; if there are several on the same day, only the first one is kept
    delete_times = to_delete(time_to_filename.keys(), **keeps)

    return [time_to_filename[it] for it in delete_times]
Example #9
def rotate_backups(filename,
                   destination=None,
                   profile="default",
                   config=CONFIG_FILE,
                   **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First week day (to calculate which weekly backup to keep, saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(
        config, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception(
            "You must run bakthat configure_backups_rotation or provide rotation configuration."
        )

    deleted = []

    backups = Backups.search(filename,
                             destination,
                             profile=profile,
                             config=config)
    backups_date = [
        datetime.fromtimestamp(float(backup.backup_date)) for backup in backups
    ]

    rotate_kwargs = rotate.conf.copy()
    del rotate_kwargs["first_week_day"]
    for k, v in rotate_kwargs.items():
        rotate_kwargs[k] = int(v)
    rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
    rotate_kwargs["now"] = datetime.utcnow()

    to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)
    for delete_date in to_delete:
        try:
            backup_date = int(delete_date.strftime("%s"))
            backup = Backups.search(filename,
                                    destination,
                                    backup_date=backup_date,
                                    profile=profile,
                                    config=config).get()

            if backup:
                real_key = backup.stored_filename
                log.info("Deleting {0}".format(real_key))

                storage_backend.delete(real_key)
                backup.set_deleted()
                deleted.append(real_key)
        except Exception as exc:
            log.error("Error when deleting {0}".format(backup))
            log.exception(exc)
Example #10
def rotate_backups(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First week day (to calculate which weekly backup to keep, saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    session_id = str(uuid.uuid4())
    events.before_rotate_backups(session_id)

    deleted = []

    backups = Backups.search(filename, destination, profile=profile, config=config)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    rotate_kwargs = rotate.conf.copy()
    del rotate_kwargs["first_week_day"]
    for k, v in rotate_kwargs.items():
        rotate_kwargs[k] = int(v)
    rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
    rotate_kwargs["now"] = datetime.utcnow()

    to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)
    for delete_date in to_delete:
        try:
            backup_date = int(delete_date.strftime("%s"))
            backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile, config=config).get()

            if backup:
                real_key = backup.stored_filename
                log.info("Deleting {0}".format(real_key))

                storage_backend.delete(real_key)
                backup.set_deleted()
                deleted.append(backup)
        except Exception as exc:
            log.error("Error when deleting {0}".format(backup))
            log.exception(exc)
Example #11
def rotate_backups(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First week day (to calculate which weekly backup to keep, saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    deleted = []

    query = "SELECT backup_date FROM backups WHERE backend == '{0}' \
            AND filename LIKE '{1}%' AND is_deleted == 0".format(
        destination, filename
    )

    backups = dump_truck.execute(query)
    backups_date = [datetime.fromtimestamp(float(backup["backup_date"])) for backup in backups]

    to_delete = grandfatherson.to_delete(
        backups_date,
        days=int(rotate.conf["days"]),
        weeks=int(rotate.conf["weeks"]),
        months=int(rotate.conf["months"]),
        firstweekday=int(rotate.conf["first_week_day"]),
        now=datetime.utcnow(),
    )

    for delete_date in to_delete:
        backup_date = int(delete_date.strftime("%s"))
        query = "SELECT stored_filename FROM backups WHERE backend == '{0}' \
                AND filename LIKE '{1}%' AND backup_date == {2:d} \
                AND is_deleted == 0".format(
            destination, filename, backup_date
        )
        backups = dump_truck.execute(query)
        if backups:
            real_key = backups[0].get("stored_filename")
            log.info("Deleting {0}".format(real_key))
            storage_backend.delete(real_key)
            dump_truck_delete_backup(real_key)
            deleted.append(real_key)

    return deleted
Example #12
def filter_delete_filename_list(filename_list, keeps):
    """
    获取需要删除的
    """
    time_to_filename = dict()

    for filename in filename_list:
        slist = filename.split('.')
        # like yb.20140516_114126.tar.gz

        if len(slist) < 2:
            # not a valid backup filename
            continue

        str_datetime = slist[1]

        try:
            dt = datetime.datetime.strptime(str_datetime,
                                            constants.STRFTIME_TPL)
            time_to_filename[dt] = filename
        except Exception as e:
            logger.error('exc occur. e: %s, filename: %s',
                         e,
                         filename,
                         exc_info=True)

    # timestamps to delete; if there are several on the same day, only the first one is kept
    delete_times = to_delete(time_to_filename.keys(), **keeps)

    return [time_to_filename[it] for it in delete_times]
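Putting the pattern together, here is a self-contained sketch of a local rotation pass in the style of the yunbk examples; the directory and timestamp format are assumptions, so adjust both for a real deployment.

import os
from datetime import datetime

from grandfatherson import to_delete, SATURDAY

BACKUP_DIR = '/var/backups/yb'          # assumed location
TIMESTAMP_FORMAT = '%Y%m%d_%H%M%S'      # assumed, matches yb.20140516_114126.tar.gz

def rotate_local_backups(days=7, weeks=4, months=6):
    """Delete local backups that fall outside the GFS retention policy."""
    dt_to_path = {}
    for name in os.listdir(BACKUP_DIR):
        parts = name.split('.')
        if len(parts) < 2:
            continue
        try:
            dt = datetime.strptime(parts[1], TIMESTAMP_FORMAT)
        except ValueError:
            continue                     # not a timestamped backup, leave it alone
        dt_to_path[dt] = os.path.join(BACKUP_DIR, name)

    stale = to_delete(list(dt_to_path), days=days, weeks=weeks, months=months,
                      firstweekday=SATURDAY, now=datetime.now())
    for dt in sorted(stale):
        os.remove(dt_to_path[dt])
    return sorted(stale)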