Example #1
def rotate_backups(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First weekday (used to determine which weekly backup to keep; Saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    deleted = []

    backups = Backups.search(filename, destination, profile=profile)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    to_delete = grandfatherson.to_delete(backups_date,
                                         days=int(rotate.conf["days"]),
                                         weeks=int(rotate.conf["weeks"]),
                                         months=int(rotate.conf["months"]),
                                         firstweekday=int(rotate.conf["first_week_day"]),
                                         now=datetime.utcnow())

    for delete_date in to_delete:
        backup_date = int(delete_date.strftime("%s"))
        backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile).get()
        if backup:
            real_key = backup.stored_filename
            log.info("Deleting {0}".format(real_key))

            storage_backend.delete(real_key)
            backup.set_deleted()
            deleted.append(real_key)

    BakSyncer(conf).sync_auto()

    return deleted
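
The actual grandfather-father-son selection is delegated to the grandfatherson package. Below is a minimal, hedged sketch of how its to_delete helper behaves with the same keyword arguments used above; the retention counts, the dates, and the availability of the package (pip install grandfatherson) are assumptions for illustration.

from calendar import SATURDAY  # == 5, the same convention as first_week_day above
from datetime import datetime, timedelta

import grandfatherson  # assumed installed: pip install grandfatherson

now = datetime(2013, 6, 1)
# Pretend one backup was taken every day for the last 30 days.
backup_dates = [now - timedelta(days=n) for n in range(30)]

# Keep 7 daily, 4 weekly and 3 monthly backups; everything else is returned for deletion.
to_delete = grandfatherson.to_delete(backup_dates,
                                     days=7, weeks=4, months=3,
                                     firstweekday=SATURDAY,
                                     now=now)
print(sorted(to_delete))
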
Example #2
def show(query="",
         destination="",
         tags="",
         profile="default",
         config=CONFIG_FILE):
    backups = Backups.search(query,
                             destination,
                             profile=profile,
                             tags=tags,
                             config=config)
    _display_backups(backups)
Example #3
def delete_older_than(filename,
                      interval,
                      profile="default",
                      config=CONFIG_FILE,
                      destination=None,
                      **kwargs):
    """Delete backups matching the given filename older than the given interval string.

    :type filename: str
    :param filename: File/directory name.

    :type interval: str
    :param interval: Interval string like 1M, 1W, 1M3W4h2s...
        (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).

    :type destination: str
    :param destination: glacier|s3|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(
        config, destination, profile)

    session_id = str(uuid.uuid4())
    events.before_delete_older_than(session_id)

    interval_seconds = _interval_string_to_seconds(interval)

    deleted = []

    backup_date_filter = int(
        datetime.utcnow().strftime("%s")) - interval_seconds
    for backup in Backups.search(filename,
                                 destination,
                                 older_than=backup_date_filter,
                                 profile=profile,
                                 config=config):
        real_key = backup.stored_filename
        log.info("Deleting {0}".format(real_key))

        storage_backend.delete(real_key)
        backup.set_deleted()
        deleted.append(backup)

    events.on_delete_older_than(session_id, deleted)

    return deleted
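
_interval_string_to_seconds itself is not shown in these examples. The following is a hedged sketch of what such a parser could look like, based only on the unit letters documented in the docstring; the helper's real name, behaviour, and the month/year approximations are assumptions.

import re

# Seconds per unit letter; months and years are rough approximations (assumption).
_UNIT_SECONDS = {
    "s": 1, "m": 60, "h": 3600,
    "D": 86400, "W": 7 * 86400, "M": 30 * 86400, "Y": 365 * 86400,
}

def interval_string_to_seconds(interval):
    """Convert an interval string like "1M3W4h2s" into a number of seconds."""
    total = 0
    for amount, unit in re.findall(r"(\d+)([smhDWMY])", interval):
        total += int(amount) * _UNIT_SECONDS[unit]
    return total

assert interval_string_to_seconds("1W") == 7 * 86400
assert interval_string_to_seconds("1M3W4h2s") == 30 * 86400 + 3 * 7 * 86400 + 4 * 3600 + 2
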
Example #4
    def sync(self):
        """Draft for implementing bakthat clients (hosts) backups data synchronization.

        Synchronize Bakthat sqlite database via a HTTP POST request.

        Backups are never really deleted from sqlite database, we just update the is_deleted key.

        It sends the last server sync timestamp along with data updated since last sync.
        Then the server return backups that have been updated on the server since last sync.

        On both sides, backups are either created if they don't exists or updated if the incoming version is newer.
        """
        log.debug("Start syncing")

        self.register()

        last_sync_ts = Config.get_key("sync_ts", 0)
        to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
        data = dict(sync_ts=last_sync_ts, to_insert_in_mongo=to_insert_in_mongo)
        r_kwargs = self.request_kwargs.copy()
        log.debug("Initial payload: {0}".format(data))
        r_kwargs.update({"data": json.dumps(data)})
        r = requests.post(self.get_resource("backups/sync/status"), **r_kwargs)
        if r.status_code != 200:
            log.error("An error occured during sync: {0}".format(r.text))
            return

        log.debug("Sync result: {0}".format(r.json()))
        to_insert_in_bakthat = r.json().get("to_insert_in_bakthat")
        sync_ts = r.json().get("sync_ts")
        for newbackup in to_insert_in_bakthat:
            sqlite_backup = Backups.match_filename(newbackup["stored_filename"], newbackup["backend"])
            if sqlite_backup and newbackup["last_updated"] > sqlite_backup.last_updated:
                log.debug("Upsert {0}".format(newbackup))
                Backups.upsert(**newbackup)
            elif not sqlite_backup:
                log.debug("Create backup {0}".format(newbackup))
                Backups.create(**newbackup)

        Config.set_key("sync_ts", sync_ts)

        log.debug("Sync succcesful")
Example #5
def delete_older_than(filename, interval, profile="default", config=CONFIG_FILE, destination=None, **kwargs):
    """Delete backups matching the given filename older than the given interval string.

    :type filename: str
    :param filename: File/directory name.

    :type interval: str
    :param interval: Interval string like 1M, 1W, 1M3W4h2s...
        (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).

    :type destination: str
    :param destination: glacier|s3|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)

    session_id = str(uuid.uuid4())
    events.before_delete_older_than(session_id)

    interval_seconds = _interval_string_to_seconds(interval)

    deleted = []

    backup_date_filter = int(datetime.utcnow().strftime("%s")) - interval_seconds
    for backup in Backups.search(filename, destination, older_than=backup_date_filter, profile=profile, config=config):
        real_key = backup.stored_filename
        log.info("Deleting {0}".format(real_key))

        storage_backend.delete(real_key)
        backup.set_deleted()
        deleted.append(backup)

    events.on_delete_older_than(session_id, deleted)

    return deleted
Example #6
    def sync(self):
        """Draft for implementing bakthat clients (hosts) backups data synchronization.

        Synchronize Bakthat sqlite database via a HTTP POST request.

        Backups are never really deleted from sqlite database, we just update the is_deleted key.

        It sends the last server sync timestamp along with data updated since last sync.
        Then the server return backups that have been updated on the server since last sync.

        On both sides, backups are either created if they don't exists or updated if the incoming version is newer.
        """
        log.debug("Start syncing")

        self.register()

        last_sync_ts = Config.get_key("sync_ts", 0)
        to_insert_in_mongo = [
            b._data for b in Backups.search(last_updated_gt=last_sync_ts)
        ]
        data = dict(sync_ts=last_sync_ts, new=to_insert_in_mongo)
        r_kwargs = self.request_kwargs.copy()
        log.debug("Initial payload: {0}".format(data))
        r_kwargs.update({"data": json.dumps(data)})
        r = requests.post(self.get_resource("backups/sync"), **r_kwargs)
        if r.status_code != 200:
            log.error("An error occured during sync: {0}".format(r.text))
            return

        log.debug("Sync result: {0}".format(r.json()))
        to_insert_in_bakthat = r.json().get("updated", [])
        sync_ts = r.json().get("sync_ts")
        for newbackup in to_insert_in_bakthat:
            log.debug("Upsert {0}".format(newbackup))
            Backups.upsert(**newbackup)

        Config.set_key("sync_ts", sync_ts)

        log.debug("Sync succcesful")
Example #7
    def sync(self):
        """Draft for implementing bakthat clients (hosts) backups data synchronization.

        Synchronize Bakthat sqlite database via a HTTP POST request.

        Backups are never really deleted from sqlite database, we just update the is_deleted key.

        It sends the last server sync timestamp along with data updated since last sync.
        Then the server return backups that have been updated on the server since last sync.

        Both side (bakthat and the sync server) make upserts of the latest data avaible:
        - if it doesn't exist yet, it will be created.
        - if it has been modified (e.g deleted, since it's the only action we can take) we update it.
        """
        log.debug("Start syncing")

        self.register()

        last_sync_ts = Config.get_key("sync_ts", 0)
        to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
        data = dict(sync_ts=last_sync_ts, to_insert_in_mongo=to_insert_in_mongo)
        r_kwargs = self.request_kwargs.copy()
        log.debug("Initial payload: {0}".format(data))
        r_kwargs.update({"data": json.dumps(data)})
        r = requests.post(self.get_resource("backups/sync/status"), **r_kwargs)
        if r.status_code != 200:
            log.error("An error occured during sync: {0}".format(r.text))
            return

        log.debug("Sync result: {0}".format(r.json()))
        to_insert_in_bakthat = r.json().get("to_insert_in_bakthat")
        sync_ts = r.json().get("sync_ts")
        for newbackup in to_insert_in_bakthat:
            log.debug("Upsert {0}".format(newbackup))
            Backups.upsert(**newbackup)

        Config.set_key("sync_ts", sync_ts)

        log.debug("Sync succcesful")
Example #8
def delete_older_than(filename, interval, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Delete backups matching the given filename older than the given interval string.

    :type filename: str
    :param filename: File/directory name.

    :type interval: str
    :param interval: Interval string like 1M, 1W, 1M3W4h2s...
        (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).

    :type destination: str
    :param destination: glacier|s3

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    conf = kwargs.get("conf")
    storage_backend = _get_store_backend(conf, destination, profile)
    interval_seconds = _interval_string_to_seconds(interval)

    deleted = []

    backup_date_filter = int(datetime.utcnow().strftime("%s")) - interval_seconds
    for backup in Backups.search(filename, destination, older_than=backup_date_filter, profile=profile):
        real_key = backup.stored_filename
        log.info("Deleting {0}".format(real_key))

        storage_backend.delete(real_key)
        backup.set_deleted()
        deleted.append(real_key)

    BakSyncer(conf).sync_auto()

    return deleted
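
A hedged usage sketch, assuming delete_older_than is importable from the bakthat package at module level as these examples suggest; the file name, interval, and destination are illustrative.

import bakthat

# Delete S3 backups of "mydir" older than one month and two weeks.
deleted = bakthat.delete_older_than("mydir", "1M2W", destination="s3")
for key in deleted:
    print(key)
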
Example #9
def rotate_backups(filename,
                   destination=None,
                   profile="default",
                   config=CONFIG_FILE,
                   **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First weekday (used to determine which weekly backup to keep; Saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(
        config, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception(
            "You must run bakthat configure_backups_rotation or provide rotation configuration."
        )

    deleted = []

    backups = Backups.search(filename,
                             destination,
                             profile=profile,
                             config=config)
    backups_date = [
        datetime.fromtimestamp(float(backup.backup_date)) for backup in backups
    ]

    rotate_kwargs = rotate.conf.copy()
    del rotate_kwargs["first_week_day"]
    for k, v in rotate_kwargs.items():
        rotate_kwargs[k] = int(v)
    rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
    rotate_kwargs["now"] = datetime.utcnow()

    to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)
    for delete_date in to_delete:
        try:
            backup_date = int(delete_date.strftime("%s"))
            backup = Backups.search(filename,
                                    destination,
                                    backup_date=backup_date,
                                    profile=profile,
                                    config=config).get()

            if backup:
                real_key = backup.stored_filename
                log.info("Deleting {0}".format(real_key))

                storage_backend.delete(real_key)
                backup.set_deleted()
                deleted.append(real_key)
        except Exception as exc:
            log.error("Error when deleting {0}".format(backup))
            log.exception(exc)

    return deleted
Example #10
def show(query="", destination="", tags="", profile="default", config=CONFIG_FILE):
    backups = Backups.search(query, destination, profile=profile, tags=tags, config=config)
    _display_backups(backups)
Example #11
def rotate_backups(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
    """Rotate backup using grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First weekday (used to determine which weekly backup to keep; Saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).

    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    session_id = str(uuid.uuid4())
    events.before_rotate_backups(session_id)

    deleted = []

    backups = Backups.search(filename, destination, profile=profile, config=config)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    rotate_kwargs = rotate.conf.copy()
    del rotate_kwargs["first_week_day"]
    for k, v in rotate_kwargs.items():
        rotate_kwargs[k] = int(v)
    rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
    rotate_kwargs["now"] = datetime.utcnow()

    to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)
    for delete_date in to_delete:
        try:
            backup_date = int(delete_date.strftime("%s"))
            backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile, config=config).get()

            if backup:
                real_key = backup.stored_filename
                log.info("Deleting {0}".format(real_key))

                storage_backend.delete(real_key)
                backup.set_deleted()
                deleted.append(backup)
        except Exception as exc:
            log.error("Error when deleting {0}".format(backup))
            log.exception(exc)

    return deleted
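
The rotation settings themselves come from RotationConfig; the sketch below shows the keys the loop above expects and how they are mapped onto grandfatherson keyword arguments. The concrete values and the string-typed config representation are assumptions.

# Values are strings because they typically come from a config file (assumption).
rotation_conf = {"days": "7", "weeks": "4", "months": "6", "first_week_day": "5"}

rotate_kwargs = {k: int(v) for k, v in rotation_conf.items() if k != "first_week_day"}
rotate_kwargs["firstweekday"] = int(rotation_conf["first_week_day"])
# rotate_kwargs == {"days": 7, "weeks": 4, "months": 6, "firstweekday": 5}
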
Example #12
def show(query="", destination="", tags="", profile="default", help="Profile, blank to show all"):
    backups = Backups.search(query, destination, profile=profile, tags=tags)
    _display_backups(backups)
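
Finally, a hedged usage sketch of show(), assuming it is exposed at module level like the other functions; the query, destination, and tags values are illustrative.

import bakthat

# List all S3 backups whose stored filename matches "mydir" for the default profile.
bakthat.show(query="mydir", destination="s3", tags="")
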