Example #1
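From BitDust's backup_monitor state machine: removes backup versions beyond the user's per-item limit, then keeps deleting the oldest versions until the used space fits into the donated space.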
 def doCleanUpBackups(self, *args, **kwargs):
     # check all the backups we have and remove the old ones:
     # the user can set how many versions of each file or folder to keep,
     # older versions beyond that limit are removed here
     from storage import backup_rebuilder  # local import, likely to avoid a circular dependency
     try:
         self.backups_progress_last_iteration = len(
             backup_rebuilder.A().backupsWasRebuilt)
     except Exception:
         # the rebuilder automat may not be started yet
         self.backups_progress_last_iteration = 0
     versionsToKeep = settings.getBackupsMaxCopies()
     if not contactsdb.num_suppliers():
         bytesUsed = 0
     else:
         bytesUsed = backup_fs.sizebackups() // contactsdb.num_suppliers()  # average bytes stored per supplier
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(),
                                                0)
     customerGlobID = my_id.getGlobalID()
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
             % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             pathID = global_id.CanonicalID(pathID)
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so the ordering may not be guaranteed
             while len(versions) > versionsToKeep:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  versions.pop(0))
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                         % (len(versions), versionsToKeep, localPath,
                            backupID))
                 backup_control.DeleteBackup(backupID,
                                             saveDB=False,
                                             calculate=False)
                 delete_count += 1
     # we also need to fit the used space into the needed space (donated by other users):
     # they trust us - we should not take extra space from our friends,
     # so remove the oldest backups, but keep at least one for every folder - at least locally!
     # our suppliers will still remove our "extra" files via their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             pathID = global_id.CanonicalID(pathID)
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  version)
                 versionInfo = itemInfo.get_version_info(version)
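                 # versionInfo[1] holds the size of this version in bytes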
                 if versionInfo[1] > 0:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                             %
                             (bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID,
                                                 saveDB=False,
                                                 calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from main import control
         control.request_update()
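     # force a garbage collection pass and count how many objects were freed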
     collected = gc.collect()
     if self.backups_progress_last_iteration > 0:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_monitor.doCleanUpBackups  sending "restart", backups_progress_last_iteration=%s'
                 % self.backups_progress_last_iteration)
         reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups collected %d objects' %
             collected)
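
The retention rule in the first loop can be shown in isolation: with version labels sorted oldest-first, everything beyond the newest N entries gets removed. A minimal standalone sketch (prune_versions and the sample labels are illustrative, not part of the BitDust API):

def prune_versions(versions, versions_to_keep):
    # mirrors the `while len(versions) > versionsToKeep` loop above:
    # keep popping the oldest entry until only versions_to_keep remain
    removed = []
    while len(versions) > versions_to_keep:
        removed.append(versions.pop(0))
    return versions, removed

# keep the two newest versions out of three
kept, removed = prune_versions(['F20240101', 'F20240201', 'F20240301'], 2)
assert kept == ['F20240201', 'F20240301']
assert removed == ['F20240101']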
Example #2
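From BitDust's key_ring module: uploads a key file to the suppliers as a regular backup, optionally returning a Deferred that fires once the upload completes.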
def do_backup_key(key_id, keys_folder=None, wait_result=False):
    """
    Send the given key to my suppliers to store it remotely.
    This makes a regular backup copy of the key file, encrypted with my master key.
    """
    if _Debug:
        lg.out(_DebugLevel, 'key_ring.do_backup_key     key_id=%r' % key_id)
    if key_id == my_id.getGlobalID(key_alias='master') or key_id == 'master':
        lg.err('master key must never leave local host')
        if wait_result:
            return fail(Exception('master key must never leave local host'))
        return False
    if not my_keys.is_key_registered(key_id):
        lg.err('unknown key: "%s"' % key_id)
        if wait_result:
            return fail(Exception('unknown key: "%s"' % key_id))
        return False
    if not keys_folder:
        keys_folder = settings.KeyStoreDir()
    if my_keys.is_key_private(key_id):
        local_key_filepath = os.path.join(keys_folder, '%s.private' % key_id)
        remote_path_for_key = '.keys/%s.private' % key_id
    else:
        local_key_filepath = os.path.join(keys_folder, '%s.public' % key_id)
        remote_path_for_key = '.keys/%s.public' % key_id
    global_key_path = global_id.MakeGlobalID(
        key_alias='master', customer=my_id.getGlobalID(), path=remote_path_for_key)
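    # check whether this key file is already present in the backup catalog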
    res = api.file_exists(global_key_path)
    if res['status'] == 'OK' and res['result'] and res['result'].get('exist'):
        lg.warn('key %s already exists in catalog' % global_key_path)
        global_key_path_id = res['result'].get('path_id')
        if global_key_path_id and backup_control.IsPathInProcess(global_key_path_id):
            lg.warn('skip, another backup for key already started: %s' % global_key_path_id)
            if not wait_result:
                return True
            backup_id_list = backup_control.FindRunningBackup(global_key_path_id)
            if backup_id_list:
                backup_id = backup_id_list[0]
                backup_job = backup_control.GetRunningBackupObject(backup_id)
                if backup_job:
                    backup_result = Deferred()
                    backup_job.resultDefer.addCallback(
                        lambda resp: backup_result.callback(True) if resp == 'done' else backup_result.errback(
                            Exception('failed to upload key "%s", backup result is %r' % (global_key_path, resp))))
                    if _Debug:
                        backup_job.resultDefer.addErrback(lg.errback, debug=_Debug, debug_level=_DebugLevel, method='key_ring.do_backup_key')
                    backup_job.resultDefer.addErrback(backup_result.errback)
                    if _Debug:
                        lg.args(_DebugLevel, backup_id=backup_id, global_key_path_id=global_key_path_id)
                    return backup_result
                else:
                    lg.warn('did not find a running backup job: %r' % backup_id)
            else:
                lg.warn('did not find a running backup id for path: %r' % global_key_path_id)
    else:
        res = api.file_create(global_key_path)
        if res['status'] != 'OK':
            lg.err('failed to create path "%s" in the catalog: %r' % (global_key_path, res))
            if wait_result:
                return fail(Exception('failed to create path "%s" in the catalog: %r' % (global_key_path, res)))
            return False
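    # start the actual upload of the local key file to the suppliers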
    res = api.file_upload_start(
        local_path=local_key_filepath,
        remote_path=global_key_path,
        wait_result=wait_result,
        open_share=False,
    )
    if not wait_result:
        if res['status'] != 'OK':
            lg.err('failed to upload key "%s": %r' % (global_key_path, res))
            return False
        if _Debug:
            lg.out(_DebugLevel, 'key_ring.do_backup_key key_id=%s : %r' % (key_id, res))
        return True

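    # wait_result mode: build a Deferred that fires only when the backup job completes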
    backup_result = Deferred()

    # TODO: move the code below into the api.file_upload_start() method with an additional parameter

    def _job_done(result):
        if _Debug:
            lg.args(_DebugLevel, key_id=key_id, result=result)
        if result == 'done':
            backup_result.callback(True)
        else:
            backup_result.errback(Exception('failed to upload key "%s", backup is %r' % (key_id, result)))
        return None

    def _task_started(resp):
        if _Debug:
            lg.args(_DebugLevel, key_id=key_id, response=resp)
        if resp['status'] != 'OK':
            backup_result.errback(Exception('failed to upload key "%s", task was not started: %r' % (global_key_path, resp)))
            return None
        backupObj = backup_control.jobs().get(resp['version'])
        if not backupObj:
            backup_result.errback(Exception('failed to upload key "%s", task %r failed to start' % (global_key_path, resp['version'])))
            return None
        backupObj.resultDefer.addCallback(_job_done)
        backupObj.resultDefer.addErrback(backup_result.errback)
        return None

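    # api.file_upload_start() may return a plain dict instead of a Deferred, so normalize it here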
    if not isinstance(res, Deferred):
        res_defer = Deferred()
        res_defer.callback(res)
        res = res_defer
    res.addCallback(_task_started)
    res.addErrback(backup_result.errback)
    return backup_result
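
The closing block wraps a plain return value into an already-fired Deferred so that one callback chain handles both the synchronous and the asynchronous case. Twisted ships defer.succeed() as a shortcut for exactly that; a minimal self-contained sketch of the same pattern (normalize_to_deferred is illustrative):

from twisted.internet.defer import Deferred, succeed

def normalize_to_deferred(res):
    # equivalent to the manual Deferred()/callback() pair above
    if not isinstance(res, Deferred):
        return succeed(res)
    return res

d = normalize_to_deferred({'status': 'OK'})
d.addCallback(lambda resp: print('status:', resp['status']))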