Example #1
 def _do_open_known_shares(self):
     from crypt import my_keys
     from access import shared_access_coordinator
     from storage import backup_fs
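     # collect share keys that are known locally but have no active coordinator yet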
     known_offline_shares = []
     for key_id in my_keys.known_keys():
         if not key_id.startswith('share_'):
             continue
         active_share = shared_access_coordinator.get_active_share(key_id)
         if active_share:
             continue
         known_offline_shares.append(key_id)
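     # walk the backup catalog and keep only the offline shares actually referenced by items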
     to_be_opened = []
     for _, _, itemInfo in backup_fs.IterateIDs():
         if not itemInfo.key_id:
             continue
         if itemInfo.key_id in to_be_opened:
             continue
         if itemInfo.key_id not in known_offline_shares:
             continue
         to_be_opened.append(itemInfo.key_id)
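     # (re)start a SharedAccessCoordinator state machine for every share to be opened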
     for key_id in to_be_opened:
         active_share = shared_access_coordinator.SharedAccessCoordinator(
             key_id,
             log_events=True,
             publish_events=False,
         )
         active_share.automat('restart')
Example #2
 def _do_open_known_shares(self):
     from crypt import my_keys
     from main import listeners
     from access import shared_access_coordinator
     from storage import backup_fs
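     # same selection as in the previous example: known share keys without an active coordinator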
     known_offline_shares = []
     for key_id in my_keys.known_keys():
         if not key_id.startswith('share_'):
             continue
         active_share = shared_access_coordinator.get_active_share(key_id)
         if active_share:
             continue
         known_offline_shares.append(key_id)
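     # keep only the shares that are referenced by items in the backup catalog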
     to_be_opened = []
     for _, _, itemInfo in backup_fs.IterateIDs():
         if not itemInfo.key_id:
             continue
         if itemInfo.key_id in to_be_opened:
             continue
         if itemInfo.key_id not in known_offline_shares:
             continue
         to_be_opened.append(itemInfo.key_id)
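     # open every selected share and, if listeners asked for it, re-populate its shared files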
     for key_id in to_be_opened:
         active_share = shared_access_coordinator.SharedAccessCoordinator(
             key_id,
             log_events=True,
             publish_events=False,
         )
         active_share.automat('restart')
         if listeners.is_populate_requered('shared_file'):
             listeners.populate_later().remove('shared_file')
             backup_fs.populate_shared_files(key_id=key_id)
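Both examples perform the same two-pass selection of shares to open. Below is a minimal standalone sketch of that selection logic, not part of BitDust; the arguments known_keys, active_share_ids and catalog_key_ids are hypothetical stand-ins for my_keys.known_keys(), the currently active shares and the key_id values found in the backup catalog:
 def select_shares_to_open(known_keys, active_share_ids, catalog_key_ids):
     # hypothetical helper, not part of the BitDust API: pick share keys that
     # are known locally, have no active coordinator yet, and are referenced
     # by at least one item in the backup catalog
     offline = set()
     for key_id in known_keys:
         if key_id.startswith('share_') and key_id not in active_share_ids:
             offline.add(key_id)
     # dict.fromkeys() removes duplicates while preserving catalog order
     return [k for k in dict.fromkeys(catalog_key_ids) if k in offline]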
Example #3
 def doCleanUpBackups(self, *args, **kwargs):
     # check all backups we have and remove old ones:
     # the user can set how many versions of each file or folder to keep,
     # any older versions beyond that limit will be removed here
     from storage import backup_rebuilder
     try:
         self.backups_progress_last_iteration = len(
             backup_rebuilder.A().backupsWasRebuilt)
     except Exception:
         self.backups_progress_last_iteration = 0
     versionsToKeep = settings.getBackupsMaxCopies()
     if not contactsdb.num_suppliers():
         bytesUsed = 0
     else:
         bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(), 0)
     customerGlobID = my_id.getGlobalID()
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
             % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
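     # first pass: keep at most versionsToKeep versions per catalog item, deleting the oldest ones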
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             pathID = global_id.CanonicalID(pathID)
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so it may need explicit sorting
             while len(versions) > versionsToKeep:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  versions.pop(0))
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                         % (len(versions), versionsToKeep, localPath,
                            backupID))
                 backup_control.DeleteBackup(backupID,
                                             saveDB=False,
                                             calculate=False)
                 delete_count += 1
     # we also need to fit the used space into the needed space (granted by other users)
     # they trust us, so we should not take extra space from our friends
     # remove the oldest backups, but keep at least one version for every folder - at least locally!
     # our suppliers will still remove our "extra" files via their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             pathID = global_id.CanonicalID(pathID)
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  version)
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                             % (bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID,
                                                 saveDB=False,
                                                 calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
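     # if anything was deleted, rescan and recalculate the catalog, save the index and request an update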
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from main import control
         control.request_update()
     collected = gc.collect()
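     # if some backups were rebuilt during the last iteration, schedule another "restart" of the monitor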
     if self.backups_progress_last_iteration > 0:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_monitor.doCleanUpBackups  sending "restart", backups_progress_last_iteration=%s'
                 % self.backups_progress_last_iteration)
         reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups collected %d objects' %
             collected)
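The retention rule from the first loop of Example #3 can also be shown in isolation. This is only an illustrative sketch, not BitDust code, and it assumes the version list is ordered oldest first (the original code carries a TODO questioning exactly that):
 def select_versions_to_delete(versions, versions_to_keep):
     # hypothetical helper: given version IDs ordered oldest first,
     # return the ones that exceed the retention limit
     if versions_to_keep <= 0:
         return []
     excess = len(versions) - versions_to_keep
     return versions[:excess] if excess > 0 else []

 # usage sketch: keeping 2 of 3 versions drops the oldest one
 # select_versions_to_delete(['v1', 'v2', 'v3'], 2) -> ['v1']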