Example #1
 def _on_my_identity_rotated(self, evt):
     from logs import lg
     from lib import packetid
     from storage import backup_matrix
     backup_matrix.ReadLocalFiles()
     remote_files_ids = list(backup_matrix.remote_files().keys())
     for currentID in remote_files_ids:
         latestID = packetid.LatestBackupID(currentID)
         if latestID != currentID:
             # move the records under the rewritten backup ID
             backup_matrix.remote_files()[latestID] = backup_matrix.remote_files().pop(currentID)
             lg.info('detected backup ID change in remote_files() after identity rotate : %r -> %r' % (
                 currentID, latestID))
     remote_max_block_numbers_ids = list(
         backup_matrix.remote_max_block_numbers().keys())
     for currentID in remote_max_block_numbers_ids:
         latestID = packetid.LatestBackupID(currentID)
         if latestID != currentID:
             backup_matrix.remote_max_block_numbers()[latestID] = backup_matrix.remote_max_block_numbers().pop(currentID)
             lg.info('detected backup ID change in remote_max_block_numbers() after identity rotate : %r -> %r' % (
                 currentID, latestID))
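Both loops above apply the same key-remap pattern: iterate over a snapshot of the dictionary keys and move each value under the rewritten backup ID. A minimal sketch of that pattern, with a hypothetical latest_backup_id() callable standing in for packetid.LatestBackupID():

 def remap_backup_ids(table, latest_backup_id):
     # iterate over a snapshot of the keys, because the dict is mutated in place
     for current_id in list(table.keys()):
         latest_id = latest_backup_id(current_id)
         if latest_id != current_id:
             # move the value under the rewritten backup ID
             table[latest_id] = table.pop(current_id)
     return table

For example, remap_backup_ids({'old-id': 42}, lambda _: 'new-id') returns {'new-id': 42}.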
Example #2
 def doScanBrokenBlocks(self, *args, **kwargs):
     """
     Action method.
     """
     # if the remote data structure does not exist for this backup - create it
     # this means this is a local-only backup
     from storage import backup_matrix
     if self.currentBackupID not in backup_matrix.remote_files():
         backup_matrix.remote_files()[self.currentBackupID] = {}
         # create empty remote info for every local block
         # if there are no local blocks, range(0) yields nothing and the loop is skipped
         for blockNum in range(backup_matrix.local_max_block_numbers().get(self.currentBackupID, -1) + 1):
             backup_matrix.remote_files()[self.currentBackupID][blockNum] = {
                 'D': [0] * contactsdb.num_suppliers(),
                 'P': [0] * contactsdb.num_suppliers(),
             }
     # detect missing blocks from remote info
     self.workingBlocksQueue = backup_matrix.ScanMissingBlocks(
         self.currentBackupID)
     # find the correct max block number for this backup
     # we may have both remote and local files - take the biggest block number of the two
     backupMaxBlock = max(
         backup_matrix.remote_max_block_numbers().get(self.currentBackupID, -1),
         backup_matrix.local_max_block_numbers().get(self.currentBackupID, -1),
     )
     # now we need to remember this biggest block number
     # the remote info may have fewer blocks - create empty info for the missing blocks
     for blockNum in range(backupMaxBlock + 1):
         if blockNum in backup_matrix.remote_files()[self.currentBackupID]:
             continue
         backup_matrix.remote_files()[self.currentBackupID][blockNum] = {
             'D': [0] * contactsdb.num_suppliers(),
             'P': [0] * contactsdb.num_suppliers()
         }
     # clear the requesting queue and remove old packets for this backup - we will send them again
     from stream import io_throttle
     io_throttle.DeleteBackupRequests(self.currentBackupID)
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_rebuilder.doScanBrokenBlocks for %s : %s' %
             (self.currentBackupID, str(self.workingBlocksQueue)))
     self.automat('backup-ready')
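The method above assumes that backup_matrix.remote_files() returns a nested dictionary keyed by backup ID, then by block number, holding one per-supplier flag list for Data ('D') pieces and one for Parity ('P') pieces. A hypothetical illustration of that layout, for a backup with two blocks and four suppliers:

 # hypothetical illustration of the remote_files() layout assumed above
 num_suppliers = 4
 remote_files_example = {
     'some-backup-id': {
         0: {'D': [1, 1, 0, 1], 'P': [1, 0, 1, 1]},                # block 0: the third supplier misses a Data piece
         1: {'D': [0] * num_suppliers, 'P': [0] * num_suppliers},  # block 1: nothing is confirmed remotely yet
     },
 }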
Example #3
 def doPrepareListBackups(self, *args, **kwargs):
     from storage import backup_rebuilder
     if backup_control.HasRunningBackup():
         # if some backups are running right now there is no need to rebuild anything - it would use too much CPU
         backup_rebuilder.RemoveAllBackupsToWork()
         lg.out(6, 'backup_monitor.doPrepareListBackups skip all rebuilds')
         self.automat('list-backups-done')
         return
     # take the union of remote and local backup IDs
     allBackupIDs = set(
         list(backup_matrix.local_files().keys()) +
         list(backup_matrix.remote_files().keys()))
     # keep only backups that are known to the database
     allBackupIDs.intersection_update(backup_fs.ListAllBackupIDs())
     # remove running backups
     allBackupIDs.difference_update(backup_control.ListRunningBackups())
     # sort it in reverse order - newer backups should be repaired first
     allBackupIDs = misc.sorted_backup_ids(list(allBackupIDs), True)
     # add backups to the queue
     backup_rebuilder.AddBackupsToWork(allBackupIDs)
     lg.out(
         6, 'backup_monitor.doPrepareListBackups %d items:' %
         len(allBackupIDs))
     lg.out(6, '    %s' % allBackupIDs)
     self.automat('list-backups-done', allBackupIDs)
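The selection above is plain set algebra: the union of local and remote backup IDs, intersected with the IDs the database knows about, minus the backups that are currently running. A toy illustration with hypothetical backup IDs:

 # toy illustration of the selection logic, with hypothetical backup IDs
 local_ids = {'id-1', 'id-2'}
 remote_ids = {'id-2', 'id-3'}
 known_ids = {'id-1', 'id-2', 'id-3'}   # stand-in for backup_fs.ListAllBackupIDs()
 running_ids = {'id-3'}                 # stand-in for backup_control.ListRunningBackups()

 candidates = ((local_ids | remote_ids) & known_ids) - running_ids
 assert candidates == {'id-1', 'id-2'}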
Example #4
 def doPrepareListBackups(self, arg):
     import backup_rebuilder
     if backup_control.HasRunningBackup():
         # if some backups are running right now there is no need to rebuild anything - it would use too much CPU
         backup_rebuilder.RemoveAllBackupsToWork()
         lg.out(6, 'backup_monitor.doPrepareListBackups skip all rebuilds')
         self.automat('list-backups-done')
         return
     # take the union of remote and local backup IDs
     allBackupIDs = set(backup_matrix.local_files().keys() + backup_matrix.remote_files().keys())
     # keep only backups that are known to the database
     allBackupIDs.intersection_update(backup_fs.ListAllBackupIDs())
     # remove running backups
     allBackupIDs.difference_update(backup_control.ListRunningBackups())
     # sort it in reverse order - newer backups should be repaired first
     allBackupIDs = misc.sorted_backup_ids(list(allBackupIDs), True)
     # add backups to the queue
     backup_rebuilder.AddBackupsToWork(allBackupIDs)
     lg.out(6, 'backup_monitor.doPrepareListBackups %d items' % len(allBackupIDs))
     self.automat('list-backups-done')
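Example #4 appears to be an earlier, Python 2 era variant of the same method: on Python 2 the two keys() results are lists and can be concatenated directly, while on Python 3 they are dict views and have to be wrapped in list() first, exactly as Example #3 does:

 # Python 3 equivalent of the union built above (compare Example #3)
 allBackupIDs = set(
     list(backup_matrix.local_files().keys()) +
     list(backup_matrix.remote_files().keys()))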
Example #5
 def doRemoveUnusedFiles(self, *args, **kwargs):
     """
     Action method.
     """
     if not list_files_orator.is_synchronized():
         # always make sure we have very fresh info about remote files before taking any actions
         return
     # we want to remove the local copies of the backup blocks
     # because we only need them during rebuilding
     if settings.getBackupsKeepLocalCopies() is True:
         # if the user set this in settings - they want to keep the local files
         return
     # ... the user does not want to keep local backups
     if settings.getGeneralWaitSuppliers() is True:
         from customer import fire_hire
         # but wants to be sure - all suppliers have been green for a long time
         if len(online_status.listOfflineSuppliers()) > 0 or (
                 time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60):
             # some suppliers are offline or we do not have a stable team yet
             # do not remove the files because we need them to rebuild
             return
     count = 0
     from storage import backup_matrix
     from storage import restore_monitor
     from storage import backup_rebuilder
     if _Debug:
         lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
     for backupID in misc.sorted_backup_ids(
             list(backup_matrix.local_files().keys())):
         if restore_monitor.IsWorking(backupID):
             if _Debug:
                 lg.out(_DebugLevel,
                        '        %s : SKIP, because restoring' % backupID)
             continue
         if backup_rebuilder.IsBackupNeedsWork(backupID):
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     '        %s : SKIP, because needs rebuilding' %
                     backupID)
             continue
         if not backup_rebuilder.ReadStoppedFlag():
             if backup_rebuilder.A().currentBackupID is not None:
                 if backup_rebuilder.A().currentBackupID == backupID:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             '        %s : SKIP, because rebuilding is in process'
                             % backupID)
                     continue
         if backupID not in backup_matrix.remote_files():
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     '        going to erase %s because not found in remote files'
                     % backupID)
             customer, pathID, version = packetid.SplitBackupID(backupID)
             dirpath = os.path.join(settings.getLocalBackupsDir(), customer,
                                    pathID, version)
             if os.path.isdir(dirpath):
                 try:
                     count += bpio.rmdir_recursive(dirpath,
                                                   ignore_errors=True)
                 except:
                     lg.exc()
             continue
         packets = backup_matrix.ScanBlocksToRemove(
             backupID,
             check_all_suppliers=settings.getGeneralWaitSuppliers())
         for packetID in packets:
             customer, pathID = packetid.SplitPacketID(packetID)
             filename = os.path.join(settings.getLocalBackupsDir(),
                                     customer, pathID)
             if os.path.isfile(filename):
                 try:
                     os.remove(filename)
                 except:
                     lg.exc()
                     continue
                 count += 1
     if _Debug:
         lg.out(_DebugLevel, '    %d files were removed' % count)
     backup_matrix.ReadLocalFiles()
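The per-packet branch above deletes only the block files that backup_matrix.ScanBlocksToRemove() reports, skipping files that are already gone and logging failed removals via lg.exc() before moving on. A minimal sketch of that remove-and-count pattern, using a hypothetical remove_files() helper:

 import os

 def remove_files(filenames):
     # hypothetical helper mirroring the per-packet removal loop above:
     # skip files that are already gone, keep going even if one removal fails
     count = 0
     for filename in filenames:
         if not os.path.isfile(filename):
             continue
         try:
             os.remove(filename)
         except OSError:
             continue  # the original logs the exception with lg.exc() and moves on
         count += 1
     return count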