def doRemoveUnusedFiles(self, arg):
    """
    Delete local copies of backup pieces that are no longer needed.

    Local files are only required while a backup is being rebuilt, so
    they can be removed once the remote copies are safe - unless the
    user explicitly asked to keep local backups in the settings.
    """
    # User chose to keep local backup copies - never delete anything.
    if settings.getGeneralLocalBackups() is True:
        return
    # User does not keep local copies, but may want extra safety:
    # only delete when all suppliers were online ("green") for a while.
    if settings.getGeneralWaitSuppliers() is True:
        if contact_status.hasOfflineSuppliers() or time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60:
            # Some suppliers are offline, or the team changed within the
            # last 24 hours - keep the files, they may be needed to rebuild.
            return
    count = 0
    for backupID in misc.sorted_backup_ids(backup_matrix.local_files().keys()):
        packets = backup_matrix.ScanBlocksToRemove(backupID, settings.getGeneralWaitSuppliers())
        for packetID in packets:
            filename = os.path.join(settings.getLocalBackupsDir(), packetID)
            if os.path.isfile(filename):
                try:
                    os.remove(filename)
                except OSError:
                    # FIX: was a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt; os.remove raises OSError.
                    dhnio.DprintException()
                    continue
                count += 1
    dhnio.Dprint(8, 'data_sender.doRemoveUnusedFiles %d files were removed' % count)
    # Refresh the local-files matrix to reflect the deletions.
    backup_matrix.ReadLocalFiles()
def doScanAndQueue(self, arg):
    """
    Scan all local backups and queue outgoing pieces to the suppliers.

    Writes a trace of every decision into ``data_sender.log`` inside the
    logs directory and fires the 'scan-done' automat event when finished.

    FIX: the log file handle used to leak if any exception was raised
    during the scan - the whole body is now wrapped in try/finally so
    the handle is always flushed and closed.
    """
    global _ShutdownFlag
    dhnio.Dprint(10, 'data_sender.doScanAndQueue')
    log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w')
    try:
        log.write('doScanAndQueue %s\n' % time.asctime())
        if _ShutdownFlag:
            # Shutting down - do not queue anything new.
            log.write('doScanAndQueue _ShutdownFlag is True\n')
            self.automat('scan-done')
            return
        # Only proceed when every supplier slot is filled ('' means a hole).
        if '' not in contacts.getSupplierIDs():
            for backupID in misc.sorted_backup_ids(backup_matrix.local_files().keys(), True):
                packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID)
                log.write('%s\n' % packetsBySupplier)
                for supplierNum in packetsBySupplier.keys():
                    supplier_idurl = contacts.getSupplierID(supplierNum)
                    if not supplier_idurl:
                        dhnio.Dprint(2, 'data_sender.doScanAndQueue WARNING ?supplierNum? %s for %s' % (supplierNum, backupID))
                        continue
                    for packetID in packetsBySupplier[supplierNum]:
                        # Sanity check: the packet ID must match the backup
                        # and supplier we are iterating over.
                        backupID_, blockNum, supplierNum_, dataORparity = packetid.BidBnSnDp(packetID)
                        if backupID_ != backupID:
                            dhnio.Dprint(2, 'data_sender.doScanAndQueue WARNING ?backupID? %s for %s' % (packetID, backupID))
                            continue
                        if supplierNum_ != supplierNum:
                            dhnio.Dprint(2, 'data_sender.doScanAndQueue WARNING ?supplierNum? %s for %s' % (packetID, backupID))
                            continue
                        # Skip pieces already queued or throttled suppliers.
                        if io_throttle.HasPacketInSendQueue(supplier_idurl, packetID):
                            log.write('%s in the send queue to %s\n' % (packetID, supplier_idurl))
                            continue
                        if not io_throttle.OkToSend(supplier_idurl):
                            log.write('ok to send %s ? - NO!\n' % supplier_idurl)
                            continue
                        # Do not pile up more than 3 parallel transfers
                        # to the same supplier.
                        tranByiID = transport_control.transfers_by_idurl(supplier_idurl)
                        if len(tranByiID) > 3:
                            log.write('transfers by %s: %d\n' % (supplier_idurl, len(tranByiID)))
                            continue
                        filename = os.path.join(settings.getLocalBackupsDir(), packetID)
                        if not os.path.isfile(filename):
                            log.write('%s is not file\n' % filename)
                            continue
                        io_throttle.QueueSendFile(
                            filename,
                            packetID,
                            supplier_idurl,
                            misc.getLocalID(),
                            self._packetAcked,
                            self._packetFailed)
                        log.write('io_throttle.QueueSendFile %s\n' % packetID)
        self.automat('scan-done')
    finally:
        log.flush()
        log.close()
def doPrepareListBackups(self, arg):
    """
    Build the work queue for backup_rebuilder and fire 'list-backups-done'.

    Skips entirely when a backup is currently running, since rebuilding
    in parallel would consume too much CPU.
    """
    if backup_control.HasRunningBackup():
        # A backup is in progress right now - drop any pending rebuild work.
        backup_rebuilder.RemoveAllBackupsToWork()
        dhnio.Dprint(6, 'backup_monitor.doPrepareListBackups skip all rebuilds')
        self.automat('list-backups-done')
        return
    # Start from the union of every backup known locally or remotely.
    candidates = set(backup_matrix.local_files().keys() + backup_matrix.remote_files().keys())
    # Keep only backups that still exist in the catalog.
    candidates &= set(backup_fs.ListAllBackupIDs())
    # Drop backups that are running at this moment.
    candidates -= set(backup_control.ListRunningBackups())
    # Newest first, so recent backups are repaired before older ones.
    workQueue = misc.sorted_backup_ids(list(candidates), True)
    backup_rebuilder.AddBackupsToWork(workQueue)
    dhnio.Dprint(6, 'backup_monitor.doPrepareListBackups %d items' % len(workQueue))
    self.automat('list-backups-done')