def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = strng.to_text(path)
    if not bpio.pathExist(localPath):
        return {'result': {"success": False, "error": 'local path %s was not found' % path}}
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, read_stats=True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath, read_stats=True)
            result.append('new file was added: %s' % localPath)
    pathID = global_id.CanonicalID(pathID)
    backup_control.StartSingle(pathID=pathID, localPath=localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    result.append('backup started: %s' % pathID)
    return {'result': result, }
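
# Example (hypothetical caller sketch): note the asymmetric result contract of
# the handler above - on failure 'result' is a dict with success/error keys,
# on success it is a list of status strings. The path and the initialized
# BitDust node are assumptions for illustration only.
def _example_upload_call():
    response = _upload({'path': '/home/alice/Documents/report.odt'})
    if isinstance(response['result'], dict):
        # failure branch: {'success': False, 'error': '...'}
        print('upload failed:', response['result']['error'])
    else:
        # success branch: human-readable status strings
        for line in response['result']:
            print(line)  # "new file was added: ..." then "backup started: ..."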
def _delete_version(params):
    lg.out(6, '_delete_version %s' % str(params))
    backupID = params['backupid']
    if not packetid.Valid(backupID):
        return {'result': {"success": False, "error": "backupID %s is not valid" % backupID}}
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not backup_fs.ExistsID(remotePath, iterID=backup_fs.fsID(global_id.GlobalUserToIDURL(customerGlobalID))):
        return {'result': {"success": False, "error": "path %s not found" % remotePath}}
    if version:
        backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    backup_monitor.A('restart')
    control.request_update([('backupID', backupID), ])
    return {'result': {"success": True, "error": None}}
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {'result': {"success": False, "error": "path %s is not valid" % pathID}}
    if not backup_fs.ExistsID(pathID):
        return {'result': {"success": False, "error": "path %s not found" % pathID}}
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
def DeleteBackup(backupID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    Removes a single backup ID completely. Performs several operations:

    1) abort the backup if it just started and is still running at the moment
    2) if we requested files for this backup, we do not need them anymore - remove the 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index database
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index database, soon it will be synchronized with the "index_synchronizer()" state machine
    """
    backupID = global_id.CanonicalID(backupID)
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort the backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(8, 'backup_control.DeleteBackup %s is in process, stopping' % backupID)
        return True
    from customer import io_throttle
    from . import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested files for this backup - we do not need them anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    # callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([('backupID', backupID), ])
    return True
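
# Example (hypothetical): when removing many backups in a row, the saveDB and
# calculate flags let the caller batch the expensive rescan and index save into
# one final pass - the same pattern DeleteAllBackups() uses below.
def _example_batch_delete(ids_to_remove):
    # ids_to_remove: a list of canonical backup IDs (hypothetical input)
    for backupID in ids_to_remove:
        DeleteBackup(backupID, removeLocalFilesToo=True, saveDB=False, calculate=False)
    backup_fs.Scan()       # rescan all files once
    backup_fs.Calculate()  # recompute used space once
    Save()                 # write the index once
    control.request_update()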
def OnJobDone(backupID, result):
    """
    A callback method fired when a backup is finished.

    Here we need to save the index database.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' % (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = packetid.MakeBackupID(customerGlobalID, remotePath, version)
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        # io_throttle.DeleteBackupRequests(backupID)
                        # io_throttle.DeleteBackupSendings(backupID)
                        # callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([('pathID', remotePath), ])
        # TODO: check used space, if we are over-using - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if we have a lot of tasks started this will produce a lot of unneeded actions,
        # it will be smarter to restart it once we finish all tasks,
        # because the user will probably leave BitDust working after starting a long-running operation
        from storage import backup_monitor
        if _Debug:
            lg.out(_DebugLevel, 'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left')
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version, result)  # @UndefinedVariable
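
# Illustration only: the pruning above keeps the newest maxBackupsNum versions.
# list_versions(sorted=True, reverse=True) yields newest-first, so everything
# past the first maxBackupsNum entries is stale. Plain-Python restatement:
def _example_stale_versions():
    max_copies = 2
    versions = sorted(['F20190101', 'F20190301', 'F20190201'], reverse=True)
    stale = versions[max_copies:]
    assert stale == ['F20190101']  # only the oldest copy gets pruned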
def DeletePathBackups(pathID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    Removes all backups of the given path ID.

    Performs the same operations as ``DeleteBackup()``.
    """
    from . import backup_rebuilder
    from customer import io_throttle
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
    if item is None:
        return False
    lg.out(8, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        lg.out(8, '    removing %s' % backupID)
        # abort the backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested files for this backup - we do not need them anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
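
# Hedged usage sketch mirroring the _delete() handler above: skip the internal
# save, then rescan, save and restart backup_monitor once; assumes a running
# node with storage.backup_monitor available.
def _example_delete_path(pathID):
    if DeletePathBackups(pathID, saveDB=False, calculate=False):
        backup_fs.Scan()
        backup_fs.Calculate()
        Save()
        control.request_update()
        backup_monitor.A('restart')  # rebalance remote storage after the delete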
def IncomingSupplierBackupIndex(newpacket):
    """
    Called by ``p2p.p2p_service`` when a remote copy of our local index
    database (in the "Data" packet) is received from one of our suppliers.

    The index is also stored on suppliers to be able to restore it.
    """
    b = encrypted.Unserialize(newpacket.Payload)
    if b is None:
        lg.err('failed reading data from %s' % newpacket.RemoteID)
        return None
    try:
        session_key = key.DecryptLocalPrivateKey(b.EncryptedSessionKey)
        padded_data = key.DecryptWithSessionKey(session_key, b.EncryptedData, session_key_type=b.SessionKeyType)
        inpt = StringIO(strng.to_text(padded_data[:int(b.Length)]))
        supplier_revision = inpt.readline().rstrip('\n')
        if supplier_revision:
            supplier_revision = int(supplier_revision)
        else:
            supplier_revision = -1
    except:
        lg.exc()
        try:
            inpt.close()
        except:
            pass
        return None
    if revision() > supplier_revision:
        inpt.close()
        if _Debug:
            lg.out(_DebugLevel, 'backup_control.IncomingSupplierBackupIndex SKIP, supplier %s revision=%d, local revision=%d' % (
                newpacket.RemoteID, supplier_revision, revision(), ))
        return supplier_revision
    text_data = inpt.read()
    inpt.close()
    if ReadIndex(text_data):
        commit(supplier_revision)
        backup_fs.Scan()
        backup_fs.Calculate()
        WriteIndex()
        control.request_update()
        if _Debug:
            lg.out(_DebugLevel, 'backup_control.IncomingSupplierBackupIndex updated to revision %d from %s' % (
                revision(), newpacket.RemoteID))
    else:
        lg.warn('failed to read catalog index from supplier')
    return supplier_revision
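
# Toy restatement of the reconciliation rule above (not the BitDust API):
# the incoming index is skipped only when the local revision is strictly
# newer, otherwise it replaces the local catalog and its revision is committed.
def _example_should_apply(local_revision, supplier_revision):
    return local_revision <= supplier_revision

assert _example_should_apply(5, 7)        # supplier is ahead: apply and commit
assert _example_should_apply(7, 7)        # equal revisions are re-applied too
assert not _example_should_apply(8, 7)    # local is newer: skip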
def DeleteAllBackups():
    """
    Remove all backup IDs from the index database, see the ``DeleteBackup()`` method.
    """
    # prepare a list of all known backup IDs
    all_ids = set(backup_fs.ListAllBackupIDs())
    all_ids.update(backup_matrix.GetBackupIDs(remote=True, local=True))
    lg.out(4, 'backup_control.DeleteAllBackups %d IDs to kill' % len(all_ids))
    # delete one by one
    for backupID in all_ids:
        DeleteBackup(backupID, saveDB=False, calculate=False)
    # scan all files
    backup_fs.Scan()
    # check and calculate used space
    backup_fs.Calculate()
    # save the index
    Save()
    # refresh the GUI
    control.request_update()
def OnFoundFolderSize(pth, sz, arg):
    """
    This is a callback, fired from the ``lib.dirsize.ask()`` method after it
    finishes calculating the folder size.
    """
    try:
        pathID, version = arg
        customerGlobID, pathID = packetid.SplitPacketID(pathID)
        customerIDURL = global_id.GlobalUserToIDURL(customerGlobID)
        item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customerIDURL))
        if item:
            item.set_size(sz)
            backup_fs.Calculate()
            Save()
        if version:
            backupID = packetid.MakeBackupID(customerGlobID, pathID, version)
            job = GetRunningBackupObject(backupID)
            if job:
                job.totalSize = sz
            if _Debug:
                # the debug log stays inside this branch: backupID is only defined when a version is given
                lg.out(_DebugLevel, 'backup_control.OnFoundFolderSize %s %d' % (backupID, sz))
    except:
        lg.exc()
def _itemRestored(backupID, result):
    customerGlobalID, remotePath, _ = packetid.SplitBackupID(backupID)
    backup_fs.ScanID(remotePath, customer_idurl=global_id.GlobalUserToIDURL(customerGlobalID))
    backup_fs.Calculate()
def doCleanUpBackups(self, *args, **kwargs):
    # here we check all backups we have and remove the old ones:
    # the user can set how many versions of a file or folder to keep,
    # older versions will be removed here
    from storage import backup_rebuilder
    try:
        self.backups_progress_last_iteration = len(backup_rebuilder.A().backupsWasRebuilt)
    except:
        self.backups_progress_last_iteration = 0
    versionsToKeep = settings.getBackupsMaxCopies()
    if not contactsdb.num_suppliers():
        bytesUsed = 0
    else:
        bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
    bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(), 0)
    customerGlobID = my_id.getGlobalID()
    if _Debug:
        lg.out(_DebugLevel, 'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d' % (
            versionsToKeep, bytesUsed, bytesNeeded))
    delete_count = 0
    if versionsToKeep > 0:
        for pathID, localPath, itemInfo in backup_fs.IterateIDs():
            pathID = global_id.CanonicalID(pathID)
            if backup_control.IsPathInProcess(pathID):
                continue
            versions = itemInfo.list_versions()
            # TODO: do we need to sort the list? it comes from a set, so it may need sorting
            while len(versions) > versionsToKeep:
                backupID = packetid.MakeBackupID(customerGlobID, pathID, versions.pop(0))
                if _Debug:
                    lg.out(_DebugLevel, 'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s' % (
                        len(versions), versionsToKeep, localPath, backupID))
                backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                delete_count += 1
    # we also need to fit the used space into the needed space (given from other users):
    # they trust us - we do not need to take extra space from our friends,
    # so remove the oldest backups, but keep at least one for every folder - at least locally!
    # still, our suppliers will remove our "extra" files by their "local_tester"
    if bytesNeeded <= bytesUsed:
        sizeOk = False
        for pathID, localPath, itemInfo in backup_fs.IterateIDs():
            if sizeOk:
                break
            pathID = global_id.CanonicalID(pathID)
            versions = itemInfo.list_versions(True, False)
            if len(versions) <= 1:
                continue
            for version in versions[1:]:
                backupID = packetid.MakeBackupID(customerGlobID, pathID, version)
                versionInfo = itemInfo.get_version_info(version)
                if versionInfo[1] > 0:
                    if _Debug:
                        lg.out(_DebugLevel, 'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s' % (
                            bytesUsed, bytesNeeded, backupID, localPath))
                    backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                    delete_count += 1
                    bytesUsed -= versionInfo[1]
                    if bytesNeeded > bytesUsed:
                        sizeOk = True
                        break
    if delete_count > 0:
        backup_fs.Scan()
        backup_fs.Calculate()
        backup_control.Save()
        from main import control
        control.request_update()
    collected = gc.collect()
    if self.backups_progress_last_iteration > 0:
        if _Debug:
            lg.out(_DebugLevel, 'backup_monitor.doCleanUpBackups sending "restart", backups_progress_last_iteration=%s' % (
                self.backups_progress_last_iteration))
        reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
    if _Debug:
        lg.out(_DebugLevel, 'backup_monitor.doCleanUpBackups collected %d objects' % collected)
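
# Self-contained sketch of the keep-newest-N rule from the first loop above;
# version labels sort chronologically, so the oldest entries come first.
def _example_versions_to_delete(versions, keep):
    if keep <= 0:
        return []  # retention disabled, nothing is auto-deleted
    ordered = sorted(versions)
    return ordered[:max(0, len(ordered) - keep)]

assert _example_versions_to_delete(['F0104', 'F0101', 'F0102', 'F0103'], 2) == ['F0101', 'F0102']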
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.
    """
    iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.pathID)
        err = 'remote path "%s" not found in the catalog' % self.remotePath
        OnTaskFailed(self.pathID, err)
        return err
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            # self._on_job_failed(self.pathID)
            err = 'catalog item related to "%s" is broken' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
    if not self.localPath:
        self.localPath = sourcePath
        lg.out(4, 'backup_control.Task.run local path was populated from the catalog: %s' % self.localPath)
    if self.localPath != sourcePath:
        lg.warn('local path differs from the catalog: %s != %s' % (self.localPath, sourcePath))
    if not bpio.pathExist(self.localPath):
        lg.warn('path does not exist: %s' % self.localPath)
        # self._on_job_failed(self.pathID)
        err = 'local path "%s" does not exist' % self.localPath
        OnTaskFailed(self.pathID, err)
        return err
    # if os.path.isfile(self.localPath) and self.localPath != sourcePath:
    #     tmpfile.make(name, extension, prefix)
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # oops - we already have the same version,
        # let's append 1, 2, 3, ... to the end to make an absolutely unique version ID
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    self.backupID = packetid.MakeBackupID(
        customer=self.fullCustomerID,
        path_id=self.remotePath,
        version=dataID,
    )
    if self.backupID in jobs():
        lg.warn('backup job %s already started' % self.backupID)
        return 'backup job %s already started' % self.backupID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.backupID)
        err = 'failed creating destination folder for "%s"' % self.backupID
        return OnTaskFailed(self.backupID, err)
    compress_mode = 'bz2'  # 'none' # 'gz'
    arcname = os.path.basename(sourcePath)
    if bpio.pathIsDir(self.localPath):
        backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        self.backupID,
        backupPipe,
        finishCallback=OnJobDone,
        blockResultCallback=OnBackupBlockReport,
        blockSize=settings.getBackupBlockSize(),
        sourcePath=self.localPath,
        keyID=self.keyID or itemInfo.key_id,
    )
    jobs()[self.backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type == backup_fs.DIR:
        dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        sz = os.path.getsize(self.localPath)
        jobs()[self.backupID].totalSize = sz
        itemInfo.set_size(sz)
        backup_fs.Calculate()
        Save()
    jobs()[self.backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, self.localPath))
    return None
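
# Standalone sketch of the version-label collision rule used in Task.run():
# if the freshly generated label already exists for the item, append an
# increasing integer suffix until it becomes unique.
def _example_unique_version(base, existing):
    if base not in existing:
        return base
    i = 1
    while (base + str(i)) in existing:
        i += 1
    return base + str(i)

assert _example_unique_version('F20190101AM', set()) == 'F20190101AM'
assert _example_unique_version('F20190101AM', {'F20190101AM', 'F20190101AM1'}) == 'F20190101AM2'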