def test_backup_restore(self):
    """
    End-to-end round-trip test: back up a small random file with a 2x2 ECC map,
    then restore it and verify the restored bytes match the original.

    Returns a Deferred that fires True once the restore was verified and the
    raid worker was shut down.
    """
    test_ecc_map = 'ecc/2x2'
    test_done = Deferred()
    backupID = '[email protected]_8084:1/F1234'
    outputLocation = '/tmp/'
    # FIX: make sure the fixture directory exists — on a clean machine the
    # open() below raised FileNotFoundError before any backup logic ran.
    os.makedirs('/tmp/_some_folder', exist_ok=True)
    with open('/tmp/_some_folder/random_file', 'wb') as fout:
        fout.write(os.urandom(10))
        # fout.write(os.urandom(100*1024))
    backupPipe = backup_tar.backuptardir_thread('/tmp/_some_folder/')

    def _extract_done(retcode, backupID, source_filename, output_location):
        # Final stage: the tar was extracted — compare restored vs. original bytes.
        assert retcode is True
        print('file size is: %d bytes' % len(bpio.ReadBinaryFile('/tmp/random_file')))
        assert bpio.ReadBinaryFile('/tmp/random_file') == bpio.ReadBinaryFile('/tmp/_some_folder/random_file')
        reactor.callLater(0, raid_worker.A, 'shutdown')  # @UndefinedVariable
        reactor.callLater(0.5, test_done.callback, True)  # @UndefinedVariable

    def _restore_done(result, backupID, outfd, tarfilename, outputlocation):
        # Restore finished writing the tar file — extract it in a thread.
        assert result == 'done'
        d = backup_tar.extracttar_thread(tarfilename, outputlocation)
        d.addCallback(_extract_done, backupID, tarfilename, outputlocation)
        return d

    def _restore():
        # Start the restore state machine, writing into a fresh temp tar file.
        outfd, outfilename = tmpfile.make(
            'restore',
            extension='.tar.gz',
            prefix=backupID.replace('@', '_').replace('.', '_').replace('/', '_').replace(':', '_') + '_',
        )
        r = restore_worker.RestoreWorker(backupID, outfd, KeyID=None, ecc_map=eccmap.eccmap(test_ecc_map))
        r.MyDeferred.addCallback(_restore_done, backupID, outfd, outfilename, outputLocation)
        r.automat('init')

    def _bk_done(bid, result):
        assert result == 'done'

    def _bk_closed(job):
        # Previously dead code (guarded by `if False:`) — kept for reference:
        # os.remove('/tmp/.bitdust_tmp/backups/[email protected]_8084/1/F1234/0-1-Data')
        # os.remove('/tmp/.bitdust_tmp/backups/[email protected]_8084/1/F1234/0-1-Parity')
        reactor.callLater(0.5, _restore)  # @UndefinedVariable

    reactor.callWhenRunning(raid_worker.A, 'init')  # @UndefinedVariable
    job = backup.backup(backupID, backupPipe, blockSize=1024*1024, ecc_map=eccmap.eccmap(test_ecc_map))
    job.finishCallback = _bk_done
    # Chain the restore off the backup job reaching its DONE state.
    job.addStateChangedCallback(lambda *a, **k: _bk_closed(job), oldstate=None, newstate='DONE')
    reactor.callLater(0.5, job.automat, 'start')  # @UndefinedVariable
    return test_done
def main():
    """
    Command-line entry point: back up the path in ``sys.argv[1]`` under the
    backup ID in ``sys.argv[2]``, then run the reactor until stopped.

    NOTE(review): this file defines ``main`` twice; this earlier definition is
    shadowed by the later one — confirm which copy is intended to survive.
    """
    sourcePath = sys.argv[1]
    backupID = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    # Called for its side effect (creates the destination folder); the
    # returned path was previously bound to an unused local.
    backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        # FIX: was ``backup_tar.backuptar`` — every other call site in this
        # file uses ``backuptardir`` for the directory branch.
        backupPipe = backup_tar.backuptardir(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(backupID, backupPipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
def main():
    """
    Script entry point: back up the path given as the first argument under the
    backup ID given as the second, then spin the reactor until stopped.
    """
    src = sys.argv[1]
    bid = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), bid)
    # Pick the tar producer that matches the source: whole directory vs. single file.
    make_pipe = backup_tar.backuptardir if bpio.pathIsDir(src) else backup_tar.backuptarfile
    pipe = make_pipe(src, compress=compress_mode)
    pipe.make_nonblocking()
    job = backup.backup(bid, pipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
def _do_start_archive_backup(self):
    """
    Build and start a backup job that stores the latest queue data snapshot
    under this queue's archive folder.
    """
    local_path = self.local_data_callback(self.queue_id, self.latest_sequence_id)
    supplier_path_id = os.path.join(self.archive_folder_path, strng.to_text(self.latest_sequence_id))
    version = misc.NewBackupID()
    backup_id = packetid.MakeBackupID(
        customer=self.queue_owner_id,
        path_id=supplier_path_id,
        key_alias=self.queue_alias,
        version=version,
    )
    backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backup_id)
    # skip compression on Android, use bz2 elsewhere — TODO confirm rationale
    compress_mode = 'none' if bpio.Android() else 'bz2'
    arcname = os.path.basename(local_path)
    pipe = backup_tar.backuptarfile_thread(local_path, arcname=arcname, compress=compress_mode)
    self.backup_job = backup.backup(
        backupID=backup_id,
        pipe=pipe,
        blockResultCallback=self._on_archive_backup_block_result,
        finishCallback=self._on_archive_backup_done,
        blockSize=1024 * 1024 * 10,
        sourcePath=local_path,
        keyID=self.group_key_id,
        ecc_map=eccmap.eccmap(self.ecc_map),
        creatorIDURL=self.queue_owner_idurl,
    )
    self.backup_job.automat('start')
    if _Debug:
        lg.args(_DebugLevel, job=self.backup_job, backup_id=backup_id, local_path=local_path, group_key_id=self.group_key_id)
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.

    Looks up the task's remote path in the local catalog, validates the local
    source path, allocates a unique backup version ID, creates the destination
    folder and starts the ``backup.backup`` state machine.

    Returns None on success, or an error string (after reporting the failure
    via ``OnTaskFailed``) when the task cannot be started.
    """
    iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.pathID)
        err = 'remote path "%s" not found in the catalog' % self.remotePath
        OnTaskFailed(self.pathID, err)
        return err
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            # self._on_job_failed(self.pathID)
            err = 'catalog item related to "%s" is broken' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
    if not self.localPath:
        self.localPath = sourcePath
        # FIX: was ``lg.out(msg)`` — the message string was being passed as
        # the debug level; every other call in this method uses ``lg.out(4, msg)``.
        lg.out(4, 'backup_control.Task.run local path was populated from catalog: %s' % self.localPath)
    if self.localPath != sourcePath:
        lg.warn('local path is differ from catalog: %s != %s' % (self.localPath, sourcePath))
    if not bpio.pathExist(self.localPath):
        lg.warn('path not exist: %s' % self.localPath)
        # self._on_job_failed(self.pathID)
        err = 'local path "%s" not exist' % self.localPath
        OnTaskFailed(self.pathID, err)
        return err
    # if os.path.isfile(self.localPath) and self.localPath != sourcePath:
    #     tmpfile.make(name, extension, prefix)
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # ups - we already have same version
        # let's add 1,2,3... to the end to make absolutely unique version ID
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    self.backupID = packetid.MakeBackupID(
        customer=self.fullCustomerID,
        path_id=self.remotePath,
        version=dataID,
    )
    if self.backupID in jobs():
        # a job for this exact backup ID is already running — do not start twice
        lg.warn('backup job %s already started' % self.backupID)
        return 'backup job %s already started' % self.backupID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.backupID)
        err = 'failed creating destination folder for "%s"' % self.backupID
        return OnTaskFailed(self.backupID, err)
    compress_mode = 'bz2'  # 'none' # 'gz'
    arcname = os.path.basename(sourcePath)
    # directory vs. single-file source uses a different tar producer
    if bpio.pathIsDir(self.localPath):
        backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        self.backupID,
        backupPipe,
        finishCallback=OnJobDone,
        blockResultCallback=OnBackupBlockReport,
        blockSize=settings.getBackupBlockSize(),
        sourcePath=self.localPath,
        keyID=self.keyID or itemInfo.key_id,
    )
    jobs()[self.backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type == backup_fs.DIR:
        # folder size is computed asynchronously; the callback updates the job
        dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        sz = os.path.getsize(self.localPath)
        jobs()[self.backupID].totalSize = sz
        itemInfo.set_size(sz)
    backup_fs.Calculate()
    Save()
    jobs()[self.backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, self.localPath))
    return None