def _do_restore_next_backup(self, backup_index):
    """
    Start restoring the backup at position ``backup_index`` of ``self.selected_backups``.

    When the index is past the end of the list, all selected backups are done and
    the 'extract-all-done' event is fired with the accumulated extracted messages.
    """
    if _Debug:
        lg.args(_DebugLevel, backup_index=backup_index, selected_backups=len(self.selected_backups))
    if backup_index >= len(self.selected_backups):
        lg.info('all selected backups are processed')
        self.automat('extract-all-done', self.extracted_messages)
        return
    backup_id = self.selected_backups[backup_index]
    # temp-file prefix must be filesystem-safe: map id separators to underscores
    safe_prefix = backup_id.translate(str.maketrans('@./:', '____')) + '_'
    outfd, outfilename = tmpfile.make('restore', extension='.tar.gz', prefix=safe_prefix)
    worker = restore_worker.RestoreWorker(backup_id, outfd, KeyID=self.group_key_id)
    worker.MyDeferred.addCallback(self._on_restore_done, backup_id, outfd, outfilename, backup_index)
    if _Debug:
        worker.MyDeferred.addErrback(lg.errback, debug=_Debug, debug_level=_DebugLevel, method='archive_reader.doStartRestoreWorker')
    worker.MyDeferred.addErrback(self._on_restore_failed, backup_id, outfd, outfilename, backup_index)
    worker.automat('init')
def _restore():
    """Spawn a RestoreWorker that unpacks ``backupID`` into a fresh temporary tar.gz file."""
    # temp-file prefix must be filesystem-safe: map id separators to underscores
    safe_prefix = backupID.translate(str.maketrans('@./:', '____')) + '_'
    outfd, outfilename = tmpfile.make('restore', extension='.tar.gz', prefix=safe_prefix)
    worker = restore_worker.RestoreWorker(backupID, outfd, KeyID=None, ecc_map=eccmap.eccmap(test_ecc_map))
    worker.MyDeferred.addCallback(_restore_done, backupID, outfd, outfilename, outputLocation)
    worker.automat('init')
def doStartRestoreWorker(self, *args, **kwargs):
    """
    Action method.

    Looks up the queue owner's archive folder in the catalog, takes the first
    known snapshot from it and launches a RestoreWorker to pull that snapshot
    into a temporary tar.gz file. Fires 'restore-failed' if the folder or its
    snapshots cannot be found.
    """
    iterID_and_path = backup_fs.WalkByID(self.archive_folder_path, iterID=backup_fs.fsID(self.queue_owner_idurl))
    if iterID_and_path is None:
        lg.err('did not found archive folder in the catalog: %r' % self.archive_folder_path)
        self.automat('restore-failed')
        return
    iterID, _ = iterID_and_path
    known_archive_snapshots_list = backup_fs.ListAllBackupIDsFull(iterID=iterID)
    if not known_archive_snapshots_list:
        lg.err('failed to restore data from archive, no snapshots found in folder: %r' % self.archive_folder_path)
        self.automat('restore-failed')
        return
    # each catalog entry is a tuple; position 1 carries the backup id
    snapshots_list = [archive_item[1] for archive_item in known_archive_snapshots_list]
    if _Debug:
        lg.args(_DebugLevel, snapshots_list=snapshots_list)
    if not snapshots_list:
        lg.err('no available snapshots found in archive list: %r' % known_archive_snapshots_list)
        self.automat('restore-failed')
        return
    backupID = snapshots_list[0]
    # temp-file prefix must be filesystem-safe: map id separators to underscores
    safe_prefix = backupID.translate(str.maketrans('@./:', '____')) + '_'
    outfd, outfilename = tmpfile.make('restore', extension='.tar.gz', prefix=safe_prefix)
    worker = restore_worker.RestoreWorker(backupID, outfd, KeyID=self.group_key_id)
    worker.MyDeferred.addCallback(self._on_restore_done, backupID, outfd, outfilename)
    worker.MyDeferred.addErrback(self._on_restore_failed, backupID, outfd, outfilename)
    if _Debug:
        worker.MyDeferred.addErrback(lg.errback, debug=_Debug, debug_level=_DebugLevel, method='archive_reader.doStartRestoreWorker')
    worker.automat('init')
def backup_done(bid, result):
    """
    Test callback fired once backup ``bid`` has finished locally; simulates a
    full round-trip of the data through suppliers and restores it back.

    Steps:
      1. sign every piece of the local backup into a ``<bid>.out`` folder;
      2. pretend the pieces were delivered and fetched back: unserialize and
         validate each signed packet into ``<bid>.inp``;
      3. start a RestoreWorker on the ``<bid>.inp`` copy.
    """
    from crypt import signed
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
    except OSError:
        # folder left over from a previous run - fine for a test
        # (was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit)
        pass
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
        # NOTE(review): str() over the binary payload yields a "b'...'" repr on
        # Python 3 - looks like py2-era code; confirm the intended representation
        payld = str(bpio.ReadBinaryFile(filepath))
        outpacket = signed.Packet('Data', my_id.getLocalID(), my_id.getLocalID(), filename, payld, 'http://megafaq.ru/cvps1010.xml')
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
        bpio.WriteBinaryFile(newfilepath, outpacket.Serialize())
    # Assume we delivered all pieces from ".out" to suppliers and lost original data
    # Then we requested the data back and got it into ".inp"
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.inp'))
    except OSError:
        pass
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out')):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
        data = bpio.ReadBinaryFile(filepath)
        inppacket = signed.Unserialize(data)
        assert inppacket
        assert inppacket.Valid()
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.inp', filename)
        bpio.WriteBinaryFile(newfilepath, inppacket.Payload)
    # Now do restore from input data
    backupID = bid + '.inp'
    outfd, tarfilename = tmpfile.make('restore', extension='.tar.gz', prefix=backupID.replace('/', '_') + '_')
    r = restore_worker.RestoreWorker(backupID, outfd)
    r.MyDeferred.addBoth(restore_done, tarfilename)
    reactor.callLater(1, r.automat, 'init')
def Start(backupID, outputLocation, callback=None, keyID=None):
    """
    Begin restoring ``backupID`` into ``outputLocation``.

    If a restore for this backup id is already in progress, the running
    RestoreWorker instance is returned instead of starting a second one.
    """
    lg.out(8, 'restore_monitor.Start %s to %s' % (backupID, outputLocation))
    global _WorkingBackupIDs
    global _WorkingRestoreProgress
    if backupID in _WorkingBackupIDs:
        return _WorkingBackupIDs[backupID]
    # temp-file prefix must be filesystem-safe: map id separators to underscores
    safe_prefix = backupID.translate(str.maketrans('@./:', '____')) + '_'
    fd, tar_filename = tmpfile.make('restore', extension='.tar.gz', prefix=safe_prefix)
    from storage import restore_worker
    worker = restore_worker.RestoreWorker(backupID, fd, KeyID=keyID)
    worker.MyDeferred.addCallback(restore_done, backupID, fd, tar_filename, outputLocation, callback)
    # worker.MyDeferred.addErrback(restore_failed, tar_filename, callback)
    worker.set_block_restored_callback(block_restored_callback)
    worker.set_packet_in_callback(packet_in_callback)
    _WorkingBackupIDs[backupID] = worker
    _WorkingRestoreProgress[backupID] = {}
    worker.automat('init')
    return worker