def __init__(self, backupID, pipe, finishCallback=None, blockResultCallback=None, blockSize=None, sourcePath=None):
    self.backupID = backupID
    self.sourcePath = sourcePath
    self.eccmap = eccmap.Current()
    self.pipe = pipe
    self.blockSize = blockSize
    if self.blockSize is None:
        self.blockSize = settings.getBackupBlockSize()
    self.ask4abort = False
    self.terminating = False
    self.stateEOF = False
    self.stateReading = False
    self.closed = False
    self.currentBlockData = cStringIO.StringIO()
    self.currentBlockSize = 0
    self.workBlocks = {}
    self.blockNumber = 0
    self.dataSent = 0
    self.blocksSent = 0
    self.totalSize = -1
    self.finishCallback = finishCallback
    self.blockResultCallback = blockResultCallback
    automat.Automat.__init__(self, 'backup_%s' % self.backupID, 'AT_STARTUP', _DebugLevel)
def __init__(self, parent, callOnReceived, creatorID, packetID, ownerID, remoteID,
             debug_level=_DebugLevel, log_events=_Debug, log_transitions=_Debug,
             publish_events=False, **kwargs):
    """
    Builds the `file_down()` state machine.
    """
    self.parent = parent
    self.callOnReceived = []
    self.callOnReceived.append(callOnReceived)
    self.creatorID = creatorID
    self.packetID = global_id.CanonicalID(packetID)
    parts = global_id.ParseGlobalID(packetID)
    self.customerID = parts['customer']
    self.remotePath = parts['path']
    self.customerIDURL = parts['idurl']
    customerGlobalID, remotePath, versionName, fileName = packetid.SplitVersionFilename(packetID)
    self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath, versionName)
    self.fileName = fileName
    self.ownerID = ownerID
    self.remoteID = remoteID
    self.requestTime = None
    self.fileReceivedTime = None
    self.requestTimeout = max(30, 2 * int(settings.getBackupBlockSize() / settings.SendingSpeedLimit()))
    self.result = ''
    self.created = utime.get_sec1970()
    super(FileDown, self).__init__(
        name="file_down_%s_%s/%s/%s" % (nameurl.GetName(self.remoteID), remotePath, versionName, fileName),
        state="AT_STARTUP",
        debug_level=debug_level,
        log_events=log_events,
        log_transitions=log_transitions,
        publish_events=publish_events,
        **kwargs
    )
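# Worked example for the `requestTimeout` formula above (illustrative numbers,
# not the project's configured defaults): with a 4 MiB backup block and a
# sending speed limit of 128 KiB/s, one block takes ~32 seconds to transfer,
# so the timeout becomes max(30, 2 * 32) = 64 seconds:
#
#     block_size = 4 * 1024 * 1024   # bytes
#     speed_limit = 128 * 1024       # bytes per second
#     request_timeout = max(30, 2 * int(block_size / speed_limit))   # -> 64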
def __init__(
    self,
    backupID,
    pipe,
    finishCallback=None,
    blockResultCallback=None,
    notifyNewDataCallback=None,
    blockSize=None,
    sourcePath=None,
    keyID=None,
    ecc_map=None,
    creatorIDURL=None,
):
    self.backupID = backupID
    self.creatorIDURL = creatorIDURL or my_id.getIDURL()
    _parts = packetid.SplitBackupID(self.backupID)
    self.customerGlobalID = _parts[0]
    self.pathID = _parts[1]
    self.version = _parts[2]
    self.customerIDURL = global_id.GlobalUserToIDURL(self.customerGlobalID)
    self.sourcePath = sourcePath
    self.keyID = keyID
    self.eccmap = ecc_map or eccmap.Current()
    self.pipe = pipe
    self.blockSize = blockSize
    if self.blockSize is None:
        # fall back to the configured default block size
        self.blockSize = settings.getBackupBlockSize()
    # state flags for the reading loop
    self.ask4abort = False
    self.terminating = False
    self.stateEOF = False
    self.stateReading = False
    self.closed = False
    self.currentBlockData = BytesIO()
    self.currentBlockSize = 0
    # blocks currently in work, indexed by block number
    self.workBlocks = {}
    self.blockNumber = 0
    # progress counters
    self.dataSent = 0
    self.blocksSent = 0
    self.totalSize = -1
    self.resultDefer = Deferred()
    self.finishCallback = finishCallback
    self.blockResultCallback = blockResultCallback
    self.notifyNewDataCallback = notifyNewDataCallback
    automat.Automat.__init__(
        self,
        name='backup_%s' % self.version,
        state='AT_STARTUP',
        debug_level=_DebugLevel,
        log_events=_Debug,
        log_transitions=_Debug,
    )
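# A minimal usage sketch (an assumption based on the call sites below, not
# code taken from this module): build a non-blocking tar pipe, hand it to the
# automat, then fire the 'start' event. The literal path and the backupID
# value are hypothetical placeholders.
#
#     backupPipe = backup_tar.backuptarfile('/tmp/photo.jpg', compress='bz2')
#     backupPipe.make_nonblocking()
#     job = backup(
#         backupID=backupID,   # e.g. built with packetid.MakeBackupID()
#         pipe=backupPipe,
#         blockSize=settings.getBackupBlockSize(),
#         sourcePath='/tmp/photo.jpg',
#     )
#     job.resultDefer.addCallback(lambda result: lg.out(4, 'done: %r' % result))
#     job.automat('start')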
def __init__(self, callOnReceived, creatorID, packetID, ownerID, remoteID):
    self.callOnReceived = []
    self.callOnReceived.append(callOnReceived)
    self.creatorID = creatorID
    self.packetID = global_id.CanonicalID(packetID)
    parts = global_id.ParseGlobalID(packetID)
    self.customerID = parts['customer']
    self.remotePath = parts['path']
    self.customerIDURL = parts['idurl']
    customerGlobalID, remotePath, versionName, fileName = packetid.SplitVersionFilename(packetID)
    self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath, versionName)
    self.fileName = fileName
    self.ownerID = ownerID
    self.remoteID = remoteID
    self.requestTime = None
    self.fileReceivedTime = None
    self.requestTimeout = max(30, 2 * int(settings.getBackupBlockSize() / settings.SendingSpeedLimit()))
    self.result = ''
    self.created = utime.get_sec1970()
    PacketReport('request', self.remoteID, self.packetID, 'init')
def __init__(
    self,
    backupID,
    pipe,
    finishCallback=None,
    blockResultCallback=None,
    blockSize=None,
    sourcePath=None,
    keyID=None,
):
    self.backupID = backupID
    _parts = packetid.SplitBackupID(self.backupID)
    self.customerGlobalID = _parts[0]
    self.pathID = _parts[1]
    self.version = _parts[2]
    self.customerIDURL = global_id.GlobalUserToIDURL(self.customerGlobalID)
    self.sourcePath = sourcePath
    self.keyID = keyID
    self.eccmap = eccmap.Current()
    self.pipe = pipe
    self.blockSize = blockSize
    if self.blockSize is None:
        self.blockSize = settings.getBackupBlockSize()
    self.ask4abort = False
    self.terminating = False
    self.stateEOF = False
    self.stateReading = False
    self.closed = False
    self.currentBlockData = BytesIO()
    self.currentBlockSize = 0
    self.workBlocks = {}
    self.blockNumber = 0
    self.dataSent = 0
    self.blocksSent = 0
    self.totalSize = -1
    self.finishCallback = finishCallback
    self.blockResultCallback = blockResultCallback
    automat.Automat.__init__(self, 'backup_%s' % self.version, 'AT_STARTUP', _DebugLevel)
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.
    """
    iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.pathID)
        err = 'remote path "%s" not found in the catalog' % self.remotePath
        OnTaskFailed(self.pathID, err)
        return err
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            # self._on_job_failed(self.pathID)
            err = 'catalog item related to "%s" is broken' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
    if not self.localPath:
        self.localPath = sourcePath
        lg.out(4, 'backup_control.Task.run local path was populated from catalog: %s' % self.localPath)
    if self.localPath != sourcePath:
        lg.warn('local path differs from catalog: %s != %s' % (self.localPath, sourcePath))
    if not bpio.pathExist(self.localPath):
        lg.warn('path does not exist: %s' % self.localPath)
        # self._on_job_failed(self.pathID)
        err = 'local path "%s" does not exist' % self.localPath
        OnTaskFailed(self.pathID, err)
        return err
    # if os.path.isfile(self.localPath) and self.localPath != sourcePath:
    #     tmpfile.make(name, extension, prefix)
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # we already have the same version - append 1, 2, 3, ...
        # to the end to make an absolutely unique version ID
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    self.backupID = packetid.MakeBackupID(
        customer=self.fullCustomerID,
        path_id=self.remotePath,
        version=dataID,
    )
    if self.backupID in jobs():
        lg.warn('backup job %s already started' % self.backupID)
        return 'backup job %s already started' % self.backupID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.backupID)
        err = 'failed creating destination folder for "%s"' % self.backupID
        return OnTaskFailed(self.backupID, err)
    compress_mode = 'bz2'  # 'none' # 'gz'
    arcname = os.path.basename(sourcePath)
    if bpio.pathIsDir(self.localPath):
        backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        self.backupID,
        backupPipe,
        finishCallback=OnJobDone,
        blockResultCallback=OnBackupBlockReport,
        blockSize=settings.getBackupBlockSize(),
        sourcePath=self.localPath,
        keyID=self.keyID or itemInfo.key_id,
    )
    jobs()[self.backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type == backup_fs.DIR:
        dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        sz = os.path.getsize(self.localPath)
        jobs()[self.backupID].totalSize = sz
        itemInfo.set_size(sz)
        backup_fs.Calculate()
        Save()
    jobs()[self.backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, self.localPath))
    return None
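# The version-deduplication step in run() is small enough to isolate. A
# self-contained sketch (hypothetical helper, not project API), with
# `existing` standing in for itemInfo.has_version():

def _make_unique_version(dataID, existing):
    """Append 1, 2, 3, ... to ``dataID`` until it is not in ``existing``."""
    if dataID not in existing:
        return dataID
    i = 1
    while (dataID + str(i)) in existing:
        i += 1
    return dataID + str(i)

# >>> _make_unique_version('F20240101000000AM', {'F20240101000000AM'})
# 'F20240101000000AM1'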
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.

    Called from the ``RunTasks()`` method when it is possible to start a
    new task - the maximum number of simultaneously running ``Jobs`` is
    limited.
    """
    import backup_tar
    import backup
    iter_and_path = backup_fs.WalkByID(self.pathID)
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
        # self.defer.callback('error', self.pathID)
        return
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            return
    if self.localPath and self.localPath != sourcePath:
        lg.warn('local path was changed: %s -> %s' % (self.localPath, sourcePath))
    self.localPath = sourcePath
    if not bpio.pathExist(sourcePath):
        lg.warn('path does not exist: %s' % sourcePath)
        reactor.callLater(0, OnTaskFailed, self.pathID, 'not exist')
        return
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # we already have the same version - append 1, 2, 3, ...
        # to the end to make an absolutely unique version ID
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    backupID = self.pathID + '/' + dataID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        return
    compress_mode = 'bz2'  # 'none' # 'gz'
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        backupID,
        backupPipe,
        OnJobDone,
        OnBackupBlockReport,
        settings.getBackupBlockSize(),
        sourcePath,
    )
    jobs()[backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
        dirsize.ask(sourcePath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        jobs()[backupID].totalSize = os.path.getsize(sourcePath)
    jobs()[backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, sourcePath))
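# Both run() variants above share the same dir-vs-file pipe selection. A
# compact sketch of that branch (the wrapper name is hypothetical, and it
# assumes backup_tar and bpio are importable in this context):
#
#     def open_backup_pipe(source_path, compress='bz2'):
#         if bpio.pathIsDir(source_path):
#             return backup_tar.backuptar(source_path, compress=compress)
#         return backup_tar.backuptarfile(source_path, compress=compress)
#
# The caller must switch the pipe into non-blocking mode with
# backupPipe.make_nonblocking() before handing it to the backup() automat.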