def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        # accept POSIX paths with or without a leading slash
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {
            'result': {
                "success": False,
                "error": 'local path %s was not found' % path,
            },
        }
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        # the path is not in the catalog yet - register it first
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, read_stats=True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath, read_stats=True)
            result.append('new file was added: %s' % localPath)
    pathID = global_id.CanonicalID(pathID)
    backup_control.StartSingle(pathID=pathID, localPath=localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    result.append('backup started: %s' % pathID)
    return {
        'result': result,
    }
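A hypothetical call site for the variant above, assuming the catalog and backup services are already initialized; the sample path and the response handling are illustrative only:

# hypothetical usage sketch - _upload() expects a params dict with a
# 'path' key; on failure it returns an error dict, on success a list
# of human-readable status lines
response = _upload({'path': '/home/alice/photos'})
if isinstance(response['result'], dict) and not response['result'].get('success', True):
    print('upload failed: %s' % response['result']['error'])
else:
    for line in response['result']:
        print(line)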
def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {'result': {"success": False, "error": 'local path %s was not found' % path}}
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath, True)
            result.append('new file was added: %s' % localPath)
    backup_control.StartSingle(pathID, localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    result.append('backup started: %s' % pathID)
    return {'result': result, }
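Both `_upload` variants share the same first step: normalizing the incoming path before touching the catalog. A minimal self-contained sketch of just that step; `os.name` is an assumed stand-in for the project's `bpio.Linux()`/`bpio.Mac()` platform checks:

import os

def normalize_backup_path(path, is_posix=(os.name == 'posix')):
    # on POSIX systems the API accepts the path with or without a
    # leading slash, so a single slash is re-attached after stripping
    if is_posix:
        path = '/' + path.lstrip('/')
    return path

assert normalize_backup_path('home/alice/docs', is_posix=True) == '/home/alice/docs'
assert normalize_backup_path('/home/alice/docs', is_posix=True) == '/home/alice/docs'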
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.
    """
    iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.pathID)
        err = 'remote path "%s" not found in the catalog' % self.remotePath
        OnTaskFailed(self.pathID, err)
        return err
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            # self._on_job_failed(self.pathID)
            err = 'catalog item related to "%s" is broken' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
    if not self.localPath:
        # the task did not provide a local path - take it from the catalog
        self.localPath = sourcePath
        lg.out(4, 'backup_control.Task.run local path was populated from catalog: %s' % self.localPath)
    if self.localPath != sourcePath:
        lg.warn('local path differs from catalog: %s != %s' % (self.localPath, sourcePath))
    if not bpio.pathExist(self.localPath):
        lg.warn('path does not exist: %s' % self.localPath)
        # self._on_job_failed(self.pathID)
        err = 'local path "%s" does not exist' % self.localPath
        OnTaskFailed(self.pathID, err)
        return err
    # if os.path.isfile(self.localPath) and self.localPath != sourcePath:
    #     tmpfile.make(name, extension, prefix)
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # oops - we already have the same version,
        # let's add 1, 2, 3, ... to the end to make the version ID unique
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    self.backupID = packetid.MakeBackupID(
        customer=self.fullCustomerID,
        path_id=self.remotePath,
        version=dataID,
    )
    if self.backupID in jobs():
        lg.warn('backup job %s already started' % self.backupID)
        return 'backup job %s already started' % self.backupID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        # self._on_job_failed(self.backupID)
        err = 'failed creating destination folder for "%s"' % self.backupID
        return OnTaskFailed(self.backupID, err)
    compress_mode = 'bz2'  # 'none' # 'gz'
    arcname = os.path.basename(sourcePath)
    if bpio.pathIsDir(self.localPath):
        backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        self.backupID,
        backupPipe,
        finishCallback=OnJobDone,
        blockResultCallback=OnBackupBlockReport,
        blockSize=settings.getBackupBlockSize(),
        sourcePath=self.localPath,
        keyID=self.keyID or itemInfo.key_id,
    )
    jobs()[self.backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type == backup_fs.DIR:
        # folder size is calculated asynchronously
        dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        sz = os.path.getsize(self.localPath)
        jobs()[self.backupID].totalSize = sz
        itemInfo.set_size(sz)
    backup_fs.Calculate()
    Save()
    jobs()[self.backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, self.localPath))
    return None
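The version de-duplication step in `run()` is worth isolating. A minimal sketch of that loop; `existing_versions` is a hypothetical stand-in for the catalog item's version registry that `itemInfo.has_version()` queries:

# hedged sketch of the suffixing loop from Task.run(): when the fresh
# version ID collides with an existing one, integers 1, 2, 3, ... are
# appended until the ID becomes unique
def make_unique_version_id(data_id, existing_versions):
    if data_id not in existing_versions:
        return data_id
    i = 1
    while data_id + str(i) in existing_versions:
        i += 1
    return data_id + str(i)

assert make_unique_version_id('F20240101', set()) == 'F20240101'
assert make_unique_version_id('F20240101', {'F20240101'}) == 'F202401011'
assert make_unique_version_id('F20240101', {'F20240101', 'F202401011'}) == 'F202401012'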
def run(self):
    """
    Runs a new ``Job`` from that ``Task``.

    Called from the ``RunTasks()`` method when it is possible to start a new
    task - the number of simultaneously running ``Jobs`` is limited.
    """
    import backup_tar
    import backup
    iter_and_path = backup_fs.WalkByID(self.pathID)
    if iter_and_path is None:
        lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
        # self.defer.callback('error', self.pathID)
        return
    itemInfo, sourcePath = iter_and_path
    if isinstance(itemInfo, dict):
        try:
            itemInfo = itemInfo[backup_fs.INFO_KEY]
        except:
            lg.exc()
            return
    if self.localPath and self.localPath != sourcePath:
        lg.warn('local path was changed: %s -> %s' % (self.localPath, sourcePath))
    self.localPath = sourcePath
    if not bpio.pathExist(sourcePath):
        lg.warn('path does not exist: %s' % sourcePath)
        reactor.callLater(0, OnTaskFailed, self.pathID, 'not exist')
        return
    dataID = misc.NewBackupID()
    if itemInfo.has_version(dataID):
        # oops - we already have the same version,
        # let's add 1, 2, 3, ... to the end to make the version ID unique
        i = 1
        while itemInfo.has_version(dataID + str(i)):
            i += 1
        dataID += str(i)
    backupID = self.pathID + '/' + dataID
    try:
        backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    except:
        lg.exc()
        lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
        # self.defer.callback('error', self.pathID)
        return
    compress_mode = 'bz2'  # 'none' # 'gz'
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()
    job = backup.backup(
        backupID,
        backupPipe,
        OnJobDone,
        OnBackupBlockReport,
        settings.getBackupBlockSize(),
        sourcePath,
    )
    jobs()[backupID] = job
    itemInfo.add_version(dataID)
    if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
        # folder size is calculated asynchronously
        dirsize.ask(sourcePath, OnFoundFolderSize, (self.pathID, dataID))
    else:
        jobs()[backupID].totalSize = os.path.getsize(sourcePath)
    jobs()[backupID].automat('start')
    reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
    lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
        self.number, self.pathID, dataID, itemInfo.size, sourcePath))
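Per the docstring, `run()` is invoked from `RunTasks()` only while the number of simultaneously running jobs stays under a limit. A toy sketch of that scheduling idea; `MAX_SIMULTANEOUS_JOBS`, `task_queue`, and `active_jobs` are illustrative names and values, not the project's actual implementation:

MAX_SIMULTANEOUS_JOBS = 1

def run_tasks(task_queue, active_jobs):
    # start queued tasks only while the active job count is under the cap;
    # active_jobs mirrors the jobs() registry that run() fills on success
    while task_queue and len(active_jobs) < MAX_SIMULTANEOUS_JOBS:
        task = task_queue.pop(0)
        if task.run() is None:  # the newer revision returns an error string on failure
            active_jobs[task.backupID] = task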