Example #1
 def run(self):
     """
     Runs a new `Job` from that `Task`.
     Called from the `RunTasks()` method when it is possible to start a new task -
     the number of simultaneously running `Jobs` is limited.
     """
     iter_and_path = backup_fs.WalkByID(self.pathID)
     if iter_and_path is None:
         dhnio.Dprint(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
         # self.defer.callback('error', self.pathID)
         return
     itemInfo, sourcePath = iter_and_path
     if isinstance(itemInfo, dict):
         try:
             itemInfo = itemInfo[backup_fs.INFO_KEY]
         except Exception:
             dhnio.DprintException()
             return
     if not backup_fs.pathExist(sourcePath):
         dhnio.Dprint(4, 'backup_control.Task.run WARNING path does not exist: %s' % sourcePath)
         reactor.callLater(0, OnTaskFailed, self.pathID, 'not exist')
         return
     dataID = misc.NewBackupID()
     if itemInfo.has_version(dataID):
         # oops - we already have the same version
         # append 1, 2, 3... to the end to make an absolutely unique version ID
         i = 1
         while itemInfo.has_version(dataID+str(i)):
             i += 1
         dataID += str(i)
     backupID = self.pathID + '/' + dataID
     try:
         backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
     except Exception:
         dhnio.DprintException()
         dhnio.Dprint(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
         # self.defer.callback('error', self.pathID)
         return
     if backup_fs.pathIsDir(sourcePath):
         backupPipe = backup_tar.backuptar(sourcePath, compress='gz')
     else:
         backupPipe = backup_tar.backuptarfile(sourcePath, compress='gz')
     backupPipe.make_nonblocking()
     blockSize = settings.getBackupBlockSize()
     job = backup.backup(backupID, backupPipe, OnJobDone, OnBackupBlockReport, blockSize)
     jobs()[backupID] = job
     itemInfo.add_version(dataID)
     if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
         dirsize.ask(sourcePath, FoundFolderSize, (self.pathID, dataID))
     # self.defer.callback('started', backupID)
     reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
     dhnio.Dprint(4, 'backup_control.Task.run %s [%s], size=%d' % (self.pathID, dataID, itemInfo.size))
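The version-collision handling above is worth noting: `misc.NewBackupID()` presumably derives the ID from the current time, so two backups of the same path started in the same second would collide, and the loop appends 1, 2, 3... until the ID is unique. A standalone sketch of that logic (the function name, the set-based registry and the `F...`-style ID are illustrative assumptions, not the project's API):

def make_unique_version_id(new_id, existing_versions):
    # Return new_id unchanged if it is free, otherwise append the
    # smallest positive integer that makes it unique - this mirrors
    # the while-loop around itemInfo.has_version() above.
    if new_id not in existing_versions:
        return new_id
    i = 1
    while (new_id + str(i)) in existing_versions:
        i += 1
    return new_id + str(i)

# Two earlier backups already took the base ID and its first suffix:
existing = {'F20240101120000AM', 'F20240101120000AM1'}
print(make_unique_version_id('F20240101120000AM', existing))  # F20240101120000AM2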
Example #2
def main():
    import os
    import sys
    from twisted.internet import reactor
    from system import bpio
    import backup_tar
    import backup_fs
    # lg, settings, raid_worker, my_id and the backup class are
    # module-level names in the original source file
    lg.set_debug_level(24)
    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()

    def _bk_done(bid, result):
        from crypt import signed
        try:
            os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
        except OSError:
            # the output directory may already exist
            pass
        for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
            filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
            payld = str(bpio.ReadBinaryFile(filepath))
            newpacket = signed.Packet(
                'Data',
                my_id.getLocalID(),
                my_id.getLocalID(),
                filename,
                payld,
                'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
            bpio.AtomicWriteFile(newfilepath, newpacket.Serialize())
        reactor.stop()
    job = backup(backupID, backupPipe, _bk_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
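One detail worth copying from `_bk_done()` is the use of `bpio.AtomicWriteFile()`: each serialized packet is written so that a crash mid-write cannot leave a truncated file on disk. Assuming the helper follows the usual write-to-temp-then-rename pattern (an assumption about its internals; the sketch below is a stand-in, not the project's implementation):

import os
import tempfile

def atomic_write_file(path, data):
    # Write to a temporary file in the same directory, flush it to
    # disk, then rename it over the target; os.replace() is atomic
    # when source and destination live on the same filesystem.
    dirname = os.path.dirname(path) or '.'
    fd, tmp_path = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())
        os.replace(tmp_path, path)
    except Exception:
        os.unlink(tmp_path)
        raise

With such a helper, `atomic_write_file(newfilepath, newpacket.Serialize())` would be a drop-in replacement for the call above.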
Example #3
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.

        Called from the ``RunTasks()`` method when it is possible to start a
        new task - the number of simultaneously running ``Jobs``
        is limited.
        """
        import backup_tar
        import backup
        iter_and_path = backup_fs.WalkByID(self.pathID)
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except Exception:
                lg.exc()
                return
        if self.localPath and self.localPath != sourcePath:
            lg.warn('local path was changed: %s -> %s' % (self.localPath, sourcePath))
        self.localPath = sourcePath
        if not bpio.pathExist(sourcePath):
            lg.warn('path does not exist: %s' % sourcePath)
            reactor.callLater(0, OnTaskFailed, self.pathID, 'not exist')
            return
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # oops - we already have the same version
            # append 1, 2, 3... to the end to make an absolutely unique version ID
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
        backupID = self.pathID + '/' + dataID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
        except Exception:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        compress_mode = 'bz2'  # 'none' # 'gz'
        if bpio.pathIsDir(sourcePath):
            backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            backupID, backupPipe,
            OnJobDone, OnBackupBlockReport,
            settings.getBackupBlockSize(),
            sourcePath)
        jobs()[backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
            dirsize.ask(sourcePath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            jobs()[backupID].totalSize = os.path.getsize(sourcePath)
        jobs()[backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, sourcePath))
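Finally, note that both versions of `run()` fire their notifications through `reactor.callLater(0, FireTaskStartedCallbacks, ...)` instead of calling them directly: the callback is queued for the next reactor iteration, so listeners can never run re-entrantly while `run()` is still registering the job. A self-contained Twisted sketch of the same pattern (every name below is invented for the demo):

from twisted.internet import reactor

def on_started(task_id):
    print('callback fired for', task_id)
    reactor.stop()

def start_task(task_id):
    print('registering', task_id)
    # Queue the notification instead of invoking it; it runs on the
    # next reactor iteration, after start_task() has fully returned.
    reactor.callLater(0, on_started, task_id)
    print('start_task() done, callback still pending')

start_task('0/0/1/F20240101120000AM')
reactor.run()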