Example #1
 def _do_select_archive_snapshots(self):
     """
     Select the archive snapshots that fall within the configured range.

     Walks the catalog folder at ``self.archive_folder_path`` (owned by
     ``self.queue_owner_idurl`` with alias ``self.queue_alias``), collects
     every known snapshot backup ID there, keeps only those whose trailing
     numeric sequence ID lies between ``self.start_sequence_id`` and
     ``self.end_sequence_id`` (either bound may be None, meaning open),
     and stores them, sorted by sequence ID, in ``self.selected_backups``.

     Returns True when at least one snapshot was selected; otherwise fires
     the 'restore-failed' event on the state machine and returns False.
     """
     iterID_and_path = backup_fs.WalkByID(self.archive_folder_path,
                                          iterID=backup_fs.fsID(
                                              self.queue_owner_idurl,
                                              self.queue_alias))
     if iterID_and_path is None:
         lg.err('did not found archive folder in the catalog: %r' %
                self.archive_folder_path)
         self.automat('restore-failed')
         return False
     iterID, _ = iterID_and_path
     known_archive_snapshots_list = backup_fs.ListAllBackupIDsFull(
         iterID=iterID)
     if not known_archive_snapshots_list:
         lg.err(
             'failed to restore data from archive, no snapshots found in folder: %r'
             % self.archive_folder_path)
         self.automat('restore-failed')
         return False
     # position 1 of each catalog item holds the full backup ID string
     snapshots_list = [
         archive_item[1] for archive_item in known_archive_snapshots_list
     ]
     if _Debug:
         lg.args(_DebugLevel, snapshots_list=snapshots_list)
     if not snapshots_list:
         lg.err('no available snapshots found in archive list: %r' %
                known_archive_snapshots_list)
         self.automat('restore-failed')
         return False
     snapshot_sequence_ids = []
     for backup_id in snapshots_list:
         _, path_id, _ = packetid.SplitBackupID(backup_id)
         if not path_id:
             continue
         try:
             # the last component of the path ID is the numeric sequence ID
             snapshot_sequence_id = int(path_id.split('/')[-1])
         except ValueError:
             # narrowed from a bare "except:" so system-exiting exceptions
             # (KeyboardInterrupt, SystemExit) are not swallowed here
             lg.exc()
             continue
         if self.start_sequence_id is not None and self.start_sequence_id > snapshot_sequence_id:
             continue
         if self.end_sequence_id is not None and self.end_sequence_id < snapshot_sequence_id:
             continue
         snapshot_sequence_ids.append((
             snapshot_sequence_id,
             backup_id,
         ))
     # restore snapshots in ascending sequence order; items already carry
     # an int at position 0, no extra conversion needed
     snapshot_sequence_ids.sort(key=lambda item: item[0])
     if _Debug:
         lg.args(_DebugLevel, snapshot_sequence_ids=snapshot_sequence_ids)
     self.selected_backups = [item[1] for item in snapshot_sequence_ids]
     if not self.selected_backups:
         lg.err('no backups selected from snapshot list')
         self.automat('restore-failed')
         return False
     if _Debug:
         lg.args(_DebugLevel, selected_backups=self.selected_backups)
     return True
Example #2
 def doStartRestoreWorker(self, *args, **kwargs):
     """
     Action method.

     Looks up the archive folder in the catalog, picks the first known
     snapshot and spawns a RestoreWorker that writes the restored data
     into a freshly created temporary ``.tar.gz`` file.
     """
     walk_result = backup_fs.WalkByID(self.archive_folder_path,
                                      iterID=backup_fs.fsID(
                                          self.queue_owner_idurl))
     if walk_result is None:
         lg.err('did not found archive folder in the catalog: %r' %
                self.archive_folder_path)
         self.automat('restore-failed')
         return
     cur_iter, _ = walk_result
     found_snapshots = backup_fs.ListAllBackupIDsFull(iterID=cur_iter)
     if not found_snapshots:
         lg.err(
             'failed to restore data from archive, no snapshots found in folder: %r'
             % self.archive_folder_path)
         self.automat('restore-failed')
         return
     # position 1 of each catalog item holds the full backup ID string
     snapshot_ids = [one_item[1] for one_item in found_snapshots]
     if _Debug:
         lg.args(_DebugLevel, snapshots_list=snapshot_ids)
     if not snapshot_ids:
         lg.err('no available snapshots found in archive list: %r' %
                found_snapshots)
         self.automat('restore-failed')
         return
     backup_id = snapshot_ids[0]
     # temp file prefix is the backup ID with unsafe characters mapped to '_'
     safe_prefix = backup_id.translate(str.maketrans('@./:', '____')) + '_'
     outfd, outfilename = tmpfile.make(
         'restore',
         extension='.tar.gz',
         prefix=safe_prefix,
     )
     rw = restore_worker.RestoreWorker(backup_id,
                                       outfd,
                                       KeyID=self.group_key_id)
     d = rw.MyDeferred
     d.addCallback(self._on_restore_done, backup_id, outfd, outfilename)
     d.addErrback(self._on_restore_failed, backup_id, outfd, outfilename)
     if _Debug:
         d.addErrback(lg.errback,
                      debug=_Debug,
                      debug_level=_DebugLevel,
                      method='archive_reader.doStartRestoreWorker')
     rw.automat('init')
Example #3
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.

        Validates the catalog entry and the local path, builds a unique
        backup ID for the new version, prepares the destination folder,
        starts the tar pipe and the ``backup.backup()`` state machine,
        then registers the new job.

        Returns None on success, otherwise an error message string.
        """
        iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
            err = 'remote path "%s" not found in the catalog' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except Exception:
                # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt propagate
                lg.exc()
                err = 'catalog item related to "%s" is broken' % self.remotePath
                OnTaskFailed(self.pathID, err)
                return err
        if not self.localPath:
            self.localPath = sourcePath
            # fixed: the debug level argument was missing here, every other
            # lg.out() call in this method passes it as the first argument
            lg.out(4, 'backup_control.Task.run local path was populated from catalog: %s' % self.localPath)
        if self.localPath != sourcePath:
            lg.warn('local path is differ from catalog: %s != %s' % (self.localPath, sourcePath))
        if not bpio.pathExist(self.localPath):
            lg.warn('path not exist: %s' % self.localPath)
            err = 'local path "%s" not exist' % self.localPath
            OnTaskFailed(self.pathID, err)
            return err
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # the same version ID already exists - append 1,2,3... to the
            # end until the version ID becomes unique
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
        self.backupID = packetid.MakeBackupID(
            customer=self.fullCustomerID,
            path_id=self.remotePath,
            version=dataID,
        )
        if self.backupID in jobs():
            lg.warn('backup job %s already started' % self.backupID)
            return 'backup job %s already started' % self.backupID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
        except Exception:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            err = 'failed creating destination folder for "%s"' % self.backupID
            return OnTaskFailed(self.backupID, err)
        compress_mode = 'bz2'  # 'none' # 'gz'
        arcname = os.path.basename(sourcePath)
        # pick the directory or single-file tar pipe depending on the local path type
        if bpio.pathIsDir(self.localPath):
            backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            self.backupID,
            backupPipe,
            finishCallback=OnJobDone,
            blockResultCallback=OnBackupBlockReport,
            blockSize=settings.getBackupBlockSize(),
            sourcePath=self.localPath,
            keyID=self.keyID or itemInfo.key_id,
        )
        jobs()[self.backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type == backup_fs.DIR:
            # folder size is discovered asynchronously via dirsize
            dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            sz = os.path.getsize(self.localPath)
            jobs()[self.backupID].totalSize = sz
            itemInfo.set_size(sz)
            backup_fs.Calculate()
            Save()
        jobs()[self.backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, self.localPath))
        return None