Example #1
def _download(params):
    # localName = params['name']
    backupID = global_id.CanonicalID(params['backupid'])
    destpath = params['dest_path']
    if bpio.Linux() or bpio.Mac():
        destpath = '/' + destpath.lstrip('/')
    restorePath = bpio.portablePath(destpath)
    # overwrite = params['overwrite']
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    pathID = packetid.MakeBackupID(customerGlobalID, remotePath)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not packetid.IsCanonicalVersion(version) or not remotePath or not packetid.Valid(remotePath):
        return {'result': {"success": False, "error": "path %s is not valid" % backupID}}
    if backup_control.IsBackupInProcess(backupID):
        return {'result': {"success": True, "error": None}}
    if backup_control.HasTask(pathID):
        return {'result': {"success": True, "error": None}}
    localPath = backup_fs.ToPath(remotePath)
    if localPath == restorePath:
        restorePath = os.path.dirname(restorePath)

    def _itemRestored(backupID, result):
        customerGlobalID, remotePath, _ = packetid.SplitBackupID(backupID)
        backup_fs.ScanID(remotePath, customer_idurl=global_id.GlobalUserToIDURL(customerGlobalID))
        backup_fs.Calculate()

    restore_monitor.Start(backupID, restorePath, callback=_itemRestored)
    return {'result': {"success": True, "error": None}}
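All of these snippets revolve around packetid.MakeBackupID(), which joins a customer global ID, a remote path ID and, optionally, a version name into a single backup ID string; packetid.SplitBackupID() performs the reverse. A minimal sketch of that round trip (the identifier values and exact separators below are assumptions for illustration, not the library's documented format):

    # hypothetical values for illustration only - real BitDust IDs may differ
    full_backup_id = 'alice@idhost.net:0/0/1/F20201209034234PM'
    customer, remote_path, version = packetid.SplitBackupID(full_backup_id)
    # customer    -> 'alice@idhost.net'
    # remote_path -> '0/0/1'
    # version     -> 'F20201209034234PM'
    path_id = packetid.MakeBackupID(customer, remote_path)  # rebuilt without the version part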
Example #2
 def __init__(self,
              fileName,
              packetID,
              remoteID,
              ownerID,
              callOnAck=None,
              callOnFail=None):
     self.fileName = fileName
     try:
         self.fileSize = os.path.getsize(os.path.abspath(fileName))
     except:
         lg.exc()
         self.fileSize = 0
     self.packetID = global_id.CanonicalID(packetID)
     parts = global_id.ParseGlobalID(packetID)
     self.customerID = parts['customer']
     self.remotePath = parts['path']
     self.customerIDURL = parts['idurl']
     customerGlobalID, remotePath, versionName, _ = packetid.SplitVersionFilename(
         packetID)
     self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath,
                                           versionName)
     self.remoteID = remoteID
     self.ownerID = ownerID
     self.callOnAck = callOnAck
     self.callOnFail = callOnFail
     self.sendTime = None
     self.ackTime = None
     self.sendTimeout = 10 * 2 * (
         max(int(self.fileSize / settings.SendingSpeedLimit()), 5) + 5
     )  # never less than 200 seconds; scales with file size and the sending speed limit
     self.result = ''
     self.created = utime.get_sec1970()
     PacketReport('send', self.remoteID, self.packetID, 'init')
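Reading the sendTimeout expression above with concrete numbers (assumed for illustration): for a 10 MB file and a SendingSpeedLimit() of 102400 bytes/s, int(10485760 / 102400) = 102, max(102, 5) + 5 = 107, and 10 * 2 * 107 = 2140 seconds. For any file small enough that the division yields less than 5, the timeout bottoms out at 10 * 2 * (5 + 5) = 200 seconds.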
Example #3
 def __init__(self,
              parent, callOnReceived, creatorID, packetID, ownerID, remoteID,
              debug_level=_DebugLevel, log_events=_Debug, log_transitions=_Debug, publish_events=False, **kwargs):
     """
     Builds `file_down()` state machine.
     """
     self.parent = parent
     self.callOnReceived = []
     self.callOnReceived.append(callOnReceived)
     self.creatorID = creatorID
     self.packetID = global_id.CanonicalID(packetID)
     parts = global_id.ParseGlobalID(packetID)
     self.customerID = parts['customer']
     self.remotePath = parts['path']
     self.customerIDURL = parts['idurl']
     customerGlobalID, remotePath, versionName, fileName = packetid.SplitVersionFilename(packetID)
     self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath, versionName)
     self.fileName = fileName
     self.ownerID = ownerID
     self.remoteID = remoteID
     self.requestTime = None
     self.fileReceivedTime = None
     self.requestTimeout = max(30, 2 * int(settings.getBackupBlockSize() / settings.SendingSpeedLimit()))
     self.result = ''
     self.created = utime.get_sec1970()
     super(FileDown, self).__init__(
         name="file_down_%s_%s/%s/%s" % (nameurl.GetName(self.remoteID), remotePath, versionName, fileName),
         state="AT_STARTUP",
         debug_level=debug_level,
         log_events=log_events,
         log_transitions=log_transitions,
         publish_events=publish_events,
         **kwargs
     )
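The requestTimeout formula here works similarly (numbers assumed for illustration): with getBackupBlockSize() returning 262144 bytes and SendingSpeedLimit() at 102400 bytes/s, 2 * int(262144 / 102400) = 4, so max(30, 4) keeps the 30-second floor; the computed value only exceeds that floor once the block size passes roughly 1.5 MB at this speed.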
Example #4
def OnJobDone(backupID, result):
    """
    A callback method fired when a backup is finished.

    Here we need to save the index database.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' %
            (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath,
                                     iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    # use distinct names here so the outer backupID and version,
                    # still needed below, are not clobbered by the cleanup loop
                    for oldVersion in versions[maxBackupsNum:]:
                        item.delete_version(oldVersion)
                        oldBackupID = packetid.MakeBackupID(
                            customerGlobalID, remotePath, oldVersion)
                        backup_rebuilder.RemoveBackupToWork(oldBackupID)
                        # io_throttle.DeleteBackupRequests(oldBackupID)
                        # io_throttle.DeleteBackupSendings(oldBackupID)
                        # callback.delete_backup_interest(oldBackupID)
                        backup_fs.DeleteLocalBackup(
                            settings.getLocalBackupsDir(), oldBackupID)
                        backup_matrix.EraseBackupLocalInfo(oldBackupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([
            ('pathID', remotePath),
        ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if a lot of tasks were started, this would produce many unneeded actions;
        # it is smarter to restart it once all tasks are finished, because the user
        # will probably leave BitDust running after starting a long operation
        from storage import backup_monitor
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left'
            )
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version,
                      result)  # @UndefinedVariable
Example #5
def DeletePathBackups(pathID,
                      removeLocalFilesToo=True,
                      saveDB=True,
                      calculate=True):
    """
    Removes all backups of the given path ID, performing the same
    operations as ``DeleteBackup()``.
    """
    from . import backup_rebuilder
    from customer import io_throttle
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
    if item is None:
        return False
    lg.out(8, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        lg.out(8, '        removing %s' % backupID)
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested files for this backup, we do not need them anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(),
                                        backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
Example #6
 def __init__(self,
              parent,
              fileName,
              packetID,
              remoteID,
              ownerID,
              callOnAck=None,
              callOnFail=None,
              debug_level=_DebugLevel,
              log_events=_Debug,
              log_transitions=_Debug,
              publish_events=False,
              **kwargs):
     """
     Builds `file_up()` state machine.
     """
     self.parent = parent
     self.fileName = fileName
     try:
         self.fileSize = os.path.getsize(os.path.abspath(fileName))
     except:
         lg.exc()
         self.fileSize = 0
     self.packetID = global_id.CanonicalID(packetID)
     parts = global_id.ParseGlobalID(packetID)
     self.customerID = parts['customer']
     self.remotePath = parts['path']
     self.customerIDURL = parts['idurl']
     customerGlobalID, remotePath, versionName, fileName = packetid.SplitVersionFilename(
         packetID)
     self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath,
                                           versionName)
     self.remoteID = remoteID
     self.ownerID = ownerID
     self.callOnAck = callOnAck
     self.callOnFail = callOnFail
     self.sendTime = None
     self.ackTime = None
     self.sendTimeout = 10 * 2 * (
         max(int(self.fileSize / settings.SendingSpeedLimit()), 5) + 5
     )  # never less than 200 seconds; scales with file size and the sending speed limit
     self.result = ''
     self.created = utime.get_sec1970()
     super(FileUp, self).__init__(
         name="file_up_%s_%s/%s/%s" % (nameurl.GetName(self.remoteID), remotePath, versionName, fileName),
         state="AT_STARTUP",
         debug_level=debug_level,
         log_events=log_events,
         log_transitions=log_transitions,
         publish_events=publish_events,
         **kwargs
     )
Example #7
 def __init__(self, callOnReceived, creatorID, packetID, ownerID, remoteID):
     self.callOnReceived = []
     self.callOnReceived.append(callOnReceived)
     self.creatorID = creatorID
     self.packetID = global_id.CanonicalID(packetID)
     parts = global_id.ParseGlobalID(packetID)
     self.customerID = parts['customer']
     self.remotePath = parts['path']
     self.customerIDURL = parts['idurl']
     customerGlobalID, remotePath, versionName, fileName = packetid.SplitVersionFilename(packetID)
     self.backupID = packetid.MakeBackupID(customerGlobalID, remotePath, versionName)
     self.fileName = fileName
     self.ownerID = ownerID
     self.remoteID = remoteID
     self.requestTime = None
     self.fileReceivedTime = None
     self.requestTimeout = max(30, 2 * int(settings.getBackupBlockSize() / settings.SendingSpeedLimit()))
     self.result = ''
     self.created = utime.get_sec1970()
     PacketReport('request', self.remoteID, self.packetID, 'init')
Example #8
 def _do_start_archive_backup(self):
     local_path = self.local_data_callback(self.queue_id,
                                           self.latest_sequence_id)
     supplier_path_id = os.path.join(self.archive_folder_path,
                                     strng.to_text(self.latest_sequence_id))
     dataID = misc.NewBackupID()
     backup_id = packetid.MakeBackupID(
         customer=self.queue_owner_id,
         path_id=supplier_path_id,
         key_alias=self.queue_alias,
         version=dataID,
     )
     backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backup_id)
     if bpio.Android():
         compress_mode = 'none'
     else:
         compress_mode = 'bz2'
     arcname = os.path.basename(local_path)
     backupPipe = backup_tar.backuptarfile_thread(local_path,
                                                  arcname=arcname,
                                                  compress=compress_mode)
     self.backup_job = backup.backup(
         backupID=backup_id,
         pipe=backupPipe,
         blockResultCallback=self._on_archive_backup_block_result,
         finishCallback=self._on_archive_backup_done,
         blockSize=1024 * 1024 * 10,
         sourcePath=local_path,
         keyID=self.group_key_id,
         ecc_map=eccmap.eccmap(self.ecc_map),
         creatorIDURL=self.queue_owner_idurl,
     )
     self.backup_job.automat('start')
     if _Debug:
         lg.args(_DebugLevel,
                 job=self.backup_job,
                 backup_id=backup_id,
                 local_path=local_path,
                 group_key_id=self.group_key_id)
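This is the only snippet that passes key_alias to packetid.MakeBackupID(), naming all four parts explicitly. Assuming the usual alias$user@host:path layout of BitDust global IDs (an assumption, not verified here), the call would compose something like:

    # hypothetical values for illustration only
    backup_id = packetid.MakeBackupID(
        customer='alice@idhost.net',
        path_id='1/2/3',
        key_alias='group_abcd',
        version='F20201209034234PM',
    )
    # roughly: 'group_abcd$alice@idhost.net:1/2/3/F20201209034234PM'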
Example #9
def OnFoundFolderSize(pth, sz, arg):
    """
    This is a callback fired from the ``lib.dirsize.ask()`` method after it
    finishes calculating the folder size.
    """
    try:
        pathID, version = arg
        customerGlobID, pathID = packetid.SplitPacketID(pathID)
        customerIDURL = global_id.GlobalUserToIDURL(customerGlobID)
        item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customerIDURL))
        if item:
            item.set_size(sz)
            backup_fs.Calculate()
            Save()
        if version:
            backupID = packetid.MakeBackupID(customerGlobID, pathID, version)
            job = GetRunningBackupObject(backupID)
            if job:
                job.totalSize = sz
            if _Debug:
                # log inside this branch, because backupID only exists
                # when a version was passed in with the callback argument
                lg.out(_DebugLevel, 'backup_control.OnFoundFolderSize %s %d' % (backupID, sz))
    except:
        lg.exc()
Example #10
 def doCleanUpBackups(self, *args, **kwargs):
     # here we check all the backups we have and remove the oldest ones
     # the user can set how many versions of each file or folder to keep
     # older versions beyond that limit are removed here
     from storage import backup_rebuilder
     try:
         self.backups_progress_last_iteration = len(
             backup_rebuilder.A().backupsWasRebuilt)
     except:
         self.backups_progress_last_iteration = 0
     versionsToKeep = settings.getBackupsMaxCopies()
     if not contactsdb.num_suppliers():
         bytesUsed = 0
     else:
         bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(),
                                                0)
     customerGlobID = my_id.getGlobalID()
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
             % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             pathID = global_id.CanonicalID(pathID)
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so it may need sorting
             while len(versions) > versionsToKeep:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  versions.pop(0))
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                         % (len(versions), versionsToKeep, localPath,
                            backupID))
                 backup_control.DeleteBackup(backupID,
                                             saveDB=False,
                                             calculate=False)
                 delete_count += 1
     # we also need to fit the used space into the needed space (granted by other users)
     # they trust us - we should not take extra space from our friends
     # so remove the oldest backups, but keep at least one for every folder - at least locally!
     # our suppliers will still remove our "extra" files via their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             pathID = global_id.CanonicalID(pathID)
             versions = itemInfo.list_versions(sorted=True, reverse=False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  version)
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                             %
                             (bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID,
                                                 saveDB=False,
                                                 calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from main import control
         control.request_update()
     collected = gc.collect()
     if self.backups_progress_last_iteration > 0:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_monitor.doCleanUpBackups  sending "restart", backups_progress_last_iteration=%s'
                 % self.backups_progress_last_iteration)
         reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups collected %d objects' %
             collected)
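Read concretely: with versionsToKeep = 3 and an item holding five versions, the first while loop pops and deletes the two oldest entries (versions.pop(0)) until three remain. The second pass only runs while bytesUsed is at or above bytesNeeded, iterates versions[1:] so that every item keeps at least one version no matter how far over quota the node is, and stops as soon as enough bytes have been freed.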
Example #11
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.
        """
        iter_and_path = backup_fs.WalkByID(self.remotePath, iterID=backup_fs.fsID(self.customerIDURL))
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.remotePath)
            # self.defer.callback('error', self.pathID)
            # self._on_job_failed(self.pathID)
            err = 'remote path "%s" not found in the catalog' % self.remotePath
            OnTaskFailed(self.pathID, err)
            return err
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except:
                lg.exc()
                # self._on_job_failed(self.pathID)
                err = 'catalog item related to "%s" is broken' % self.remotePath
                OnTaskFailed(self.pathID, err)
                return err
        if not self.localPath:
            self.localPath = sourcePath
            lg.out(4, 'backup_control.Task.run local path was populated from the catalog: %s' % self.localPath)
        if self.localPath != sourcePath:
            lg.warn('local path differs from the catalog: %s != %s' % (self.localPath, sourcePath))
        if not bpio.pathExist(self.localPath):
            lg.warn('path does not exist: %s' % self.localPath)
            # self._on_job_failed(self.pathID)
            err = 'local path "%s" does not exist' % self.localPath
            OnTaskFailed(self.pathID, err)
            return err
#         if os.path.isfile(self.localPath) and self.localPath != sourcePath:
#             tmpfile.make(name, extension, prefix)
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # oops - we already have the same version
            # append 1, 2, 3... to the end to make the version ID unique
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
        self.backupID = packetid.MakeBackupID(
            customer=self.fullCustomerID,
            path_id=self.remotePath,
            version=dataID,
        )
        if self.backupID in jobs():
            lg.warn('backup job %s already started' % self.backupID)
            return 'backup job %s already started' % self.backupID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), self.backupID)
        except:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            # self.defer.callback('error', self.pathID)
            # self._on_job_failed(self.backupID)
            err = 'failed creating destination folder for "%s"' % self.backupID
            return OnTaskFailed(self.backupID, err)
        compress_mode = 'bz2'  # 'none' # 'gz'
        arcname = os.path.basename(sourcePath)
        if bpio.pathIsDir(self.localPath):
            backupPipe = backup_tar.backuptardir(self.localPath, arcname=arcname, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(self.localPath, arcname=arcname, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            self.backupID,
            backupPipe,
            finishCallback=OnJobDone,
            blockResultCallback=OnBackupBlockReport,
            blockSize=settings.getBackupBlockSize(),
            sourcePath=self.localPath,
            keyID=self.keyID or itemInfo.key_id,
        )
        jobs()[self.backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type == backup_fs.DIR:
            dirsize.ask(self.localPath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            sz = os.path.getsize(self.localPath)
            jobs()[self.backupID].totalSize = sz
            itemInfo.set_size(sz)
            backup_fs.Calculate()
            Save()
        jobs()[self.backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, self.localPath))
        return None
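Note the collision handling around dataID in this method: if the catalog item already holds a version with the same name (two backups started close enough together to produce the same timestamp-based ID), the loop appends 1, 2, 3, ... until the name is unique, so a second F20201209034234PM would become F20201209034234PM1 (version name format assumed for illustration).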