Code example #1
 def doScanExistingPackets(self, *args, **kwargs):
     """
     Action method.
     """
     for SupplierNumber in range(self.EccMap.datasegments):
         PacketID = packetid.MakePacketID(self.backup_id, self.block_number, SupplierNumber, 'Data')
         customerID, remotePath = packetid.SplitPacketID(PacketID)
         self.OnHandData[SupplierNumber] = bool(os.path.exists(os.path.join(
             settings.getLocalBackupsDir(), customerID, remotePath)))
     for SupplierNumber in range(self.EccMap.paritysegments):
         PacketID = packetid.MakePacketID(self.backup_id, self.block_number, SupplierNumber, 'Parity')
         customerID, remotePath = packetid.SplitPacketID(PacketID)
         self.OnHandParity[SupplierNumber] = bool(os.path.exists(os.path.join(
             settings.getLocalBackupsDir(), customerID, remotePath)))
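
The pattern above - build each piece's packet ID, split it into a customer-relative path, and probe the file with os.path.exists() - can be shown without the BitDust helpers. A minimal stdlib sketch, assuming only that every Data/Parity piece of a block maps to one file under the local backups directory; the block_paths callable is a hypothetical stand-in for MakePacketID() plus SplitPacketID():

import os

def scan_existing_segments(backups_dir, block_paths, data_segments, parity_segments):
    # Mark which Data/Parity pieces of one block are already on disk.
    # block_paths(kind, n) returns the relative path of piece n of the
    # given kind ('Data' or 'Parity').
    on_hand_data = [False] * data_segments
    on_hand_parity = [False] * parity_segments
    for n in range(data_segments):
        on_hand_data[n] = os.path.exists(os.path.join(backups_dir, block_paths('Data', n)))
    for n in range(parity_segments):
        on_hand_parity[n] = os.path.exists(os.path.join(backups_dir, block_paths('Parity', n)))
    return on_hand_data, on_hand_parity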
Code example #2
File: restore.py Project: hack-bitdust/devel
 def doScanExistingPackets(self, arg):
     for SupplierNumber in range(self.EccMap.datasegments):
         PacketID = packetid.MakePacketID(self.BackupID, self.BlockNumber,
                                          SupplierNumber, 'Data')
         customer, remotePath = packetid.SplitPacketID(PacketID)
         self.OnHandData[SupplierNumber] = os.path.exists(
             os.path.join(settings.getLocalBackupsDir(), customer,
                          remotePath))
     for SupplierNumber in range(self.EccMap.paritysegments):
         PacketID = packetid.MakePacketID(self.BackupID, self.BlockNumber,
                                          SupplierNumber, 'Parity')
         customer, remotePath = packetid.SplitPacketID(PacketID)
         self.OnHandParity[SupplierNumber] = os.path.exists(
             os.path.join(settings.getLocalBackupsDir(), customer,
                          remotePath))
Code example #3
File: backup.py Project: vesellov/bitdust.devel
 def doBlockPushAndRaid(self, arg):
     """
     Action method.
     """
     newblock = arg
     if self.terminating:
         self.automat('block-raid-done', (newblock.BlockNumber, None))
         lg.out(_DebugLevel, 'backup.doBlockPushAndRaid SKIP, terminating=True')
         return
     fileno, filename = tmpfile.make('raid')
     serializedblock = newblock.Serialize()
     blocklen = len(serializedblock)
     os.write(fileno, str(blocklen) + ":" + serializedblock)
     os.close(fileno)
     self.workBlocks[newblock.BlockNumber] = filename
     dt = time.time()
     outputpath = os.path.join(settings.getLocalBackupsDir(), self.backupID)
     task_params = (filename, self.eccmap.name, self.backupID, newblock.BlockNumber, outputpath)
     raid_worker.add_task('make', task_params,
                          lambda cmd, params, result: self._raidmakeCallback(params, result, dt),)
     self.automat('block-raid-started', newblock)
     del serializedblock
     if _Debug:
         lg.out(_DebugLevel, 'backup.doBlockPushAndRaid %s : start process data from %s to %s, %d' % (
             newblock.BlockNumber, filename, outputpath, id(self.terminating)))
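
Before the block file is handed to raid_worker, it is written with a simple "<length>:<payload>" framing. A self-contained sketch of that framing and its read-back, using the stdlib tempfile module instead of the project's tmpfile (the function names are illustrative, not BitDust APIs):

import os
import tempfile

def write_length_prefixed(data):
    # data is bytes; writes "<length>:<payload>" to a temp file, as the
    # example does before scheduling the 'make' task
    fd, path = tempfile.mkstemp(prefix='raid_')
    os.write(fd, str(len(data)).encode() + b':' + data)
    os.close(fd)
    return path

def read_length_prefixed(path):
    with open(path, 'rb') as f:
        raw = f.read()
    head, _, payload = raw.partition(b':')
    assert int(head.decode()) == len(payload), 'corrupted block file'
    return payload

# round trip: read_length_prefixed(write_length_prefixed(b'abc')) == b'abc'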
Code example #4
 def QueueRequestFile(self, callOnReceived, creatorID, packetID, ownerID,
                      remoteID):
     # make sure that we don't actually already have the file
     # if packetID != settings.BackupInfoFileName():
     remoteID = id_url.field(remoteID)
     ownerID = id_url.field(ownerID)
     creatorID = id_url.field(creatorID)
     if packetID not in [
             settings.BackupInfoFileName(),
             settings.BackupInfoFileNameOld(),
             settings.BackupInfoEncryptedFileName(),
     ]:
         customer, pathID = packetid.SplitPacketID(packetID)
         filename = os.path.join(settings.getLocalBackupsDir(), customer,
                                 pathID)
         if os.path.exists(filename):
             lg.warn("%s already exists" % filename)
             if callOnReceived:
                 reactor.callLater(0, callOnReceived, packetID,
                                   'exist')  # @UndefinedVariable
             return False
     if remoteID not in list(self.supplierQueues.keys()):
         # make a new queue for this supplier
         self.supplierQueues[remoteID] = SupplierQueue(
             remoteID, self.creatorID)
         lg.info("made a new receiving queue for %s" %
                 nameurl.GetName(remoteID))
     # lg.out(10, "io_throttle.QueueRequestFile asking for %s from %s" % (packetID, nameurl.GetName(remoteID)))
     return self.supplierQueues[remoteID].SupplierRequestFile(
         callOnReceived, creatorID, packetID, ownerID)
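
Two ideas carry this method: skip the request entirely when the file already sits in the local backups directory, and lazily create one queue per remote peer. A compact sketch with a plain dict of lists standing in for SupplierQueue (RequestThrottle and its members are hypothetical names):

import os

class RequestThrottle(object):

    def __init__(self, backups_dir):
        self.backups_dir = backups_dir
        self.queues = {}

    def queue_request(self, remote_id, relative_path):
        # already cached locally - nothing to request
        if os.path.exists(os.path.join(self.backups_dir, relative_path)):
            return False
        # lazily create the per-peer queue, then enqueue
        self.queues.setdefault(remote_id, []).append(relative_path)
        return True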
Code example #5
 def _start_one_block(self):
     from storage import backup_matrix
     if self.blockIndex < 0:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_rebuilder._start_one_block finish all blocks blockIndex=%d'
                 % self.blockIndex)
         reactor.callLater(0, self._finish_rebuilding)  # @UndefinedVariable
         return
     BlockNumber = self.workingBlocksQueue[self.blockIndex]
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_rebuilder._start_one_block %d to rebuild, blockIndex=%d, other blocks: %s'
             % (
                 BlockNumber,
                 self.blockIndex,
                 str(self.workingBlocksQueue),
             ))
     task_params = (
         self.currentBackupID,
         BlockNumber,
         eccmap.Current().name,
         backup_matrix.GetActiveArray(),
         backup_matrix.GetRemoteMatrix(self.currentBackupID, BlockNumber),
         backup_matrix.GetLocalMatrix(self.currentBackupID, BlockNumber),
         settings.getLocalBackupsDir(),
     )
     raid_worker.add_task(
         'rebuild', task_params,
         lambda cmd, params, result: self._block_finished(result, params))
Code example #6
 def doSavePacket(self, *args, **kwargs):
     """
     Action method.
     """
     if not args or not args[0]:
         raise Exception('no input found')
     NewPacket, PacketID = args[0]
     glob_path = global_id.ParseGlobalID(PacketID, detect_version=True)
     packetID = global_id.CanonicalID(PacketID)
     customer_id, _, _, _, SupplierNumber, dataORparity = packetid.SplitFull(packetID)
     if dataORparity == 'Data':
         self.OnHandData[SupplierNumber] = True
     elif dataORparity == 'Parity':
         self.OnHandParity[SupplierNumber] = True
     if not NewPacket:
         lg.warn('packet %r already exists locally' % packetID)
         return
     filename = os.path.join(settings.getLocalBackupsDir(), customer_id, glob_path['path'])
     dirpath = os.path.dirname(filename)
     if not os.path.exists(dirpath):
         try:
             bpio._dirs_make(dirpath)
         except:
             lg.exc()
     # either way the payload of packet is saved
     if not bpio.WriteBinaryFile(filename, NewPacket.Payload):
         lg.err("unable to write to %s" % filename)
         return
     if self.packetInCallback is not None:
         self.packetInCallback(self.backup_id, NewPacket)
     if _Debug:
         lg.out(_DebugLevel, "restore_worker.doSavePacket %s saved to %s" % (packetID, filename))
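
The save step reduces to: ensure the parent directory exists, then write the payload as binary. A stdlib equivalent, assuming os.makedirs() in place of bpio._dirs_make() and a plain binary write in place of bpio.WriteBinaryFile():

import os

def save_packet_payload(backups_dir, relative_path, payload):
    # payload is bytes; returns the full path the payload was written to
    filename = os.path.join(backups_dir, relative_path)
    dirpath = os.path.dirname(filename)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath, exist_ok=True)
    with open(filename, 'wb') as f:
        f.write(payload)
    return filename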
Code example #7
File: restore.py Project: HandsomeJeff/bitdust-io
 def doSavePacket(self, NewPacket):
     glob_path = global_id.ParseGlobalID(NewPacket.PacketID,
                                         detect_version=True)
     packetID = global_id.CanonicalID(NewPacket.PacketID)
     customer_id, _, _, _, SupplierNumber, dataORparity = packetid.SplitFull(
         packetID)
     if dataORparity == 'Data':
         self.OnHandData[SupplierNumber] = True
     elif dataORparity == 'Parity':
         self.OnHandParity[SupplierNumber] = True
     filename = os.path.join(settings.getLocalBackupsDir(), customer_id,
                             glob_path['path'])
     dirpath = os.path.dirname(filename)
     if not os.path.exists(dirpath):
         try:
             bpio._dirs_make(dirpath)
         except:
             lg.exc()
     # either way the payload of packet is saved
     if not bpio.WriteFile(filename, NewPacket.Payload):
         lg.warn("unable to write to %s" % filename)
         return
     if self.packetInCallback is not None:
         self.packetInCallback(self.BackupID, NewPacket)
     lg.out(6, "restore.doSavePacket %s saved to %s" % (packetID, filename))
Code example #8
File: accounting.py Project: vesellov/bitdust.devel
def report_local_storage():
    # TODO
    # if customers folder placed outside of BaseDir()
    # need to add: total = total + customers
    r = {}
    r['backups'] = bpio.getDirectorySize(settings.getLocalBackupsDir())
    r['backups_str'] = diskspace.MakeStringFromBytes(r['backups'])
    r['temp'] = bpio.getDirectorySize(settings.getTempDir())
    r['temp_str'] = diskspace.MakeStringFromBytes(r['temp'])
    r['customers'] = bpio.getDirectorySize(settings.getCustomersFilesDir())
    r['customers_str'] = diskspace.MakeStringFromBytes(r['customers'])
    r['total'] = bpio.getDirectorySize(settings.GetBaseDir())
    r['total_str'] = diskspace.MakeStringFromBytes(r['total'])
    dataDriveFreeSpace, dataDriveTotalSpace = diskusage.GetDriveSpace(settings.getCustomersFilesDir())
    if dataDriveFreeSpace is None:
        dataDriveFreeSpace = 0
    r['disktotal'] = int(dataDriveTotalSpace)
    r['disktotal_str'] = diskspace.MakeStringFromBytes(r['disktotal'])
    r['diskfree'] = int(dataDriveFreeSpace)
    r['diskfree_str'] = diskspace.MakeStringFromBytes(r['diskfree'])
    try:
        r['total_percent'] = misc.percent2string(float(r['total']) / float(r['disktotal']), 5)
    except:
        r['total_percent'] = ''
    try:
        r['diskfree_percent'] = misc.percent2string(float(r['diskfree']) / float(r['disktotal']), 5)
    except:
        r['diskfree_percent'] = ''
    return r
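
bpio.getDirectorySize() and diskspace.MakeStringFromBytes() are project helpers; their behavior can be approximated with the standard library. A sketch (an approximation, not the BitDust implementations):

import os
import shutil

def directory_size(path):
    # total size of all regular files under path, like bpio.getDirectorySize()
    total = 0
    for root, _, files in os.walk(path):
        for name in files:
            fp = os.path.join(root, name)
            if os.path.isfile(fp):
                total += os.path.getsize(fp)
    return total

def bytes_to_string(n):
    # human-readable size, like diskspace.MakeStringFromBytes()
    for unit in ('bytes', 'KB', 'MB', 'GB', 'TB'):
        if n < 1024.0 or unit == 'TB':
            return '%.2f %s' % (n, unit)
        n /= 1024.0

shutil.disk_usage() similarly covers what diskusage.GetDriveSpace() provides here: it returns (total, used, free) in bytes for the drive holding a given path.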
Code example #9
File: accounting.py Project: vesellov/devel
def report_local_storage():
    # TODO
    # if customers folder placed outside of BaseDir()
    # need to add: total = total + customers
    r = {}
    r['backups'] = bpio.getDirectorySize(settings.getLocalBackupsDir())
    # r['backups_str'] = diskspace.MakeStringFromBytes(r['backups'])
    r['temp'] = bpio.getDirectorySize(settings.getTempDir())
    # r['temp_str'] = diskspace.MakeStringFromBytes(r['temp'])
    r['customers'] = bpio.getDirectorySize(settings.getCustomersFilesDir())
    # r['customers_str'] = diskspace.MakeStringFromBytes(r['customers'])
    r['total'] = bpio.getDirectorySize(settings.BaseDir())
    # r['total_str'] = diskspace.MakeStringFromBytes(r['total'])
    dataDriveFreeSpace, dataDriveTotalSpace = diskusage.GetDriveSpace(
        settings.getCustomersFilesDir())
    if dataDriveFreeSpace is None:
        dataDriveFreeSpace = 0
    r['disktotal'] = int(dataDriveTotalSpace)
    # r['disktotal_str'] = diskspace.MakeStringFromBytes(r['disktotal'])
    r['diskfree'] = int(dataDriveFreeSpace)
    # r['diskfree_str'] = diskspace.MakeStringFromBytes(r['diskfree'])
    try:
        r['total_percent'] = misc.value2percent(float(r['total']),
                                                float(r['disktotal']), 5)
    except:
        r['total_percent'] = ''
    try:
        r['diskfree_percent'] = misc.value2percent(float(r['diskfree']),
                                                   float(r['disktotal']), 5)
    except:
        r['diskfree_percent'] = ''
    return r
Code example #10
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s is not valid" % pathID
            }
        }
    if not backup_fs.ExistsID(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % pathID
            }
        }
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
Code example #11
def OnJobDone(backupID, result):
    """
    A callback method fired when a backup is finished.

    Here we need to save the index database.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' %
            (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath,
                                     iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = packetid.MakeBackupID(
                            customerGlobalID, remotePath, version)
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        # io_throttle.DeleteBackupRequests(backupID)
                        # io_throttle.DeleteBackupSendings(backupID)
                        # callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(
                            settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([
            ('pathID', remotePath),
        ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if we have a lot of tasks started, this will produce a lot of unneeded actions
        # it will be smarter to restart it once we finish all tasks
        # because the user will probably leave BitDust working after starting a long-running operation
        from storage import backup_monitor
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left'
            )
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version,
                      result)  # @UndefinedVariable
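
The retention logic in the middle of OnJobDone() keeps at most maxBackupsNum copies per path and deletes the oldest versions. Isolated as a pure function (a sketch; the name is illustrative):

def prune_old_versions(versions, max_copies):
    # versions must be sorted newest-first, as item.list_versions(
    # sorted=True, reverse=True) returns them; yields the ones to delete
    if not max_copies or len(versions) <= max_copies:
        return []
    return versions[max_copies:]

# e.g. prune_old_versions(['F20240103', 'F20240102', 'F20240101'], 2)
# -> ['F20240101']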
Code example #12
def DeleteBackup(backupID,
                 removeLocalFilesToo=True,
                 saveDB=True,
                 calculate=True):
    """
    This removes a single backup ID completely. Performs several operations:

    1) abort the backup if it just started and is running at the moment
    2) if we requested files for this backup, we do not need them anymore - remove 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index database
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index database; soon it will be synchronized with the "index_synchronizer()" state machine
    """
    backupID = global_id.CanonicalID(backupID)
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(
            8, 'backup_control.DeleteBackup %s is in process, stopping' %
            backupID)
        return True
    from customer import io_throttle
    from . import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested files for this backup - we do not need them anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    # callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([
            ('backupID', backupID),
        ])
    return True
Code example #13
File: backup.py Project: vesellov/bitdust.devel
 def _bk_done(bid, result):
     from crypt import signed
     try:
         os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
     except:
         pass
     for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
         filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
         payld = str(bpio.ReadBinaryFile(filepath))
         newpacket = signed.Packet(
             'Data',
             my_id.getLocalID(),
             my_id.getLocalID(),
             filename,
             payld,
             'http://megafaq.ru/cvps1010.xml')
         newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
         bpio.AtomicWriteFile(newfilepath, newpacket.Serialize())
     reactor.stop()
Code example #14
 def _bk_done(bid, result):
     from crypt import signed
     customer, remotePath = packetid.SplitPacketID(bid)
     try:
         os.mkdir(os.path.join(settings.getLocalBackupsDir(), customer, remotePath + '.out'))
     except:
         pass
     for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), customer, remotePath)):
         filepath = os.path.join(settings.getLocalBackupsDir(), customer, remotePath, filename)
         payld = bpio.ReadBinaryFile(filepath)
         newpacket = signed.Packet(
             'Data',
             my_id.getLocalID(),
             my_id.getLocalID(),
             filename,
             payld,
             'http://megafaq.ru/cvps1010.xml')
         newfilepath = os.path.join(settings.getLocalBackupsDir(), customer, remotePath + '.out', filename)
         bpio.WriteBinaryFile(newfilepath, newpacket.Serialize())
     reactor.stop()
Code example #15
def ReadLocalFiles():
    """
    This method scans local backups and builds the whole "local" matrix.
    """
    global _LocalFilesNotifyCallback
    local_files().clear()
    local_max_block_numbers().clear()
    local_backup_size().clear()
    _counter = [0]

    def visit(realpath, subpath, name):
        # subpath is something like 0/0/1/0/F20131120053803PM/0-1-Data
        if not os.path.isfile(realpath):
            return True
        if name.startswith("newblock-"):
            return False
        if subpath in [
            settings.BackupIndexFileName(),
            settings.BackupInfoFileName(),
            settings.BackupInfoFileNameOld(),
            settings.BackupInfoEncryptedFileName(),
        ]:
            return False
        try:
            version = subpath.split("/")[-2]
        except:
            return False
        if not packetid.IsCanonicalVersion(version):
            return True
        LocalFileReport(packetID=subpath)
        _counter[0] += 1
        return False

    bpio.traverse_dir_recursive(visit, settings.getLocalBackupsDir())
    lg.out(8, "backup_matrix.ReadLocalFiles  %d files indexed" % _counter[0])
    if lg.is_debug(8):
        try:
            if sys.version_info >= (2, 6):
                # localSZ = sys.getsizeof(local_files())
                # remoteSZ = sys.getsizeof(remote_files())
                import lib.getsizeof

                localSZ = lib.getsizeof.total_size(local_files())
                remoteSZ = lib.getsizeof.total_size(remote_files())
                indexByName = lib.getsizeof.total_size(backup_fs.fs())
                indexByID = lib.getsizeof.total_size(backup_fs.fsID())
                lg.out(10, "    all local info uses %d bytes in the memory" % localSZ)
                lg.out(10, "    all remote info uses %d bytes in the memory" % remoteSZ)
                lg.out(10, "    index by name takes %d bytes in the memory" % indexByName)
                lg.out(10, "    index by ID takes %d bytes in the memory" % indexByID)
        except:
            lg.exc()
    if _LocalFilesNotifyCallback is not None:
        _LocalFilesNotifyCallback()
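
The visit() callback receives each piece's path relative to the backups directory and treats the piece's parent directory as the version component. The same traversal can be sketched with os.walk() in place of bpio.traverse_dir_recursive(); is_version and report stand in for packetid.IsCanonicalVersion() and LocalFileReport():

import os

def index_local_files(backups_dir, is_version, report):
    # walk the tree, report every piece whose parent directory looks
    # like a canonical version, and count how many were indexed
    counter = 0
    for root, _, files in os.walk(backups_dir):
        for name in files:
            subpath = os.path.relpath(os.path.join(root, name), backups_dir)
            parts = subpath.replace(os.sep, '/').split('/')
            if len(parts) < 2 or not is_version(parts[-2]):
                continue
            report(subpath)
            counter += 1
    return counter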
Code example #16
 def doRemoveTempFile(self, *args, **kwargs):
     """
     Action method.
     """
     if not args or len(args) < 2:
         return
     filename = args[1]
     if filename:
         tmpfile.throw_out(filename, 'block restored')
     if settings.getBackupsKeepLocalCopies():
         return
     from storage import backup_matrix
     from storage import backup_rebuilder
     if not backup_rebuilder.ReadStoppedFlag():
         if backup_rebuilder.A().currentBackupID is not None:
             if backup_rebuilder.A().currentBackupID == self.backup_id:
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'restore_worker.doRemoveTempFile SKIP because rebuilding in process'
                     )
                 return
     count = 0
     for supplierNum in range(
             contactsdb.num_suppliers(customer_idurl=self.customer_idurl)):
         supplierIDURL = contactsdb.supplier(
             supplierNum, customer_idurl=self.customer_idurl)
         if not supplierIDURL:
             continue
         for dataORparity in [
                 'Data',
                 'Parity',
         ]:
             packetID = packetid.MakePacketID(self.backup_id,
                                              self.block_number,
                                              supplierNum, dataORparity)
             customer, remotePath = packetid.SplitPacketID(packetID)
             filename = os.path.join(settings.getLocalBackupsDir(),
                                     customer, remotePath)
             if os.path.isfile(filename):
                 try:
                     os.remove(filename)
                 except:
                     lg.exc()
                     continue
                 count += 1
     backup_matrix.LocalBlockReport(self.backup_id, self.block_number,
                                    *args, **kwargs)
     if _Debug:
         lg.out(
             _DebugLevel,
             'restore_worker.doRemoveTempFile %d files were removed' %
             count)
Code example #17
def DeletePathBackups(pathID,
                      removeLocalFilesToo=True,
                      saveDB=True,
                      calculate=True):
    """
    This removes all backups of the given path ID.
    Does the same operations as ``DeleteBackup()``.
    """
    from . import backup_rebuilder
    from customer import io_throttle
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
    if item is None:
        return False
    lg.out(8, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        lg.out(8, '        removing %s' % backupID)
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested files for this backup - we do not need them anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(),
                                        backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
Code example #18
 def doReadRaid(self, *args, **kwargs):
     """
     Action method.
     """
     _, outfilename = tmpfile.make(
         'restore',
         extension='.raid',
         prefix=self.backup_id.replace(':', '_').replace('@', '_').replace('/', '_') + '_' + str(self.block_number) + '_',
         close_fd=True,
     )
     inputpath = os.path.join(settings.getLocalBackupsDir(), self.customer_id, self.path_id)
     task_params = (outfilename, self.EccMap.name, self.version, self.block_number, inputpath)
     raid_worker.add_task('read', task_params, lambda cmd, params, result: self._on_block_restored(result, outfilename))
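
The prefix passed to tmpfile.make() is just the backup ID with the characters that are unsafe in file names replaced, plus the block number. As a tiny standalone helper (the name and the sample ID are illustrative):

def safe_prefix(backup_id, block_number):
    # ':', '@' and '/' all appear in global backup IDs but not in file names
    cleaned = backup_id.replace(':', '_').replace('@', '_').replace('/', '_')
    return '%s_%d_' % (cleaned, block_number)

# safe_prefix('alice@idhost:0/0/1/F20201221010203AM', 5)
# -> 'alice_idhost_0_0_1_F20201221010203AM_5_'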
Code example #19
def DeleteBackup(backupID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes a single backup ID completely. Performs several operations:

    1) abort the backup if it just started and is running at the moment
    2) if we requested files for this backup, we do not need them anymore - remove 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index database
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index database; soon it will be synchronized with the "index_synchronizer()" state machine
    """
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(8, 'backup_control.DeleteBackup %s is in process, stopping' % backupID)
        return True
    from customer import io_throttle
    import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested files for this backup - we do not need them anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([('backupID', backupID), ])
    return True
Code example #20
def LocalFileReport(packetID=None, backupID=None, blockNum=None, supplierNum=None, dataORparity=None):
    """
    Writes info for a single piece of data into the "local" matrix.

    You can use two forms:
    * pass ``packetID`` parameter only
    * pass all other parameters and do not use ``packetID``

    This is called when a new local file is created, for example during the rebuilding process.
    """
    if packetID is not None:
        backupID, blockNum, supplierNum, dataORparity = packetid.Split(packetID)
        if backupID is None:
            lg.warn("incorrect filename: " + packetID)
            return
    else:
        blockNum = int(blockNum)
        supplierNum = int(supplierNum)
        packetID = packetid.MakePacketID(backupID, blockNum, supplierNum, dataORparity)
    filename = packetID
    if dataORparity not in ["Data", "Parity"]:
        lg.warn("Data or Parity? " + filename)
        return
    if supplierNum >= contactsdb.num_suppliers():
        # lg.warn('supplier number? %d > %d : %s' % (supplierNum, contactsdb.num_suppliers(), filename))
        return
    localDest = os.path.join(settings.getLocalBackupsDir(), filename)
    if backupID not in local_files():
        local_files()[backupID] = {}
    if blockNum not in local_files()[backupID]:
        local_files()[backupID][blockNum] = {
            "D": [0] * contactsdb.num_suppliers(),
            "P": [0] * contactsdb.num_suppliers(),
        }
    if not os.path.isfile(localDest):
        local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 0
        return
    local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 1
    if backupID not in local_max_block_numbers():
        local_max_block_numbers()[backupID] = -1
    if local_max_block_numbers()[backupID] < blockNum:
        local_max_block_numbers()[backupID] = blockNum
    if backupID not in local_backup_size():
        local_backup_size()[backupID] = 0
    try:
        local_backup_size()[backupID] += os.path.getsize(localDest)
    except:
        lg.exc()
    RepaintBackup(backupID)
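
The "local" matrix that LocalFileReport() fills is nested dictionaries: backup ID, then block number, then a 'D'/'P' row holding one 0/1 flag per supplier. The core update extracted as a sketch:

import os

def local_file_report(matrix, backup_id, block_num, supplier_num,
                      data_or_parity, num_suppliers, local_path):
    # matrix[backup_id][block_num]['D' or 'P'][supplier_num] -> 0 or 1
    blocks = matrix.setdefault(backup_id, {})
    block = blocks.setdefault(block_num, {
        'D': [0] * num_suppliers,
        'P': [0] * num_suppliers,
    })
    block[data_or_parity[0]][supplier_num] = 1 if os.path.isfile(local_path) else 0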
Code example #21
def LocalBlockReport(backupID, blockNumber, result):
    """
    This updates the "local" matrix - several pieces corresponding to the given block of data.
    """
    # if contactsdb.num_suppliers() != num_suppliers:
    #     lg.out(6, 'backup_matrix.LocalBlockReport %s skipped, because number of suppliers were changed' % str(newblock))
    #     return
    if result is None:
        lg.warn("result is None")
        return
    try:
        blockNum = int(blockNumber)
    except:
        lg.exc()
        return
    repaint_flag = False
    for supplierNum in xrange(contactsdb.num_suppliers()):
        for dataORparity in ("Data", "Parity"):
            packetID = packetid.MakePacketID(backupID, blockNum, supplierNum, dataORparity)
            local_file = os.path.join(settings.getLocalBackupsDir(), packetID)
            if backupID not in local_files():
                local_files()[backupID] = {}
                repaint_flag = True
                # lg.out(14, 'backup_matrix.LocalFileReport new local entry for %s created in the memory' % backupID)
            if blockNum not in local_files()[backupID]:
                local_files()[backupID][blockNum] = {
                    "D": [0] * contactsdb.num_suppliers(),
                    "P": [0] * contactsdb.num_suppliers(),
                }
                repaint_flag = True
            if not os.path.isfile(local_file):
                local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 0
                repaint_flag = True
                continue
            local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 1
            # lg.out(6, 'backup_matrix.LocalFileReport %s max block num is %d' % (backupID, local_max_block_numbers()[backupID]))
            if backupID not in local_backup_size():
                local_backup_size()[backupID] = 0
                repaint_flag = True
            try:
                local_backup_size()[backupID] += os.path.getsize(local_file)
                repaint_flag = True
            except:
                lg.exc()
    if backupID not in local_max_block_numbers():
        local_max_block_numbers()[backupID] = -1
    if local_max_block_numbers()[backupID] < blockNum:
        local_max_block_numbers()[backupID] = blockNum
    if repaint_flag:
        RepaintBackup(backupID)
Code example #22
File: restore.py Project: hack-bitdust/devel
 def doReadRaid(self, arg):
     fd, outfilename = tmpfile.make(
         'restore',
         prefix=self.BackupID.replace(':', '_').replace('@', '_').replace(
             '/', '_') + '_' + str(self.BlockNumber) + '_',
     )
     os.close(fd)
     inputpath = os.path.join(settings.getLocalBackupsDir(),
                              self.CustomerGlobalID, self.PathID)
     task_params = (outfilename, eccmap.CurrentName(), self.Version,
                    self.BlockNumber, inputpath)
     raid_worker.add_task(
         'read', task_params,
         lambda cmd, params, result: self._on_block_restored(
             result, outfilename))
Code example #23
File: backup.py Project: HandsomeJeff/bitdust-io
 def doBlockPushAndRaid(self, arg):
     """
     Action method.
     """
     newblock = arg
     if newblock is None:
         self.abort()
         self.automat('fail')
         lg.out(
             _DebugLevel,
             'backup.doBlockPushAndRaid ERROR newblock is empty, terminating=%s'
             % self.terminating)
         lg.warn('failed to encrypt block, ABORTING')
         return
     if self.terminating:
         self.automat('block-raid-done', (newblock.BlockNumber, None))
         lg.out(_DebugLevel,
                'backup.doBlockPushAndRaid SKIP, terminating=True')
         return
     fileno, filename = tmpfile.make('raid')
     serializedblock = newblock.Serialize()
     blocklen = len(serializedblock)
     os.write(fileno, str(blocklen) + ":" + serializedblock)
     os.close(fileno)
     self.workBlocks[newblock.BlockNumber] = filename
     # key_alias = 'master'
     # if self.keyID:
     #     key_alias = packetid.KeyAlias(self.keyID)
     dt = time.time()
     customer_dir = self.customerGlobalID  # global_id.MakeGlobalID(customer=self.customerGlobalID, key_alias=key_alias)
     outputpath = os.path.join(settings.getLocalBackupsDir(), customer_dir,
                               self.pathID, self.version)
     task_params = (filename, self.eccmap.name, self.version,
                    newblock.BlockNumber, outputpath)
     raid_worker.add_task(
         'make',
         task_params,
         lambda cmd, params, result: self._raidmakeCallback(
             params, result, dt),
     )
     self.automat('block-raid-started', newblock)
     del serializedblock
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup.doBlockPushAndRaid %s : start process data from %s to %s, %d'
             % (newblock.BlockNumber, filename, outputpath,
                id(self.terminating)))
Code example #24
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {'result': {"success": False, "error": "path %s is not valid" % pathID}}
    if not backup_fs.ExistsID(pathID):
        return {'result': {"success": False, "error": "path %s not found" % pathID}}
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
Code example #25
def main():
    sourcePath = sys.argv[1]
    backupID = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()

    job = backup.backup(backupID, backupPipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
Code example #26
 def RunRequest(self):
     packetsToRemove = {}
     for i in range(
             0, min(self.fileRequestMaxLength, len(self.fileRequestQueue))):
         packetID = self.fileRequestQueue[i]
         # must never happen, but just in case
         if packetID not in self.fileRequestDict:
             packetsToRemove[packetID] = 'broken'
             lg.err('file %r not found in downloading queue for %r' %
                    (packetID, self.remoteID))
             continue
         f_down = self.fileRequestDict[packetID]
         if f_down.state == 'IN_QUEUE':
             customer, pathID = packetid.SplitPacketID(packetID)
             if os.path.exists(
                     os.path.join(settings.getLocalBackupsDir(), customer,
                                  pathID)):
                 # we have the data file, no need to request it
                 packetsToRemove[packetID] = 'exist'
             else:
                 f_down.event('start')
     # remember requests results
     result = len(packetsToRemove)
     # remove finished requests
     for packetID, why in packetsToRemove.items():
         if _Debug:
             lg.out(
                 _DebugLevel,
                 "io_throttle.RunRequest %r to be removed from [%s] downloading queue because %r, %d more items"
                 %
                 (packetID, self.remoteID, why, len(self.fileRequestQueue)))
         if packetID in self.fileRequestQueue:
             f_down = self.fileRequestDict[packetID]
             if why == 'exist':
                 f_down.event('file-already-exists')
             else:
                 lg.warn(
                     'unexpected result "%r" for %r in downloading queue for %s'
                     % (why, packetID, self.remoteID))
                 f_down.event('stop')
         else:
             lg.warn('packet %r not found in request queue for [%s]' %
                     (packetID, self.remoteID))
     del packetsToRemove
     if result:
         self.DoRequest()
     return result
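
RunRequest() deliberately collects finished items into packetsToRemove first and only then removes them, instead of mutating the queue while scanning it. The same two-pass shape in miniature (filter_request_queue is a hypothetical helper):

import os

def filter_request_queue(queue, backups_dir):
    # first pass: decide what to drop and why
    to_remove = {}
    for relative_path in queue:
        if os.path.exists(os.path.join(backups_dir, relative_path)):
            to_remove[relative_path] = 'exist'
    # second pass: actually remove, so the scan above stays consistent
    for relative_path in to_remove:
        queue.remove(relative_path)
    return to_remove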
Code example #27
def OnJobDone(backupID, result):
    """
    A callback method fired when a backup is finished.

    Here we need to save the index database.
    """
    import backup_rebuilder
    from customer import io_throttle
    lg.out(4, '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    lg.out(4, 'backup_control.OnJobDone [%s] %s, %d more tasks' % (backupID, result, len(tasks())))
    jobs().pop(backupID)
    pathID, version = packetid.SplitBackupID(backupID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(pathID)
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = pathID + '/' + version
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        io_throttle.DeleteBackupRequests(backupID)
                        io_throttle.DeleteBackupSendings(backupID)
                        callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(pathID)
        backup_fs.Calculate()
        Save()
        control.request_update([('pathID', pathID), ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if we have a lot of tasks started, this will produce a lot of unneeded actions
        # it will be smarter to restart it once we finish all tasks
        # because the user will probably leave BitDust working after starting a long-running operation
        from storage import backup_monitor
        backup_monitor.A('restart')
    RunTasks()
    reactor.callLater(0, FireTaskFinishedCallbacks, pathID, version, result)
Code example #28
def backup_done(bid, result):
    from crypt import signed
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
    except:
        pass
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(),
                                            bid)):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
        payld = str(bpio.ReadBinaryFile(filepath))
        outpacket = signed.Packet('Data', my_id.getLocalID(),
                                  my_id.getLocalID(), filename, payld,
                                  'http://megafaq.ru/cvps1010.xml')
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out',
                                   filename)
        bpio.WriteBinaryFile(newfilepath, outpacket.Serialize())
    # Assume we delivered all pieces from ".out" to suppliers and lost original data
    # Then we requested the data back and got it into ".inp"
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.inp'))
    except:
        pass
    for filename in os.listdir(
            os.path.join(settings.getLocalBackupsDir(), bid + '.out')):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out',
                                filename)
        data = bpio.ReadBinaryFile(filepath)
        inppacket = signed.Unserialize(data)
        assert inppacket
        assert inppacket.Valid()
        newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.inp',
                                   filename)
        bpio.WriteBinaryFile(newfilepath, inppacket.Payload)
    # Now do restore from input data
    backupID = bid + '.inp'
    outfd, tarfilename = tmpfile.make(
        'restore',
        extension='.tar.gz',
        prefix=backupID.replace('/', '_') + '_',
    )
    r = restore_worker.RestoreWorker(backupID, outfd)
    r.MyDeferred.addBoth(restore_done, tarfilename)
    reactor.callLater(1, r.automat, 'init')
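
backup_done() is effectively a round-trip test: serialize each packet into ".out", read it back, verify it, and write the payload into ".inp" for the restore to consume. The same shape in a self-contained sketch, with json standing in for signed.Packet() and signed.Unserialize():

import json
import os
import tempfile

def round_trip(payload):
    # payload is a JSON-serializable value, playing the role of the
    # packet payload; returns it after a write/read/verify cycle
    packet = {'command': 'Data', 'payload': payload}
    fd, path = tempfile.mkstemp(suffix='.out')
    with os.fdopen(fd, 'w') as f:
        json.dump(packet, f)
    with open(path) as f:
        received = json.load(f)
    os.remove(path)
    assert received == packet, 'round trip corrupted the packet'
    return received['payload']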
Code example #29
File: backup.py Project: riyazudheen/devel
def main():
    from system import bpio
    from . import backup_tar
    from . import backup_fs
    lg.set_debug_level(24)
    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(),
                                        backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptardir(sourcePath,
                                             compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath,
                                              compress=compress_mode)
    backupPipe.make_nonblocking()

    def _bk_done(bid, result):
        from crypt import signed
        customer, remotePath = packetid.SplitPacketID(bid)
        try:
            os.mkdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath + '.out'))
        except:
            pass
        for filename in os.listdir(
                os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath)):
            filepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                    remotePath, filename)
            payld = bpio.ReadBinaryFile(filepath)
            newpacket = signed.Packet('Data', my_id.getLocalID(),
                                      my_id.getLocalID(), filename, payld,
                                      'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), customer,
                                       remotePath + '.out', filename)
            bpio.WriteBinaryFile(newfilepath, newpacket.Serialize())
        reactor.stop()  # @UndefinedVariable

    job = backup(backupID, backupPipe, _bk_done)
    reactor.callLater(1, job.automat, 'start')  # @UndefinedVariable
    reactor.run()  # @UndefinedVariable
Code example #30
 def doBlockPushAndRaid(self, *args, **kwargs):
     """
     Action method.
     """
     newblock = args[0]
     if newblock is None:
         self.abort()
         self.automat('fail')
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup.doBlockPushAndRaid ERROR newblock is empty, terminating=%s'
                 % self.terminating)
         lg.warn('failed to encrypt block, ABORTING')
         return
     if self.terminating:
         self.automat('block-raid-done', (newblock.BlockNumber, None))
         if _Debug:
             lg.out(_DebugLevel,
                    'backup.doBlockPushAndRaid SKIP, terminating=True')
         return
     fileno, filename = tmpfile.make('raid', extension='.raid')
     serializedblock = newblock.Serialize()
     blocklen = len(serializedblock)
     os.write(fileno, strng.to_bin(blocklen) + b":" + serializedblock)
     os.close(fileno)
     self.workBlocks[newblock.BlockNumber] = filename
     dt = time.time()
     outputpath = os.path.join(settings.getLocalBackupsDir(),
                               self.customerGlobalID, self.pathID,
                               self.version)
     task_params = (filename, self.eccmap.name, self.version,
                    newblock.BlockNumber, outputpath)
     raid_worker.add_task(
         'make', task_params,
         lambda cmd, params, result: self._raidmakeCallback(
             params, result, dt))
     self.automat('block-raid-started', newblock)
     del serializedblock
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup.doBlockPushAndRaid %s : start process data from %s to %s, %d'
             % (newblock.BlockNumber, filename, outputpath,
                id(self.terminating)))
Code example #31
def DeletePathBackups(pathID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes all backups of the given path ID.

    Does the same operations as ``DeleteBackup()``.
    """
    import backup_rebuilder
    from customer import io_throttle
    # get the working item
    item = backup_fs.GetByID(pathID)
    if item is None:
        return False
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = pathID + '/' + version
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested files for this backup - we do not need them anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
Code example #32
 def _file_received(self, newpacket, state):
     if state in ['in queue', 'shutdown', 'exist', 'failed']:
         return
     if state != 'received':
         lg.warn("incorrect state [%s] for packet %s" %
                 (str(state), str(newpacket)))
         return
     if not newpacket.Valid():
         # TODO: if we didn't get a valid packet ... re-request it or delete it?
         lg.warn("%s is not a valid packet: %r" %
                 (newpacket.PacketID, newpacket))
         return
     # packetID = newpacket.PacketID
     packetID = global_id.CanonicalID(newpacket.PacketID)
     customer, remotePath = packetid.SplitPacketID(packetID)
     filename = os.path.join(settings.getLocalBackupsDir(), customer,
                             remotePath)
     if os.path.isfile(filename):
         lg.warn("found existing file " + filename)
         self.automat('inbox-data-packet', packetID)
         return
         # try:
         #     os.remove(filename)
         # except:
         #     lg.exc()
     dirname = os.path.dirname(filename)
     if not os.path.exists(dirname):
         try:
             bpio._dirs_make(dirname)
         except:
             lg.out(
                 2,
                 "backup_rebuilder._file_received ERROR can not create sub dir "
                 + dirname)
             return
     if not bpio.WriteFile(filename, newpacket.Payload):
         lg.out(2,
                "backup_rebuilder._file_received ERROR writing " + filename)
         return
     from storage import backup_matrix
     backup_matrix.LocalFileReport(packetID)
     lg.out(10, "backup_rebuilder._file_received and wrote to " + filename)
     self.automat('inbox-data-packet', packetID)
Code example #33
def main():
    sourcePath = sys.argv[1]
    backupID = sys.argv[2]
    lg.set_debug_level(24)
    compress_mode = 'none'  # 'gz'
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(),
                                        backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptardir(sourcePath,
                                             compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath,
                                              compress=compress_mode)
    backupPipe.make_nonblocking()

    job = backup.backup(backupID, backupPipe, backup_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
Code example #34
def backup_done(bid, result):
    from crypt import signed
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
    except:
        pass
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
        payld = str(bpio.ReadBinaryFile(filepath))
        outpacket = signed.Packet(
            'Data',
            my_id.getLocalID(),
            my_id.getLocalID(),
            filename,
            payld,
            'http://megafaq.ru/cvps1010.xml')
        newfilepath = os.path.join(settings.getLocalBackupsDir(),
                                   bid + '.out', filename)
        bpio.AtomicWriteFile(newfilepath, outpacket.Serialize())
    # Assume we delivered all pieces from ".out" to suppliers and lost original data
    # Then we requested the data back and got it into ".inp"
    try:
        os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.inp'))
    except:
        pass
    for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out')):
        filepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
        data = bpio.ReadBinaryFile(filepath)
        inppacket = signed.Unserialize(data)
        assert inppacket.Valid()
        newfilepath = os.path.join(settings.getLocalBackupsDir(),
                                   bid + '.inp', filename)
        bpio.AtomicWriteFile(newfilepath, inppacket.Payload)
    # Now do restore from input data
    backupID = bid + '.inp'
    outfd, tarfilename = tmpfile.make('restore', '.tar.gz', backupID.replace('/', '_') + '_')
    r = restore.restore(backupID, outfd)
    r.MyDeferred.addBoth(restore_done, tarfilename)
    reactor.callLater(1, r.automat, 'init')
Code example #35
File: restore.py Project: hack-bitdust/devel
 def doRemoveTempFile(self, arg):
     try:
         filename = arg[1]
     except:
         return
     tmpfile.throw_out(filename, 'block restored')
     if settings.getBackupsKeepLocalCopies():
         return
     import backup_rebuilder
     import backup_matrix
     if not backup_rebuilder.ReadStoppedFlag():
         if backup_rebuilder.A().currentBackupID is not None:
             if backup_rebuilder.A().currentBackupID == self.BackupID:
                 lg.out(
                     6,
                     'restore.doRemoveTempFile SKIP because rebuilding in process'
                 )
                 return
     count = 0
     for supplierNum in xrange(
             contactsdb.num_suppliers(customer_idurl=self.CustomerIDURL)):
         supplierIDURL = contactsdb.supplier(
             supplierNum, customer_idurl=self.CustomerIDURL)
         if not supplierIDURL:
             continue
         for dataORparity in ['Data', 'Parity']:
             packetID = packetid.MakePacketID(self.BackupID,
                                              self.BlockNumber, supplierNum,
                                              dataORparity)
             customer, remotePath = packetid.SplitPacketID(packetID)
             filename = os.path.join(settings.getLocalBackupsDir(),
                                     customer, remotePath)
             if os.path.isfile(filename):
                 try:
                     os.remove(filename)
                 except:
                     lg.exc()
                     continue
                 count += 1
     backup_matrix.LocalBlockReport(self.BackupID, self.BlockNumber, arg)
     lg.out(6, 'restore.doRemoveTempFile %d files were removed' % count)
Code example #36
 def _do_start_archive_backup(self):
     local_path = self.local_data_callback(self.queue_id,
                                           self.latest_sequence_id)
     supplier_path_id = os.path.join(self.archive_folder_path,
                                     strng.to_text(self.latest_sequence_id))
     dataID = misc.NewBackupID()
     backup_id = packetid.MakeBackupID(
         customer=self.queue_owner_id,
         path_id=supplier_path_id,
         key_alias=self.queue_alias,
         version=dataID,
     )
     backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backup_id)
     if bpio.Android():
         compress_mode = 'none'
     else:
         compress_mode = 'bz2'
     arcname = os.path.basename(local_path)
     backupPipe = backup_tar.backuptarfile_thread(local_path,
                                                  arcname=arcname,
                                                  compress=compress_mode)
     self.backup_job = backup.backup(
         backupID=backup_id,
         pipe=backupPipe,
         blockResultCallback=self._on_archive_backup_block_result,
         finishCallback=self._on_archive_backup_done,
         blockSize=1024 * 1024 * 10,
         sourcePath=local_path,
         keyID=self.group_key_id,
         ecc_map=eccmap.eccmap(self.ecc_map),
         creatorIDURL=self.queue_owner_idurl,
     )
     self.backup_job.automat('start')
     if _Debug:
         lg.args(_DebugLevel,
                 job=self.backup_job,
                 backup_id=backup_id,
                 local_path=local_path,
                 group_key_id=self.group_key_id)
Code example #37
File: backup.py Project: vesellov/bitdust.devel
def main():
    from system import bpio
    import backup_tar
    import backup_fs
    lg.set_debug_level(24)
    sourcePath = sys.argv[1]
    compress_mode = 'none'  # 'gz'
    backupID = sys.argv[2]
    raid_worker.A('init')
    backupPath = backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
    if bpio.pathIsDir(sourcePath):
        backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
    else:
        backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
    backupPipe.make_nonblocking()
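    # the tar pipe is switched to non-blocking mode so reads from it do not stall the reactor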

    def _bk_done(bid, result):
        from crypt import signed
        try:
            os.mkdir(os.path.join(settings.getLocalBackupsDir(), bid + '.out'))
        except OSError:
            # the output folder may already exist
            pass
        for filename in os.listdir(os.path.join(settings.getLocalBackupsDir(), bid)):
            filepath = os.path.join(settings.getLocalBackupsDir(), bid, filename)
        payld = bpio.ReadBinaryFile(filepath)  # keep raw bytes; wrapping them in str() would mangle the payload on Python 3
            newpacket = signed.Packet(
                'Data',
                my_id.getLocalID(),
                my_id.getLocalID(),
                filename,
                payld,
                'http://megafaq.ru/cvps1010.xml')
            newfilepath = os.path.join(settings.getLocalBackupsDir(), bid + '.out', filename)
            bpio.AtomicWriteFile(newfilepath, newpacket.Serialize())
        reactor.stop()
    job = backup(backupID, backupPipe, _bk_done)
    reactor.callLater(1, job.automat, 'start')
    reactor.run()
Code example #38
File: views.py Project: hack-bitdust/devel
 def __init__(self):
     self.installer_state_to_page = {
         'AT_STARTUP': self.renderSelectPage,
         'WHAT_TO_DO?': self.renderSelectPage,
         'INPUT_NAME': self.renderInputNamePage,
         'REGISTER': self.renderRegisterNewUserPage,
         'AUTHORIZED': self.renderRegisterNewUserPage,
         'LOAD_KEY': self.renderLoadKeyPage,
         'RECOVER': self.renderRestorePage,
         'RESTORED': self.renderRestorePage,
         'WIZARD': self.renderWizardPage,
         'DONE': self.renderLastPage,
     }
     self.install_wizard_state_to_page = {
         'READY': self.renderWizardStartPage,
         'STORAGE': self.renderWizardStoragePage,
         'CONTACTS': self.renderWizardContactsPage,
         'LAST_PAGE': self.renderLastPage,
         'DONE': self.renderLastPage,
     }
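     # initial values for the wizard form fields, read from local settings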
     self.data = {
         'username': bpio.ReadTextFile(settings.UserNameFilename()).strip(),
         'pksize': settings.DefaultPrivateKeySize(),
         'needed': str(int(settings.DefaultNeededBytes() / (1024 * 1024))),
         'donated': str(int(settings.DefaultDonatedBytes() / (1024 * 1024))),
         'suppliers': str(settings.DefaultDesiredSuppliers()),
         'customersdir': str(settings.getCustomersFilesDir()),
         'localbackupsdir': str(settings.getLocalBackupsDir()),
         'restoredir': str(settings.getRestoreDir()),
         'idurl': '',
         'keysrc': '',
         'name': '',
         'surname': '',
         'nickname': '',
     }
     installer.A('init')
Code example #39
 def doRemoveUnusedFiles(self, arg):
     # we want to remove files for this block
     # because we only need them during rebuilding
     if settings.getBackupsKeepLocalCopies() is True:
         # the user enabled this option - keep the local files
         return
     # ... the user does not want to keep local backups
     if settings.getGeneralWaitSuppliers() is True:
         from customer import fire_hire
         # but first be sure that all suppliers have been "green" for a long time
         if contact_status.listOfflineSuppliers() or time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60:
             # some suppliers are offline or the team is not stable yet,
             # so do not remove the files - we need them to rebuild
             return
     count = 0
     from storage import backup_matrix
     from storage import restore_monitor
     from storage import backup_rebuilder
     if _Debug:
         lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
     for backupID in misc.sorted_backup_ids(
             backup_matrix.local_files().keys()):
         if restore_monitor.IsWorking(backupID):
             if _Debug:
                 lg.out(_DebugLevel,
                        '        %s : SKIP, because restoring' % backupID)
             continue
         if backup_rebuilder.IsBackupNeedsWork(backupID):
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     '        %s : SKIP, because needs rebuilding' %
                     backupID)
             continue
         if not backup_rebuilder.ReadStoppedFlag():
             if backup_rebuilder.A().currentBackupID is not None:
                 if backup_rebuilder.A().currentBackupID == backupID:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             '        %s : SKIP, because rebuilding is in process'
                             % backupID)
                     continue
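         # ask the backup matrix which packets of this backup are safe
         # to remove from the local disk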
         packets = backup_matrix.ScanBlocksToRemove(
             backupID, settings.getGeneralWaitSuppliers())
         for packetID in packets:
             customer, pathID = packetid.SplitPacketID(packetID)
             filename = os.path.join(settings.getLocalBackupsDir(),
                                     customer, pathID)
             if os.path.isfile(filename):
                 try:
                     os.remove(filename)
                     # lg.out(6, '    ' + os.path.basename(filename))
                 except Exception:
                     lg.exc()
                     continue
                 count += 1
     if _Debug:
         lg.out(_DebugLevel, '    %d files were removed' % count)
     backup_matrix.ReadLocalFiles()
Code example #40
    def run(self):
        """
        Runs a new ``Job`` from that ``Task``.

        Called from ``RunTasks()`` method if it is possible to start a
        new task - the maximum number of simultaneously running ``Jobs``
        is limited.
        """
        import backup_tar
        import backup
        iter_and_path = backup_fs.WalkByID(self.pathID)
        if iter_and_path is None:
            lg.out(4, 'backup_control.Task.run ERROR %s not found in the index' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        itemInfo, sourcePath = iter_and_path
        if isinstance(itemInfo, dict):
            try:
                itemInfo = itemInfo[backup_fs.INFO_KEY]
            except Exception:
                lg.exc()
                return
        if self.localPath and self.localPath != sourcePath:
            lg.warn('local path was changed: %s -> %s' % (self.localPath, sourcePath))
        self.localPath = sourcePath
        if not bpio.pathExist(sourcePath):
            lg.warn('path does not exist: %s' % sourcePath)
            reactor.callLater(0, OnTaskFailed, self.pathID, 'not exist')
            return
        dataID = misc.NewBackupID()
        if itemInfo.has_version(dataID):
            # oops - we already have the same version
            # append 1,2,3... to the end to make an absolutely unique version ID
            i = 1
            while itemInfo.has_version(dataID + str(i)):
                i += 1
            dataID += str(i)
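        # the final backup ID is the catalog path ID plus the unique version name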
        backupID = self.pathID + '/' + dataID
        try:
            backup_fs.MakeLocalDir(settings.getLocalBackupsDir(), backupID)
        except Exception:
            lg.exc()
            lg.out(4, 'backup_control.Task.run ERROR creating destination folder for %s' % self.pathID)
            # self.defer.callback('error', self.pathID)
            return
        compress_mode = 'bz2'  # 'none' # 'gz'
        if bpio.pathIsDir(sourcePath):
            backupPipe = backup_tar.backuptar(sourcePath, compress=compress_mode)
        else:
            backupPipe = backup_tar.backuptarfile(sourcePath, compress=compress_mode)
        backupPipe.make_nonblocking()
        job = backup.backup(
            backupID, backupPipe,
            OnJobDone, OnBackupBlockReport,
            settings.getBackupBlockSize(),
            sourcePath)
        jobs()[backupID] = job
        itemInfo.add_version(dataID)
        if itemInfo.type in [backup_fs.PARENT, backup_fs.DIR]:
            dirsize.ask(sourcePath, OnFoundFolderSize, (self.pathID, dataID))
        else:
            jobs()[backupID].totalSize = os.path.getsize(sourcePath)
        jobs()[backupID].automat('start')
        reactor.callLater(0, FireTaskStartedCallbacks, self.pathID, dataID)
        lg.out(4, 'backup_control.Task-%d.run [%s/%s], size=%d, %s' % (
            self.number, self.pathID, dataID, itemInfo.size, sourcePath))
Code example #41
    def RunRequest(self):
        #out(6, 'io_throttle.RunRequest')
        packetsToRemove = {}
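        # packetID -> reason; requests resolved during this pass are processed at the end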
        for i in range(0, min(self.fileRequestMaxLength, len(self.fileRequestQueue))):
            packetID = self.fileRequestQueue[i]
            # we got notified that this packet failed to send
            if packetID in self.requestFailedPacketIDs:
                self.requestFailedPacketIDs.remove(packetID)
                packetsToRemove[packetID] = 'failed'
                continue
            # request timeouts are disabled for now
#             currentTime = time.time()
#             if self.fileRequestDict[packetID].requestTime is not None:
#                 # the packet was requested
#                 if self.fileRequestDict[packetID].fileReceivedTime is None:
#                     # but no answer yet ...
#                     if currentTime - self.fileRequestDict[packetID].requestTime > self.fileRequestDict[packetID].requestTimeout:
#                         # and time is out!!!
#                         self.fileRequestDict[packetID].report = 'timeout'
#                         packetsToRemove[packetID] = 'timeout'
#                 else:
#                     # the packet were received (why it is not removed from the queue yet ???)
#                     self.fileRequestDict[packetID].result = 'received'
#                     packetsToRemove[packetID] = 'received'
            # the packet was not requested yet
            if self.fileRequestDict[packetID].requestTime is None:
                customer, pathID = packetid.SplitPacketID(packetID)
                if not os.path.exists(os.path.join(settings.getLocalBackupsDir(), customer, pathID)):
                    fileRequest = self.fileRequestDict[packetID]
                    if _Debug:
                        lg.out(_DebugLevel, "io_throttle.RunRequest for packetID " + fileRequest.packetID)
                    # transport_control.RegisterInterest(self.DataReceived,fileRequest.creatorID,fileRequest.packetID)
                    # callback.register_interest(self.DataReceived, fileRequest.creatorID, fileRequest.packetID)
                    p2p_service.SendRetreive(
                        fileRequest.ownerID,
                        fileRequest.creatorID,
                        fileRequest.packetID,
                        fileRequest.remoteID,
                        callbacks={
                            commands.Data(): self.OnDataReceived,
                            commands.Fail(): self.OnDataReceived,
                            # None: lambda pkt_out: self.OnDataReceived(fileRequest.packetID, 'timeout'),  # timeout
                        },
                        # response_timeout=10,
                    )
#                     newpacket = signed.Packet(
#                         commands.Retrieve(),
#                         fileRequest.ownerID,
#                         fileRequest.creatorID,
#                         packetid.RemotePath(fileRequest.packetID),
#                         "",
#                         fileRequest.remoteID)
#                     gateway.outbox(newpacket, callbacks={
#                         commands.Data(): self.DataReceived,
#                         commands.Fail(): self.DataReceived})
                    fileRequest.requestTime = time.time()
                else:
                    # we have the data file, no need to request it
                    self.fileRequestDict[packetID].result = 'exist'
                    packetsToRemove[packetID] = 'exist'
        # if the request queue is empty - forget all records about packets that failed to be requested
        if len(self.fileRequestQueue) == 0:
            del self.requestFailedPacketIDs[:]
        # remember the request results
        result = len(packetsToRemove)
        # remove finished requests
        for packetID, why in packetsToRemove.items():
            # self.fileRequestQueue.remove(packetID)
            if _Debug:
                lg.out(_DebugLevel, "io_throttle.RunRequest removed %s from %s receiving queue, %d more items" % (
                    packetID, self.remoteName, len(self.fileRequestQueue)))
            self.OnDataRequestFailed(packetID, why)
        del packetsToRemove
        return result
Code example #42
 def _request_files(self):
     from storage import backup_matrix
     from customer import io_throttle
     from customer import data_sender
     self.missingPackets = 0
     # here we want to request some packets before we start working to
     # rebuild the missing blocks
     availableSuppliers = backup_matrix.GetActiveArray(customer_idurl=self.currentCustomerIDURL)
     # remember how many requests we did on this iteration
     total_requests_count = 0
     # at the moment we download everything that is available and needed
     if '' in contactsdb.suppliers(customer_idurl=self.currentCustomerIDURL):
         lg.out(8, 'backup_rebuilder._request_files SKIP - empty supplier')
         self.automat('no-requests')
         return
     for supplierNum in range(contactsdb.num_suppliers(customer_idurl=self.currentCustomerIDURL)):
         supplierID = contactsdb.supplier(supplierNum, customer_idurl=self.currentCustomerIDURL)
         if not supplierID:
             continue
         requests_count = 0
         # we do requests in reverse order because we start rebuilding from
         # the last block
         for blockIndex in range(len(self.workingBlocksQueue) - 1, -1, -1):
             blockNum = self.workingBlocksQueue[blockIndex]
             # do not keep too many requests in the queue
             if io_throttle.GetRequestQueueLength(supplierID) >= 16:
                 break
             # also don't do too many requests at once
             if requests_count > 16:
                 break
             remoteData = backup_matrix.GetRemoteDataArray(
                 self.currentBackupID, blockNum)
             remoteParity = backup_matrix.GetRemoteParityArray(
                 self.currentBackupID, blockNum)
             localData = backup_matrix.GetLocalDataArray(
                 self.currentBackupID, blockNum)
             localParity = backup_matrix.GetLocalParityArray(
                 self.currentBackupID, blockNum)
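             # remoteData/remoteParity reflect what suppliers report they store,
             # localData/localParity reflect what is present on the local disk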
             if supplierNum >= len(remoteData) or supplierNum >= len(remoteParity):
                 break
             if supplierNum >= len(localData) or supplierNum >= len(localParity):
                 break
             # if remote Data exists and is available because the supplier is on-line,
             # but we do not have it on hand - request it
             if localData[supplierNum] == 0:
                 PacketID = packetid.MakePacketID(
                     self.currentBackupID, blockNum, supplierNum, 'Data')
                 if remoteData[supplierNum] == 1:
                     if availableSuppliers[supplierNum]:
                         # if the supplier is not alive - we can't request from it
                         if not io_throttle.HasPacketInRequestQueue(supplierID, PacketID):
                             customer, remotePath = packetid.SplitPacketID(PacketID)
                             filename = os.path.join(
                                 settings.getLocalBackupsDir(),
                                 customer,
                                 remotePath,
                             )
                             if not os.path.exists(filename):
                                 if io_throttle.QueueRequestFile(
                                         self._file_received,
                                         my_id.getLocalID(),
                                         PacketID,
                                         my_id.getLocalID(),
                                         supplierID):
                                     requests_count += 1
                 else:
                     # count this packet as missing
                     self.missingPackets += 1
                     # also mark this supplier as having no data at all - neither local nor remote
             else:
                 # but if local Data already exists and was not sent yet - send it now
                 if remoteData[supplierNum] != 1:
                     data_sender.A('new-data')
             # same for Parity
             if localParity[supplierNum] == 0:
                 PacketID = packetid.MakePacketID(
                     self.currentBackupID, blockNum, supplierNum, 'Parity')
                 if remoteParity[supplierNum] == 1:
                     if availableSuppliers[supplierNum]:
                         if not io_throttle.HasPacketInRequestQueue(
                                 supplierID, PacketID):
                             customer, remotePath = packetid.SplitPacketID(PacketID)
                             filename = os.path.join(
                                 settings.getLocalBackupsDir(),
                                 customer,
                                 remotePath,
                             )
                             if not os.path.exists(filename):
                                 if io_throttle.QueueRequestFile(
                                     self._file_received,
                                     my_id.getLocalID(),
                                     PacketID,
                                     my_id.getLocalID(),
                                     supplierID,
                                 ):
                                     requests_count += 1
                 else:
                     self.missingPackets += 1
             else:
                 # but if local Parity already exists and was not sent yet - send it now
                 if remoteParity[supplierNum] != 1:
                     data_sender.A('new-data')
         total_requests_count += requests_count
     if total_requests_count > 0:
         lg.out(8, 'backup_rebuilder._request_files : %d chunks requested' % total_requests_count)
         self.automat('requests-sent', total_requests_count)
     else:
         if self.missingPackets:
             lg.out(8, 'backup_rebuilder._request_files : found %d missing packets' % self.missingPackets)
             self.automat('found-missing')
         else:
             lg.out(8, 'backup_rebuilder._request_files : nothing was requested')
             self.automat('no-requests')
Code example #43
File: archive_writer.py Project: vesellov/devel
 def _do_send_packets(self, backup_id, block_num):
     customer_id, path_id, version_name = packetid.SplitBackupID(backup_id)
     archive_snapshot_dir = os.path.join(settings.getLocalBackupsDir(),
                                         customer_id, path_id, version_name)
     if _Debug:
         lg.args(_DebugLevel,
                 backup_id=backup_id,
                 block_num=block_num,
                 archive_snapshot_dir=archive_snapshot_dir)
     if not os.path.isdir(archive_snapshot_dir):
         self.block_failed = True
         lg.err('archive snapshot folder was not found at %r' %
                archive_snapshot_dir)
         return None
     failed_suppliers = 0
     for supplier_num in range(len(self.suppliers_list)):
         supplier_idurl = self.suppliers_list[supplier_num]
         if not supplier_idurl:
             failed_suppliers += 1
             lg.warn('unknown supplier supplier_num=%d' % supplier_num)
             continue
         for dataORparity in (
                 'Data',
                 'Parity',
         ):
             packet_id = packetid.MakePacketID(backup_id, block_num,
                                               supplier_num, dataORparity)
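             # raid output files inside the snapshot folder are named
             # '<block>-<supplier>-<Data|Parity>'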
             packet_filename = os.path.join(
                 archive_snapshot_dir, '%d-%d-%s' % (
                     block_num,
                     supplier_num,
                     dataORparity,
                 ))
             if not os.path.isfile(packet_filename):
                 lg.err('%s is not a file' % packet_filename)
                 continue
             packet_payload = bpio.ReadBinaryFile(packet_filename)
             if not packet_payload:
                 lg.err('file %r reading error' % packet_filename)
                 continue
             if block_num not in self.packets_out:
                 self.packets_out[block_num] = {}
             self.packets_out[block_num][packet_id] = None
             p2p_service.SendData(
                 raw_data=packet_payload,
                 ownerID=self.queue_owner_idurl,
                 creatorID=my_id.getIDURL(),
                 remoteID=supplier_idurl,
                 packetID=packet_id,
                 callbacks={
                     commands.Ack():
                     lambda newpacket, _: self.automat('ack',
                                                       newpacket=newpacket),
                     commands.Fail():
                     lambda newpacket, _: self.automat('fail',
                                                       newpacket=newpacket),
                 },
             )
     if failed_suppliers > self.correctable_errors:
         self.block_failed = True
         lg.err('too many failed suppliers %d in block %d' % (
             failed_suppliers,
             block_num,
         ))
Code example #44
    def doScanAndQueue(self, arg):
        global _ShutdownFlag
        if _Debug:
            lg.out(
                _DebugLevel,
                'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag)
        if _Debug:
            log = open(os.path.join(settings.LogsDir(), 'data_sender.log'),
                       'w')
            log.write('doScanAndQueue %s\n' % time.asctime())
        if _ShutdownFlag:
            if _Debug:
                log.write('doScanAndQueue _ShutdownFlag is True\n')
            self.automat('scan-done')
            if _Debug:
                log.flush()
                log.close()
            return
        for customer_idurl in contactsdb.known_customers():
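            # only handle customers whose supplier list has no empty slots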
            if '' not in contactsdb.suppliers(customer_idurl):
                from storage import backup_matrix
                for backupID in misc.sorted_backup_ids(
                        backup_matrix.local_files().keys(), True):
                    packetsBySupplier = backup_matrix.ScanBlocksToSend(
                        backupID)
                    if _Debug:
                        log.write('%s\n' % packetsBySupplier)
                    for supplierNum in packetsBySupplier.keys():
                        supplier_idurl = contactsdb.supplier(
                            supplierNum, customer_idurl=customer_idurl)
                        if not supplier_idurl:
                            lg.warn('?supplierNum? %s for %s' %
                                    (supplierNum, backupID))
                            continue
                        for packetID in packetsBySupplier[supplierNum]:
                            backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(
                                packetID)
                            if backupID_ != backupID:
                                lg.warn('?backupID? %s for %s' %
                                        (packetID, backupID))
                                continue
                            if supplierNum_ != supplierNum:
                                lg.warn('?supplierNum? %s for %s' %
                                        (packetID, backupID))
                                continue
                            if io_throttle.HasPacketInSendQueue(
                                    supplier_idurl, packetID):
                                if _Debug:
                                    log.write(
                                        '%s already in sending queue for %s\n'
                                        % (packetID, supplier_idurl))
                                continue
                            if not io_throttle.OkToSend(supplier_idurl):
                                if _Debug:
                                    log.write('ok to send %s ? - NO!\n' %
                                              supplier_idurl)
                                continue
                            # tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
                            # if len(tranByID) > 3:
                            #     log.write('transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
                            #     continue
                            customerGlobalID, pathID = packetid.SplitPacketID(
                                packetID)
                            filename = os.path.join(
                                settings.getLocalBackupsDir(),
                                customerGlobalID,
                                pathID,
                            )
                            if not os.path.isfile(filename):
                                if _Debug:
                                    log.write('%s is not a file\n' % filename)
                                continue
                            if io_throttle.QueueSendFile(
                                    filename,
                                    packetID,
                                    supplier_idurl,
                                    my_id.getLocalID(),
                                    self._packetAcked,
                                    self._packetFailed,
                            ):
                                if _Debug:
                                    log.write(
                                        'io_throttle.QueueSendFile %s\n' %
                                        packetID)
                            else:
                                if _Debug:
                                    log.write(
                                        'io_throttle.QueueSendFile FAILED %s\n'
                                        % packetID)
                            # lg.out(6, '  %s for %s' % (packetID, backupID))
                            # DEBUG
                            # break

        self.automat('scan-done')
        if _Debug:
            log.flush()
            log.close()