def _list_active_tasks(params):
    result = []
    for tsk in backup_control.ListPendingTasks():
        result.append({
            'name': os.path.basename(tsk.localPath),
            'path': os.path.dirname(tsk.localPath),
            'id': tsk.pathID,
            'version': '',
            'customer': '',
            'mode': 'up',
            'progress': '0%',
        })
    for backupID in backup_control.ListRunningBackups():
        backup_obj = backup_control.GetRunningBackupObject(backupID)
        customerGlobalID, remotePath, versionName = packetid.SplitBackupID(
            backupID)
        result.append({
            'name': os.path.basename(backup_obj.sourcePath),
            'path': os.path.dirname(backup_obj.sourcePath),
            'id': remotePath,
            'version': versionName,
            'customer': customerGlobalID,
            'mode': 'up',
            'progress': misc.percent2string(backup_obj.progress()),
        })
    # for backupID in restore_monitor.GetWorkingIDs():
    #     result.append(backupID)
    return {
        'result': result,
    }
Example #2
 def _on_restore_done(self, result, backupID, outfd, tarfilename):
     try:
         os.close(outfd)
     except:
         lg.exc()
     if result == 'done':
         lg.info('archive %r restore success from %r' % (
             backupID,
             tarfilename,
         ))
     else:
         lg.err('archive %r restore failed from %r with: %r' % (
             backupID,
             tarfilename,
             result,
         ))
     if result == 'done':
         _, pathID, versionName = packetid.SplitBackupID(backupID)
         service_dir = settings.ServiceDir('service_private_groups')
         queues_dir = os.path.join(service_dir, 'queues')
         queue_dir = os.path.join(queues_dir, self.group_key_id)
         snapshot_dir = os.path.join(queue_dir, pathID, versionName)
         if not os.path.isdir(snapshot_dir):
             bpio._dirs_make(snapshot_dir)
         d = backup_tar.extracttar_thread(tarfilename, snapshot_dir)
         d.addCallback(self._on_extract_done, backupID, tarfilename,
                       snapshot_dir)
         d.addErrback(self._on_extract_failed, backupID, tarfilename,
                      snapshot_dir)
         return d
     tmpfile.throw_out(tarfilename, 'restore ' + result)
     return None
Example #3
def _delete_version(params):
    lg.out(6, '_delete_version %s' % str(params))
    backupID = params['backupid']
    if not packetid.Valid(backupID):
        return {
            'result': {
                "success": False,
                "error": "backupID %s is not valid" % backupID
            }
        }
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not backup_fs.ExistsID(
            remotePath,
            iterID=backup_fs.fsID(
                global_id.GlobalUserToIDURL(customerGlobalID))):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % remotePath
            }
        }
    if version:
        backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    backup_monitor.A('restart')
    control.request_update([
        ('backupID', backupID),
    ])
    return {'result': {"success": True, "error": None}}
Example #4
def _download(params):
    # localName = params['name']
    backupID = global_id.CanonicalID(params['backupid'])
    destpath = params['dest_path']
    if bpio.Linux() or bpio.Mac():
        destpath = '/' + destpath.lstrip('/')
    restorePath = bpio.portablePath(destpath)
    # overwrite = params['overwrite']
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    pathID = packetid.MakeBackupID(customerGlobalID, remotePath)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not packetid.IsCanonicalVersion(version):
        return {'result': {"success": False, "error": "path %s is not valid" % backupID}}
    if not remotePath:
        return {'result': {"success": False, "error": "path %s is not valid" % backupID}}
    if not packetid.Valid(remotePath):
        return {'result': {"success": False, "error": "path %s is not valid" % backupID}}
    if backup_control.IsBackupInProcess(backupID):
        return {'result': {"success": True, "error": None}}
    if backup_control.HasTask(pathID):
        return {'result': {"success": True, "error": None}}
    localPath = backup_fs.ToPath(remotePath)
    if localPath == restorePath:
        restorePath = os.path.dirname(restorePath)

    def _itemRestored(backupID, result):
        customerGlobalID, remotePath, _ = packetid.SplitBackupID(backupID)
        backup_fs.ScanID(remotePath, customer_idurl=global_id.GlobalUserToIDURL(customerGlobalID))
        backup_fs.Calculate()

    restore_monitor.Start(backupID, restorePath, callback=_itemRestored)
    return {'result': {"success": True, "error": None}}
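A hypothetical invocation of the handler above, assuming the same `params` dict shape ('backupid', 'dest_path') used by the other API handlers on this page; the ID and destination are made up:

response = _download({
    'backupid': 'alice@id-server.net:0/0/1/2/F20201209173641PM',
    'dest_path': '/tmp/restored',
})
# The handler answers right away while restore_monitor works in the background:
# {'result': {'success': True, 'error': None}}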
Example #5
def backup_id_compare(backupID1, backupID2):
    """
    Compare two 'complex' backup IDs: compare paths first, then versions.
    """
    if isinstance(backupID1, tuple):
        backupID1 = backupID1[0]
        backupID2 = backupID2[0]
    customerGlobalID1, remotePath1, version1 = packetid.SplitBackupID(backupID1)
    customerGlobalID2, remotePath2, version2 = packetid.SplitBackupID(backupID2)
    if remotePath1 is None or remotePath2 is None:
        return 0
    if remotePath1 != remotePath2:
        return cmp(remotePath1, remotePath2)
    if customerGlobalID1 != customerGlobalID2:
        return cmp(customerGlobalID1, customerGlobalID2)
    return version_compare(version1, version2)
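Note that `backup_id_compare()` relies on the built-in `cmp()`, which only exists in Python 2. A minimal drop-in shim, assuming the surrounding module is expected to run on Python 3 (this helper is not part of the original source):

def cmp(a, b):
    # classic three-way comparison: negative if a < b, 0 if equal, positive if a > b
    return (a > b) - (a < b)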
Example #6
    def __init__(self, BackupID, OutputFile, KeyID=None):  # OutputFileName
        self.CreatorID = my_id.getLocalID()
        self.BackupID = BackupID
        _parts = packetid.SplitBackupID(self.BackupID)
        self.CustomerGlobalID = _parts[0]
        self.CustomerIDURL = global_id.GlobalUserToIDURL(self.CustomerGlobalID)
        self.PathID = _parts[1]
        self.Version = _parts[2]
        self.File = OutputFile
        self.KeyID = KeyID
        # current active block number; adding 1 brings us to the first block, which is 0
        self.BlockNumber = -1
        self.BytesWritten = 0
        self.OnHandData = []
        self.OnHandParity = []
        self.AbortState = False
        self.Done = False
        self.EccMap = eccmap.Current()
        self.Started = time.time()
        self.LastAction = time.time()
        self.InboxPacketsQueue = []
        self.InboxQueueWorker = None
        self.RequestFails = []
        self.InboxQueueDelay = 1
        # For anyone who wants to know when we finish
        self.MyDeferred = Deferred()
        self.packetInCallback = None
        self.blockRestoredCallback = None

        automat.Automat.__init__(self, 'restore_%s' % self.BackupID,
                                 'AT_STARTUP', _DebugLevel, _Debug)
        events.send('restore-started', dict(backup_id=self.BackupID))
Example #7
def OnJobDone(backupID, result):
    """
    A callback method fired when backup is finished.

    Here we need to save the index data base.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' %
            (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath,
                                     iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = packetid.MakeBackupID(
                            customerGlobalID, remotePath, version)
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        # io_throttle.DeleteBackupRequests(backupID)
                        # io_throttle.DeleteBackupSendings(backupID)
                        # callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(
                            settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([
            ('pathID', remotePath),
        ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if a lot of tasks were started this would produce a lot of unneeded actions;
        # it would be smarter to restart it once all tasks are finished,
        # because the user will probably leave BitDust running after starting a long-running operation
        from storage import backup_monitor
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left'
            )
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version,
                      result)  # @UndefinedVariable
Example #8
 def _do_select_archive_snapshots(self):
     iterID_and_path = backup_fs.WalkByID(self.archive_folder_path,
                                          iterID=backup_fs.fsID(
                                              self.queue_owner_idurl,
                                              self.queue_alias))
     if iterID_and_path is None:
         lg.err('archive folder not found in the catalog: %r' %
                self.archive_folder_path)
         self.automat('restore-failed')
         return False
     iterID, _ = iterID_and_path
     known_archive_snapshots_list = backup_fs.ListAllBackupIDsFull(
         iterID=iterID)
     if not known_archive_snapshots_list:
         lg.err(
             'failed to restore data from archive, no snapshots found in folder: %r'
             % self.archive_folder_path)
         self.automat('restore-failed')
         return False
     snapshots_list = []
     for archive_item in known_archive_snapshots_list:
         snapshots_list.append(archive_item[1])
     if _Debug:
         lg.args(_DebugLevel, snapshots_list=snapshots_list)
     if not snapshots_list:
         lg.err('no available snapshots found in archive list: %r' %
                known_archive_snapshots_list)
         self.automat('restore-failed')
         return False
     snapshot_sequence_ids = []
     for backup_id in snapshots_list:
         _, path_id, _ = packetid.SplitBackupID(backup_id)
         if not path_id:
             continue
         try:
             snapshot_sequence_id = int(path_id.split('/')[-1])
         except:
             lg.exc()
             continue
         if self.start_sequence_id is not None and self.start_sequence_id > snapshot_sequence_id:
             continue
         if self.end_sequence_id is not None and self.end_sequence_id < snapshot_sequence_id:
             continue
         snapshot_sequence_ids.append((
             snapshot_sequence_id,
             backup_id,
         ))
     snapshot_sequence_ids.sort(key=lambda item: int(item[0]))
     if _Debug:
         lg.args(_DebugLevel, snapshot_sequence_ids=snapshot_sequence_ids)
     self.selected_backups = [item[1] for item in snapshot_sequence_ids]
     if not self.selected_backups:
         lg.err('no backups selected from snapshot list')
         self.automat('restore-failed')
         return False
     if _Debug:
         lg.args(_DebugLevel, selected_backups=self.selected_backups)
     return True
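The sequence-range filter in the middle of `_do_select_archive_snapshots()` can be read in isolation. A self-contained sketch; the helper name and sample values are hypothetical:

def filter_sequence_range(path_ids, start_id=None, end_id=None):
    # Keep only path IDs whose trailing '/'-separated element is a number
    # inside [start_id, end_id], then sort by that number.
    selected = []
    for path_id in path_ids:
        try:
            seq = int(path_id.split('/')[-1])
        except ValueError:
            continue
        if start_id is not None and seq < start_id:
            continue
        if end_id is not None and seq > end_id:
            continue
        selected.append((seq, path_id))
    return [p for _, p in sorted(selected)]

# filter_sequence_range(['0/archive/3', '0/archive/12'], start_id=5) -> ['0/archive/12']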
Example #9
    def __init__(self,
                 BackupID,
                 OutputFile,
                 KeyID=None,
                 ecc_map=None,
                 debug_level=_DebugLevel,
                 log_events=False,
                 log_transitions=_Debug,
                 publish_events=False,
                 **kwargs):
        """
        Builds `restore_worker()` state machine.
        """
        self.creator_id = my_id.getIDURL()
        self.backup_id = BackupID
        _parts = packetid.SplitBackupID(self.backup_id)
        self.customer_id = _parts[0]
        self.customer_idurl = global_id.GlobalUserToIDURL(self.customer_id)
        self.known_suppliers = []
        self.path_id = _parts[1]
        self.version = _parts[2]
        self.output_stream = OutputFile
        self.key_id = KeyID
        # current active block number; adding 1 brings us to the first block, which is 0
        self.block_number = -1
        self.bytes_written = 0
        self.OnHandData = []
        self.OnHandParity = []
        self.abort_flag = False
        self.done_flag = False
        self.EccMap = ecc_map or None
        self.max_errors = 0
        self.Started = time.time()
        self.LastAction = time.time()
        self.RequestFails = []
        self.block_requests = {}
        self.AlreadyRequestedCounts = {}
        # For anyone who wants to know when we finish
        self.MyDeferred = Deferred()
        self.packetInCallback = None
        self.blockRestoredCallback = None
        self.Attempts = 0

        super(RestoreWorker, self).__init__(
            name='restore_worker_%s' % self.version,
            state="AT_STARTUP",
            debug_level=debug_level,
            log_events=log_events,
            log_transitions=log_transitions,
            publish_events=publish_events,
            **kwargs
        )
        events.send('restore-started', dict(backup_id=self.backup_id))
Example #10
 def __init__(
     self,
     backupID,
     pipe,
     finishCallback=None,
     blockResultCallback=None,
     notifyNewDataCallback=None,
     blockSize=None,
     sourcePath=None,
     keyID=None,
     ecc_map=None,
     creatorIDURL=None,
 ):
     self.backupID = backupID
     self.creatorIDURL = creatorIDURL or my_id.getIDURL()
     _parts = packetid.SplitBackupID(self.backupID)
     self.customerGlobalID = _parts[0]
     self.pathID = _parts[1]
     self.version = _parts[2]
     self.customerIDURL = global_id.GlobalUserToIDURL(self.customerGlobalID)
     self.sourcePath = sourcePath
     self.keyID = keyID
     self.eccmap = ecc_map or eccmap.Current()
     self.pipe = pipe
     self.blockSize = blockSize
     if self.blockSize is None:
         self.blockSize = settings.getBackupBlockSize()
     self.ask4abort = False
     self.terminating = False
     self.stateEOF = False
     self.stateReading = False
     self.closed = False
     self.currentBlockData = BytesIO()
     self.currentBlockSize = 0
     self.workBlocks = {}
     self.blockNumber = 0
     self.dataSent = 0
     self.blocksSent = 0
     self.totalSize = -1
     self.resultDefer = Deferred()
     self.finishCallback = finishCallback
     self.blockResultCallback = blockResultCallback
     self.notifyNewDataCallback = notifyNewDataCallback
     automat.Automat.__init__(
         self,
         name='backup_%s' % self.version,
         state='AT_STARTUP',
         debug_level=_DebugLevel,
         log_events=_Debug,
         log_transitions=_Debug,
     )
Example #11
    def __init__(self,
                 BackupID,
                 OutputFile,
                 KeyID=None,
                 debug_level=_DebugLevel,
                 log_events=_Debug,
                 log_transitions=_Debug,
                 publish_events=False,
                 **kwargs):
        """
        Builds `restore_worker()` state machine.
        """
        self.CreatorID = my_id.getLocalID()
        self.BackupID = BackupID
        _parts = packetid.SplitBackupID(self.BackupID)
        self.CustomerGlobalID = _parts[0]
        self.CustomerIDURL = global_id.GlobalUserToIDURL(self.CustomerGlobalID)
        self.PathID = _parts[1]
        self.Version = _parts[2]
        self.File = OutputFile
        self.KeyID = KeyID
        # current active block number; adding 1 brings us to the first block, which is 0
        self.BlockNumber = -1
        self.BytesWritten = 0
        self.OnHandData = []
        self.OnHandParity = []
        self.AbortState = False
        self.Done = False
        self.EccMap = eccmap.Current()
        self.Started = time.time()
        self.LastAction = time.time()
        self.RequestFails = []
        # For anyone who wants to know when we finish
        self.MyDeferred = Deferred()
        self.packetInCallback = None
        self.blockRestoredCallback = None

        super(RestoreWorker,
              self).__init__(name='restore_worker_%s' % self.Version,
                             state="AT_STARTUP",
                             debug_level=debug_level,
                             log_events=log_events,
                             log_transitions=log_transitions,
                             publish_events=publish_events,
                             **kwargs)
        events.send('restore-started', dict(backup_id=self.BackupID))
Example #12
 def __init__(
     self,
     backupID,
     pipe,
     finishCallback=None,
     blockResultCallback=None,
     blockSize=None,
     sourcePath=None,
     keyID=None,
 ):
     self.backupID = backupID
     _parts = packetid.SplitBackupID(self.backupID)
     self.customerGlobalID = _parts[0]
     self.pathID = _parts[1]
     self.version = _parts[2]
     self.customerIDURL = global_id.GlobalUserToIDURL(self.customerGlobalID)
     self.sourcePath = sourcePath
     self.keyID = keyID
     self.eccmap = eccmap.Current()
     self.pipe = pipe
     self.blockSize = blockSize
     if self.blockSize is None:
         self.blockSize = settings.getBackupBlockSize()
     self.ask4abort = False
     self.terminating = False
     self.stateEOF = False
     self.stateReading = False
     self.closed = False
     self.currentBlockData = BytesIO()
     self.currentBlockSize = 0
     self.workBlocks = {}
     self.blockNumber = 0
     self.dataSent = 0
     self.blocksSent = 0
     self.totalSize = -1
     self.finishCallback = finishCallback
     self.blockResultCallback = blockResultCallback
     automat.Automat.__init__(self, 'backup_%s' % self.version,
                              'AT_STARTUP', _DebugLevel)
Example #13
 def doRemoveUnusedFiles(self, *args, **kwargs):
     """
     Action method.
     """
     if not list_files_orator.is_synchronized():
         # always make sure we have fresh info about remote files before taking any action
         return
     # we want to remove files for this block
     # because we only need them during rebuilding
     if settings.getBackupsKeepLocalCopies() is True:
         # the user chose in settings to keep the local files
         return
     # ... the user does not want to keep local backups
     if settings.getGeneralWaitSuppliers() is True:
         from customer import fire_hire
         # but wants to be sure all suppliers have been online for a long time
         if len(online_status.listOfflineSuppliers()) > 0 or (
                 time.time() - fire_hire.GetLastFireTime() < 24 * 60 * 60):
             # some suppliers are offline or the team is not stable yet;
             # do not remove the files, we still need them to rebuild
             return
     count = 0
     from storage import backup_matrix
     from storage import restore_monitor
     from storage import backup_rebuilder
     if _Debug:
         lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
     for backupID in misc.sorted_backup_ids(
             list(backup_matrix.local_files().keys())):
         if restore_monitor.IsWorking(backupID):
             if _Debug:
                 lg.out(_DebugLevel,
                        '        %s : SKIP, because restoring' % backupID)
             continue
         if backup_rebuilder.IsBackupNeedsWork(backupID):
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     '        %s : SKIP, because needs rebuilding' %
                     backupID)
             continue
         if not backup_rebuilder.ReadStoppedFlag():
             if backup_rebuilder.A().currentBackupID is not None:
                 if backup_rebuilder.A().currentBackupID == backupID:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             '        %s : SKIP, because rebuilding is in process'
                             % backupID)
                     continue
         if backupID not in backup_matrix.remote_files():
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     '        going to erase %s because not found in remote files'
                     % backupID)
             customer, pathID, version = packetid.SplitBackupID(backupID)
             dirpath = os.path.join(settings.getLocalBackupsDir(), customer,
                                    pathID, version)
             if os.path.isdir(dirpath):
                 try:
                     count += bpio.rmdir_recursive(dirpath,
                                                   ignore_errors=True)
                 except:
                     lg.exc()
             continue
         packets = backup_matrix.ScanBlocksToRemove(
             backupID,
             check_all_suppliers=settings.getGeneralWaitSuppliers())
         for packetID in packets:
             customer, pathID = packetid.SplitPacketID(packetID)
             filename = os.path.join(settings.getLocalBackupsDir(),
                                     customer, pathID)
             if os.path.isfile(filename):
                 try:
                     os.remove(filename)
                 except:
                     lg.exc()
                     continue
                 count += 1
     if _Debug:
         lg.out(_DebugLevel, '    %d files were removed' % count)
     backup_matrix.ReadLocalFiles()
Example #14
 def _itemRestored(backupID, result):
     customerGlobalID, remotePath, _ = packetid.SplitBackupID(backupID)
     backup_fs.ScanID(
         remotePath,
         customer_idurl=global_id.GlobalUserToIDURL(customerGlobalID))
     backup_fs.Calculate()
Example #15
 def _do_send_packets(self, backup_id, block_num):
     customer_id, path_id, version_name = packetid.SplitBackupID(backup_id)
     archive_snapshot_dir = os.path.join(settings.getLocalBackupsDir(),
                                         customer_id, path_id, version_name)
     if _Debug:
         lg.args(_DebugLevel,
                 backup_id=backup_id,
                 block_num=block_num,
                 archive_snapshot_dir=archive_snapshot_dir)
     if not os.path.isdir(archive_snapshot_dir):
         self.block_failed = True
         lg.err('archive snapshot folder not found: %r' %
                archive_snapshot_dir)
         return None
     failed_suppliers = 0
     for supplier_num in range(len(self.suppliers_list)):
         supplier_idurl = self.suppliers_list[supplier_num]
         if not supplier_idurl:
             failed_suppliers += 1
             lg.warn('unknown supplier supplier_num=%d' % supplier_num)
             continue
         for dataORparity in (
                 'Data',
                 'Parity',
         ):
             packet_id = packetid.MakePacketID(backup_id, block_num,
                                               supplier_num, dataORparity)
             packet_filename = os.path.join(
                 archive_snapshot_dir, '%d-%d-%s' % (
                     block_num,
                     supplier_num,
                     dataORparity,
                 ))
             if not os.path.isfile(packet_filename):
                 lg.err('%s is not a file' % packet_filename)
                 continue
             packet_payload = bpio.ReadBinaryFile(packet_filename)
             if not packet_payload:
                 lg.err('file %r reading error' % packet_filename)
                 continue
             if block_num not in self.packets_out:
                 self.packets_out[block_num] = {}
             self.packets_out[block_num][packet_id] = None
             p2p_service.SendData(
                 raw_data=packet_payload,
                 ownerID=self.queue_owner_idurl,
                 creatorID=my_id.getIDURL(),
                 remoteID=supplier_idurl,
                 packetID=packet_id,
                 callbacks={
                     commands.Ack():
                     lambda newpacket, _: self.automat('ack',
                                                       newpacket=newpacket),
                     commands.Fail():
                     lambda newpacket, _: self.automat('fail',
                                                       newpacket=newpacket),
                 },
             )
     if failed_suppliers > self.correctable_errors:
         self.block_failed = True
         lg.err('too many failed suppliers %d in block %d' % (
             failed_suppliers,
             block_num,
         ))
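`_do_send_packets()` expects one file per (block, supplier, Data/Parity) triple inside the snapshot folder. A sketch of that naming, mirroring the '%d-%d-%s' pattern above; the directory and ID values are illustrative only:

import os

data_file = os.path.join(
    '/var/lib/bitdust/backups',        # assumed local backups dir
    'alice@id-server.net',             # customer_id (made up)
    '0/0/1/2',                         # path_id (made up)
    'F20201209173641PM',               # version_name (made up)
    '%d-%d-%s' % (0, 1, 'Data'))       # block 0, supplier 1 -> '0-1-Data'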
Example #16
 def doScanAndQueue(self, *args, **kwargs):
     """
     Action method.
     """
     global _ShutdownFlag
     if _ShutdownFlag:
         if _Debug:
             lg.out(_DebugLevel,
                    'data_sender.doScanAndQueue   _ShutdownFlag is True\n')
         self.automat('scan-done', 0)
         return
     from storage import backup_matrix
     from storage import backup_fs
     backup_matrix.ReadLocalFiles()
     progress = 0
     # if _Debug:
     #     lg.out(_DebugLevel, 'data_sender.doScanAndQueue    with %d known customers' % len(contactsdb.known_customers()))
     for customer_idurl in contactsdb.known_customers():
         if customer_idurl != my_id.getIDURL():
             # TODO: check that later
             if _Debug:
                 lg.out(
                     _DebugLevel + 2,
                     'data_sender.doScanAndQueue  skip sending to another customer: %r'
                     % customer_idurl)
             continue
         known_suppliers = contactsdb.suppliers(customer_idurl)
         if not known_suppliers or id_url.is_some_empty(known_suppliers):
             if _Debug:
                 lg.out(
                     _DebugLevel,
                     'data_sender.doScanAndQueue    found empty supplier(s) for customer %r, SKIP'
                     % customer_idurl)
             continue
         known_backups = misc.sorted_backup_ids(
             list(backup_matrix.local_files().keys()), True)
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'data_sender.doScanAndQueue    found %d known suppliers for customer %r with %d backups'
                 %
                 (len(known_suppliers), customer_idurl, len(known_backups)))
         for backupID in known_backups:
             this_customer_idurl = packetid.CustomerIDURL(backupID)
             if this_customer_idurl != customer_idurl:
                 continue
             customerGlobalID, pathID, _ = packetid.SplitBackupID(
                 backupID, normalize_key_alias=True)
             keyAlias = packetid.KeyAlias(customerGlobalID)
             item = backup_fs.GetByID(pathID,
                                      iterID=backup_fs.fsID(
                                          customer_idurl, keyAlias))
             if not item:
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'data_sender.doScanAndQueue    skip sending backup %r path not exist in catalog'
                         % backupID)
                 continue
             if item.key_id and customerGlobalID and customerGlobalID != item.key_id:
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'data_sender.doScanAndQueue    skip sending backup %r key is different in the catalog: %r ~ %r'
                         % (
                             backupID,
                             customerGlobalID,
                             item.key_id,
                         ))
                 continue
             packetsBySupplier = backup_matrix.ScanBlocksToSend(
                 backupID, limit_per_supplier=None)
             total_for_customer = sum(
                 [len(v) for v in packetsBySupplier.values()])
             if total_for_customer:
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'data_sender.doScanAndQueue    sending %r for customer %r with %d pieces'
                         %
                         (item.name(), customer_idurl, total_for_customer))
                 for supplierNum in packetsBySupplier.keys():
                     # supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
                     if supplierNum >= 0 and supplierNum < len(
                             known_suppliers):
                         supplier_idurl = known_suppliers[supplierNum]
                     else:
                         supplier_idurl = None
                     if not supplier_idurl:
                         lg.warn(
                             'skip sending, unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%r'
                             % (supplierNum, backupID, customer_idurl))
                         continue
                     for packetID in packetsBySupplier[supplierNum]:
                         backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(
                             packetID)
                         if backupID_ != backupID:
                             lg.warn(
                                 'skip sending, unexpected backupID in packetID=%s for %s, customer_idurl=%r'
                                 % (packetID, backupID, customer_idurl))
                             continue
                         if supplierNum_ != supplierNum:
                             lg.warn(
                                 'skip sending, unexpected supplierNum in packetID=%s for %s, customer_idurl=%r'
                                 % (packetID, backupID, customer_idurl))
                             continue
                         if io_throttle.HasPacketInSendQueue(
                                 supplier_idurl, packetID):
                             if _Debug:
                                 lg.out(
                                     _DebugLevel,
                                     'data_sender.doScanAndQueue    %s already in sending queue for %r'
                                     % (packetID, supplier_idurl))
                             continue
                         latest_progress = self.statistic.get(
                             supplier_idurl, {}).get('latest', '')
                         if len(latest_progress
                                ) >= 3 and latest_progress.endswith('---'):
                             if _Debug:
                                 lg.out(
                                     _DebugLevel + 2,
                                     'data_sender.doScanAndQueue     skip sending to supplier %r because multiple packets already failed'
                                     % supplier_idurl)
                             continue
                         if not io_throttle.OkToSend(supplier_idurl):
                             if _Debug:
                                 lg.out(
                                     _DebugLevel + 2,
                                     'data_sender.doScanAndQueue     skip sending, queue is busy for %r'
                                     % supplier_idurl)
                             continue
                         customerGlobalID, pathID = packetid.SplitPacketID(
                             packetID)
                         filename = os.path.join(
                             settings.getLocalBackupsDir(),
                             customerGlobalID,
                             pathID,
                         )
                         if not os.path.isfile(filename):
                             if _Debug:
                                 lg.out(
                                     _DebugLevel,
                                     'data_sender.doScanAndQueue     %s is not a file'
                                     % filename)
                             continue
                         itemInfo = item.to_json()
                         if io_throttle.QueueSendFile(
                                 filename,
                                 packetID,
                                 supplier_idurl,
                                 my_id.getIDURL(),
                                 lambda packet, ownerID,
                                 packetID: self._packetAcked(
                                     packet, ownerID, packetID, itemInfo),
                                 lambda remoteID, packetID,
                                 why: self._packetFailed(
                                     remoteID, packetID, why, itemInfo),
                         ):
                             progress += 1
                             if _Debug:
                                 lg.out(
                                     _DebugLevel,
                                     'data_sender.doScanAndQueue   for %r put %s in the queue  progress=%d'
                                     % (
                                         item.name(),
                                         packetID,
                                         progress,
                                     ))
                         else:
                             if _Debug:
                                 lg.out(
                                     _DebugLevel,
                                     'data_sender.doScanAndQueue    io_throttle.QueueSendFile FAILED %s'
                                     % packetID)
     if _Debug:
         lg.out(_DebugLevel,
                'data_sender.doScanAndQueue    progress=%s' % progress)
     self.automat('scan-done', progress)
Example #17
 def doScanAndQueue(self, *args, **kwargs):
     """
     Action method.
     """
     global _ShutdownFlag
     if _ShutdownFlag:
         if _Debug:
             lg.out(_DebugLevel, 'data_sender.doScanAndQueue   _ShutdownFlag is True\n')
         self.automat('scan-done', 0)
         return
     from storage import backup_matrix
     from storage import backup_fs
     backup_matrix.ReadLocalFiles()
     progress = 0
     if _Debug:
         lg.out(_DebugLevel, 'data_sender.doScanAndQueue    with %d known customers' % len(contactsdb.known_customers()))
     for customer_idurl in contactsdb.known_customers():
         if customer_idurl != my_id.getLocalID():
             # TODO: check that later
             if _Debug:
                 lg.out(_DebugLevel + 6, 'data_sender.doScanAndQueue  skip sending to another customer: %r' % customer_idurl)
             continue
         known_suppliers = contactsdb.suppliers(customer_idurl)
         if not known_suppliers or id_url.is_some_empty(known_suppliers):
             if _Debug:
                 lg.out(_DebugLevel, 'data_sender.doScanAndQueue    found empty supplier(s) for customer %r, SKIP' % customer_idurl)
             continue
         known_backups = misc.sorted_backup_ids(list(backup_matrix.local_files().keys()), True)
         if _Debug:
             lg.out(_DebugLevel, 'data_sender.doScanAndQueue    found %d known suppliers for customer %r with %d backups' % (
                 len(known_suppliers), customer_idurl, len(known_backups)))
         for backupID in known_backups:
             this_customer_idurl = packetid.CustomerIDURL(backupID)
             if this_customer_idurl != customer_idurl:
                 continue
             customerGlobalID, pathID, _ = packetid.SplitBackupID(backupID, normalize_key_alias=True)
             item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customer_idurl=customer_idurl))
             if not item:
                 if _Debug:
                     lg.out(_DebugLevel, 'data_sender.doScanAndQueue    skip sending backup %r path not exist in catalog' % backupID)
                 continue
             if item.key_id and customerGlobalID and customerGlobalID != item.key_id:
                 if _Debug:
                     lg.out(_DebugLevel, 'data_sender.doScanAndQueue    skip sending backup %r key is different in the catalog' % backupID)
                 continue
             packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID, limit_per_supplier=None)
             total_for_customer = sum([len(v) for v in packetsBySupplier.values()])
             if _Debug:
                 lg.out(_DebugLevel, 'data_sender.doScanAndQueue    to be delivered for customer %r : %d' % (customer_idurl, total_for_customer))
             for supplierNum in packetsBySupplier.keys():
                 # supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
                 if supplierNum >= 0 and supplierNum < len(known_suppliers):
                     supplier_idurl = known_suppliers[supplierNum]
                 else:
                     supplier_idurl = None
                 if not supplier_idurl:
                     lg.warn('skip sending, unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%r' % (
                         supplierNum, backupID, customer_idurl))
                     continue
                 for packetID in packetsBySupplier[supplierNum]:
                     backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
                     if backupID_ != backupID:
                         lg.warn('skip sending, unexpected backupID in packetID=%s for %s, customer_idurl=%r' % (
                             packetID, backupID, customer_idurl))
                         continue
                     if supplierNum_ != supplierNum:
                         lg.warn('skip sending, unexpected supplierNum in packetID=%s for %s, customer_idurl=%r' % (
                             packetID, backupID, customer_idurl))
                         continue
                     if io_throttle.HasPacketInSendQueue(supplier_idurl, packetID):
                         if _Debug:
                             lg.out(_DebugLevel, 'data_sender.doScanAndQueue    %s already in sending queue for %r' % (packetID, supplier_idurl))
                         continue
                     if not io_throttle.OkToSend(supplier_idurl):
                         if _Debug:
                             lg.out(_DebugLevel + 6, 'data_sender.doScanAndQueue     skip sending, queue is busy for %r\n' % supplier_idurl)
                         continue
                     # customerGlobalID, pathID = packetid.SplitPacketID(packetID)
                     # tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
                     # if len(tranByID) > 3:
                     #     log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
                     #     continue
                     customerGlobalID, pathID = packetid.SplitPacketID(packetID)
                     filename = os.path.join(
                         settings.getLocalBackupsDir(),
                         customerGlobalID,
                         pathID,
                     )
                     if not os.path.isfile(filename):
                         if _Debug:
                             lg.out(_DebugLevel, 'data_sender.doScanAndQueue     %s is not a file\n' % filename)
                         continue
                     if io_throttle.QueueSendFile(
                         filename,
                         packetID,
                         supplier_idurl,
                         my_id.getIDURL(),
                         self._packetAcked,
                         self._packetFailed,
                     ):
                         progress += 1
                         if _Debug:
                             lg.out(_DebugLevel, 'data_sender.doScanAndQueue   put %s in the queue  progress=%d' % (packetID, progress, ))
                     else:
                         if _Debug:
                             lg.out(_DebugLevel, 'data_sender.doScanAndQueue    io_throttle.QueueSendFile FAILED %s' % packetID)
     if _Debug:
         lg.out(_DebugLevel, 'data_sender.doScanAndQueue progress=%s' % progress)
     self.automat('scan-done', progress)