def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {
            'result': {
                "success": False,
                "error": 'local path %s was not found' % path
            }
        }
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, read_stats=True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath,
                                                     read_stats=True)
            result.append('new file was added: %s' % localPath)
    pathID = global_id.CanonicalID(pathID)
    backup_control.StartSingle(pathID=pathID, localPath=localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    result.append('backup started: %s' % pathID)
    return {
        'result': result,
    }
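For reference, a minimal sketch of how ``_upload()`` might be invoked (this handler is Python 2 era code, note ``unicode()``); the local path below is a made-up placeholder, and the handler only needs a ``params`` dict with a ``path`` key:

# hypothetical call - any existing local path would work here
response = _upload({'path': '/home/alice/Documents/photos'})
if isinstance(response['result'], list):
    for line in response['result']:
        print(line)  # e.g. 'backup started: ...'
else:
    print('failed:', response['result']['error'])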
Example #2
def on_outbox_packet(outpacket,
                     wide,
                     callbacks,
                     target=None,
                     route=None,
                     response_timeout=None,
                     keep_alive=True):
    """
    """
    started_packets = packet_out.search_similar_packets(outpacket)
    if started_packets:
        for active_packet, active_item in started_packets:
            if callbacks:
                for command, cb in callbacks.items():
                    active_packet.set_callback(command, cb)
            lg.warn(
                'skip creating new outbox packet because found similar packet: %r'
                % active_packet)
            return active_packet
    pkt_out = packet_out.create(outpacket, wide, callbacks, target, route,
                                response_timeout, keep_alive)
    # if _Debug and lg.is_debug(_DebugLevel):
    #     monitoring()
    control.request_update([('packet', outpacket.PacketID)])
    return pkt_out
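A hedged sketch of the ``callbacks`` argument shape, inferred only from how this function consumes it (``callbacks.items()`` yielding command-to-callable pairs); the 'Ack' command key and the handler signature are assumptions for illustration:

def _on_response(response_packet, info):  # hypothetical handler signature
    print('peer responded:', response_packet)

pkt = on_outbox_packet(outpacket, wide=False, callbacks={'Ack': _on_response})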
Example #3
def on_cancelled_file_sending(proto,
                              host,
                              filename,
                              size,
                              description='',
                              error_message=None):
    """
    """
    pkt_out, work_item = packet_out.search(proto, host, filename)
    if pkt_out is None:
        if _Debug:
            lg.out(
                _DebugLevel,
                'gateway.on_cancelled_file_sending packet_out %s %s %s not found - IT IS OK'
                % (proto, host, os.path.basename(filename)))
        return True
    pkt_out.automat('item-cancelled',
                    (proto, host, filename, size, description, error_message))
    if pkt_out.outpacket:
        control.request_update([('packet', pkt_out.outpacket.PacketID)])
    if _Debug:
        lg.out(
            _DebugLevel, '>>> OUT >>>  {%s} CANCELLED via [%s] to %s : %s' %
            (os.path.basename(filename), proto, host, error_message))
    return True
Example #4
def on_unregister_file_receiving(transfer_id,
                                 status,
                                 bytes_received,
                                 error_message=''):
    """
    Called from a transport plug-in after it finishes receiving a single file.
    """
    pkt_in = packet_in.get(transfer_id)
    if not pkt_in:
        lg.exc(exc_value=Exception(
            'incoming packet with transfer_id=%r does not exist' % transfer_id))
        return False
    if _Debug:
        if status == 'finished':
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s with %d bytes' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 bytes_received))
        else:
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s : %s' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 error_message))
    pkt_in.automat('unregister-item', (status, bytes_received, error_message))
    control.request_update([('stream', transfer_id)])
    return True
Example #5
def on_unregister_file_sending(transfer_id,
                               status,
                               bytes_sent,
                               error_message=None):
    """
    Called from a transport plug-in after it finishes sending a single file.
    """
    if transfer_id is None:
        return False
    if _Debug:
        lg.out(
            _DebugLevel,
            'gateway.on_unregister_file_sending %s %s' % (transfer_id, status))
    pkt_out, work_item = packet_out.search_by_transfer_id(transfer_id)
    if pkt_out is None:
        if _Debug:
            lg.out(_DebugLevel, '        %s is not found' % str(transfer_id))
        return False
    pkt_out.automat('unregister-item',
                    (transfer_id, status, bytes_sent, error_message))
    control.request_update([('stream', transfer_id)])
    if status == 'finished':
        if _Debug:
            lg.out(
                _DebugLevel, '>>> OUT >>> %s (%d) [%s://%s] %s with %d bytes' %
                (pkt_out.description, transfer_id, work_item.proto,
                 work_item.host, status.upper(), bytes_sent))
    else:
        if _Debug:
            lg.out(
                _DebugLevel, '>>> OUT >>> %s (%d) [%s://%s] %s : %s' %
                (pkt_out.description, transfer_id, work_item.proto,
                 work_item.host, str(status).upper(), error_message))
    return True
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s is not valid" % pathID
            }
        }
    if not backup_fs.ExistsID(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % pathID
            }
        }
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
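A usage sketch for ``_delete()``; the path ID below is an invented placeholder in the format that ``packetid.Valid()`` accepts:

# hypothetical path ID - must be valid and present in the catalog
response = _delete({'id': '0/0/1/0'})
if not response['result']['success']:
    print('delete failed:', response['result']['error'])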
def _delete_version(params):
    lg.out(6, '_delete_version %s' % str(params))
    backupID = params['backupid']
    if not packetid.Valid(backupID):
        return {
            'result': {
                "success": False,
                "error": "backupID %s is not valid" % backupID
            }
        }
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not backup_fs.ExistsID(
            remotePath,
            iterID=backup_fs.fsID(
                global_id.GlobalUserToIDURL(customerGlobalID))):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % remotePath
            }
        }
    if version:
        backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    backup_monitor.A('restart')
    control.request_update([
        ('backupID', backupID),
    ])
    return {'result': {"success": True, "error": None}}
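A matching sketch for ``_delete_version()``; per ``packetid.SplitBackupID()`` above, a full backup ID combines a customer global ID, a remote path and a version label - the ID below is invented for illustration:

# hypothetical backup ID in <customer>:<remotePath>/<version> form
response = _delete_version({'backupid': 'alice@idhost.org:0/0/1/0/F20201209034221PM'})
print(response['result'])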
Example #8
def doUpdate(self, arg):
    # lg.out(4, 'installer.doUpdate')
    from main import control
    control.request_update([
        {
            'state': self.state
        },
    ])
Example #9
def DeleteBackup(backupID,
                 removeLocalFilesToo=True,
                 saveDB=True,
                 calculate=True):
    """
    This removes a single backup ID completely. Perform several operations:

    1) abort backup if it just started and is running at the moment
    2) if we requested for files for this backup we do not need it anymore - remove 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index data base
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index data base, soon it will be synchronized with "index_synchronizer()" state machine
    """
    backupID = global_id.CanonicalID(backupID)
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(
            8, 'backup_control.DeleteBackup %s is in process, stopping' %
            backupID)
        return True
    from customer import io_throttle
    from . import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested files for this backup - we do not need them anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    # callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([
            ('backupID', backupID),
        ])
    return True
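The ``saveDB`` and ``calculate`` flags exist to support batching: when removing many backups in a row, the scan/calculate/save steps can be deferred and performed once at the end, which is exactly what ``DeleteAllBackups()`` below does. A minimal sketch of that pattern:

# ids_to_remove is a hypothetical list of backup IDs
for backupID in ids_to_remove:
    DeleteBackup(backupID, saveDB=False, calculate=False)
backup_fs.Scan()
backup_fs.Calculate()
Save()
control.request_update()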
Example #10
def OnJobDone(backupID, result):
    """
    A callback method fired when backup is finished.

    Here we need to save the index data base.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' %
            (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath,
                                     iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        # use a separate name here so the original backupID
                        # stays intact for RepaintBackup() below
                        oldBackupID = packetid.MakeBackupID(
                            customerGlobalID, remotePath, version)
                        backup_rebuilder.RemoveBackupToWork(oldBackupID)
                        # io_throttle.DeleteBackupRequests(oldBackupID)
                        # io_throttle.DeleteBackupSendings(oldBackupID)
                        # callback.delete_backup_interest(oldBackupID)
                        backup_fs.DeleteLocalBackup(
                            settings.getLocalBackupsDir(), oldBackupID)
                        backup_matrix.EraseBackupLocalInfo(oldBackupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([
            ('pathID', remotePath),
        ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if a lot of tasks were started this would produce a lot of unneeded actions;
        # it is smarter to restart it once after we finish all tasks,
        # because the user will probably leave BitDust running after starting a long operation
        from storage import backup_monitor
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left'
            )
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version,
                      result)  # @UndefinedVariable
Example #11
def state_changed(self, oldstate, newstate, event, *args, **kwargs):
    if newstate == 'CONTACTS' and oldstate == 'STORAGE':
        self.event('next', {})
        # TODO:
        # here we just skip the Contacts page!
        # we do not need it now, but can come back to it soon when we add chat
    from main import control
    control.request_update()
    installer.A('install_wizard.state', newstate)
Example #12
def DeletePathBackups(pathID,
                      removeLocalFilesToo=True,
                      saveDB=True,
                      calculate=True):
    """
    This removes all backups of given path ID
    Doing same operations as ``DeleteBackup()``.
    """
    from . import backup_rebuilder
    from customer import io_throttle
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
    if item is None:
        return False
    lg.out(8, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        lg.out(8, '        removing %s' % backupID)
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested files for this backup - we do not need them anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(),
                                        backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
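Callers typically pair this with a monitor restart, as ``_delete()`` above does; a condensed sketch using the default flags:

# pathID is assumed to be a canonical catalog path ID
if DeletePathBackups(pathID):
    backup_monitor.A('restart')  # re-evaluate what needs rebuilding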
Example #13
def IncomingSupplierBackupIndex(newpacket):
    """
    Called by ``p2p.p2p_service`` when a remote copy of our local index data
    base ( in the "Data" packet ) is received from one of our suppliers.

    The index is also stored on suppliers to be able to restore it.
    """
    b = encrypted.Unserialize(newpacket.Payload)
    if b is None:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        return
    try:
        session_key = key.DecryptLocalPrivateKey(b.EncryptedSessionKey)
        padded_data = key.DecryptWithSessionKey(session_key, b.EncryptedData)
        inpt = cStringIO.StringIO(padded_data[:int(b.Length)])
        supplier_revision = inpt.readline().rstrip('\n')
        if supplier_revision:
            supplier_revision = int(supplier_revision)
        else:
            supplier_revision = -1
        # inpt.seek(0)
    except:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        lg.out(2, '\n' + padded_data)
        lg.exc()
        try:
            inpt.close()
        except:
            pass
        return
    if driver.is_on('service_backup_db'):
        from storage import index_synchronizer
        index_synchronizer.A('index-file-received', (newpacket, supplier_revision))
    if revision() >= supplier_revision:
        inpt.close()
        lg.out(4, 'backup_control.IncomingSupplierBackupIndex SKIP, supplier %s revision=%d, local revision=%d' % (
            newpacket.RemoteID, supplier_revision, revision(), ))
        return
    raw_data = inpt.read()
    inpt.close()
    if ReadIndex(raw_data):
        commit(supplier_revision)
        backup_fs.Scan()
        backup_fs.Calculate()
        WriteIndex()
        control.request_update()
        lg.out(4, 'backup_control.IncomingSupplierBackupIndex updated to revision %d from %s' % (
            revision(), newpacket.RemoteID))
    else:
        lg.warn('failed to read catalog index from supplier')
Example #14
def IncomingSupplierBackupIndex(newpacket):
    """
    Called by ``p2p.p2p_service`` when a remote copy of our local index data
    base ( in the "Data" packet ) is received from one of our suppliers.

    The index is also stored on suppliers to be able to restore it.
    """
    b = encrypted.Unserialize(newpacket.Payload)
    if b is None:
        lg.err('failed reading data from %s' % newpacket.RemoteID)
        return None
    try:
        session_key = key.DecryptLocalPrivateKey(b.EncryptedSessionKey)
        padded_data = key.DecryptWithSessionKey(session_key, b.EncryptedData, session_key_type=b.SessionKeyType)
        inpt = StringIO(strng.to_text(padded_data[:int(b.Length)]))
        supplier_revision = inpt.readline().rstrip('\n')
        if supplier_revision:
            supplier_revision = int(supplier_revision)
        else:
            supplier_revision = -1
    except:
        lg.exc()
        try:
            inpt.close()
        except:
            pass
        return None
    if revision() > supplier_revision:
        inpt.close()
        if _Debug:
            lg.out(_DebugLevel, 'backup_control.IncomingSupplierBackupIndex SKIP, supplier %s revision=%d, local revision=%d' % (
                newpacket.RemoteID, supplier_revision, revision(), ))
        return supplier_revision
    text_data = inpt.read()
    inpt.close()
    if ReadIndex(text_data):
        commit(supplier_revision)
        backup_fs.Scan()
        backup_fs.Calculate()
        WriteIndex()
        control.request_update()
        if _Debug:
            lg.out(_DebugLevel, 'backup_control.IncomingSupplierBackupIndex updated to revision %d from %s' % (
                revision(), newpacket.RemoteID))
    else:
        lg.warn('failed to read catalog index from supplier')
    return supplier_revision
Example #15
def DeleteAllBackups():
    """
    Remove all backup IDs from index data base, see ``DeleteBackup()`` method.
    """
    # prepare a list of all known backup IDs
    all_ids = set(backup_fs.ListAllBackupIDs())
    all_ids.update(backup_matrix.GetBackupIDs(remote=True, local=True))
    lg.out(4, 'backup_control.DeleteAllBackups %d IDs to kill' % len(all_ids))
    # delete one by one
    for backupID in all_ids:
        DeleteBackup(backupID, saveDB=False, calculate=False)
    # scan all files
    backup_fs.Scan()
    # check and calculate used space
    backup_fs.Calculate()
    # save the index
    Save()
    # refresh the GUI
    control.request_update()
Example #16
def on_register_file_receiving(proto, host, sender_idurl, filename, size=0):
    """
    Called from transport plug-in when receiving a single file were started
    from some peer.

    Must return a unique transfer ID, create a `FileTransferInfo` object
    and put it into "transfers" list. Plug-in's code must create a
    temporary file and write incoming data into that file.
    """
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel, '... IN ... %d receive {%s} via [%s] from %s at %s' %
            (transfer_id, os.path.basename(filename), proto,
             nameurl.GetName(sender_idurl), host))
    incoming_packet = packet_in.create(transfer_id)
    incoming_packet.automat('register-item',
                            (proto, host, sender_idurl, filename, size))
    control.request_update([('stream', transfer_id)])
    return transfer_id
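Combined with ``on_unregister_file_receiving()`` above, a transport plug-in's receive path reduces to a register/write/unregister sequence. A simplified sketch under those assumptions (real plug-ins stream into a temporary file instead of taking the data as one argument):

def receive_file(proto, host, sender_idurl, filename, data):
    # hypothetical plug-in hook: register first to obtain the transfer ID
    transfer_id = on_register_file_receiving(
        proto, host, sender_idurl, filename, size=len(data))
    try:
        with open(filename, 'wb') as fout:
            fout.write(data)
    except Exception as exc:
        # any status other than 'finished' is reported as an error
        return on_unregister_file_receiving(transfer_id, 'failed', 0, str(exc))
    return on_unregister_file_receiving(transfer_id, 'finished', len(data))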
Example #17
def on_register_file_sending(proto,
                             host,
                             receiver_idurl,
                             filename,
                             size=0,
                             description=''):
    """
    Called from transport plug-in when sending of a single file started towards remote peer.
    Must return a unique transfer ID so plug-in will know that ID.
    After finishing that given transfer - that ID is passed to `unregister_file_sending()`.
    Need to first find existing outgoing packet and register that item.
    """
    if _Debug:
        lg.out(
            _DebugLevel, 'gateway.on_register_file_sending %s %s to %r' %
            (filename, description, receiver_idurl))


#     if id_url.field(receiver_idurl).to_bin() == my_id.getLocalID().to_bin():
#         pkt_out, work_item = packet_out.search(proto, host, filename)
#     else:
#         pkt_out, work_item = packet_out.search(proto, host, filename, remote_idurl=receiver_idurl)
    pkt_out, work_item = packet_out.search(proto, host, filename)
    if pkt_out is None:
        lg.warn(
            'skip register file sending, packet_out not found: %r %r %r %r' % (
                proto,
                host,
                os.path.basename(filename),
                receiver_idurl,
            ))
        return None
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel, '... OUT ... %s (%d) send {%s} via [%s] to %s at %s' %
            (pkt_out.description, transfer_id, os.path.basename(filename),
             proto, nameurl.GetName(receiver_idurl), host))
    pkt_out.automat('register-item', (proto, host, filename, transfer_id))
    control.request_update([('stream', transfer_id)])
    return transfer_id
Example #18
def on_register_file_sending(proto,
                             host,
                             receiver_idurl,
                             filename,
                             size=0,
                             description=''):
    """
    Called from transport plug-in when sending a single file were started to
    some remote peer. Must return a unique transfer ID so plug-in will know
    that ID.

    After finishing that given transfer - that ID is passed to `unregister_file_sending()`.
    """
    if _Debug:
        lg.out(
            _DebugLevel,
            'gateway.on_register_file_sending %s %s' % (filename, description))
    pkt_out, work_item = packet_out.search(proto,
                                           host,
                                           filename,
                                           remote_idurl=receiver_idurl)
    if pkt_out is None:
        if _Debug:
            lg.out(
                _DebugLevel, '    skip, packet_out not found: %r %r %r' %
                (proto, host, os.path.basename(filename)))
        return None
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel, '... OUT ... %s (%d) send {%s} via [%s] to %s at %s' %
            (pkt_out.description, transfer_id, os.path.basename(filename),
             proto, nameurl.GetName(receiver_idurl), host))


#    if pkt_out.remote_idurl != receiver_idurl and receiver_idurl:
#        if _Debug:
#            lg.out(_DebugLevel, 'gateway.on_register_file_sending [%s] receiver idurl is different [%s]' % (pkt_out.remote_idurl, receiver_idurl))
    pkt_out.automat('register-item', (proto, host, filename, transfer_id))
    control.request_update([('stream', transfer_id)])
    return transfer_id
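The sending side mirrors this: the plug-in registers when transmission starts and unregisters with the final status; in the sketch below ``transmit()`` is a stand-in for the plug-in's real I/O:

def send_file(proto, host, receiver_idurl, filename):
    transfer_id = on_register_file_sending(proto, host, receiver_idurl, filename)
    if transfer_id is None:
        return False  # no matching outgoing packet was found
    bytes_sent, err = transmit(host, filename)  # hypothetical transport call
    status = 'finished' if err is None else 'failed'
    return on_unregister_file_sending(transfer_id, status, bytes_sent, err)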
Example #19
def on_unregister_file_receiving(transfer_id,
                                 status,
                                 bytes_received,
                                 error_message=''):
    """
    Called from a transport plug-in after it finishes receiving a single file.
    """
    pkt_in = packet_in.get(transfer_id)
    assert pkt_in is not None
    if status == 'finished':
        if _Debug:
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s with %d bytes' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 bytes_received))
    else:
        if _Debug:
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s : %s' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 error_message))
    pkt_in.automat('unregister-item', (status, bytes_received, error_message))
    control.request_update([('stream', transfer_id)])
    return True
Example #20
def doUpdate(self, arg):
    from main import control
    control.request_update()
Example #21
def doCleanUpBackups(self, *args, **kwargs):
    # here we check all backups we have and remove the old ones;
    # the user can set how many versions of each file or folder to keep,
    # older versions will be removed here
    from storage import backup_rebuilder
    try:
        self.backups_progress_last_iteration = len(
            backup_rebuilder.A().backupsWasRebuilt)
    except:
        self.backups_progress_last_iteration = 0
    versionsToKeep = settings.getBackupsMaxCopies()
    if not contactsdb.num_suppliers():
        bytesUsed = 0
    else:
        bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
    bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(), 0)
    customerGlobID = my_id.getGlobalID()
    if _Debug:
        lg.out(
            _DebugLevel,
            'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
            % (versionsToKeep, bytesUsed, bytesNeeded))
    delete_count = 0
    if versionsToKeep > 0:
        for pathID, localPath, itemInfo in backup_fs.IterateIDs():
            pathID = global_id.CanonicalID(pathID)
            if backup_control.IsPathInProcess(pathID):
                continue
            versions = itemInfo.list_versions()
            # TODO: do we need to sort the list? it comes from a set, so it may need sorting
            while len(versions) > versionsToKeep:
                backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                 versions.pop(0))
                if _Debug:
                    lg.out(
                        _DebugLevel,
                        'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                        % (len(versions), versionsToKeep, localPath,
                           backupID))
                backup_control.DeleteBackup(backupID,
                                            saveDB=False,
                                            calculate=False)
                delete_count += 1
    # we also need to fit the used space into the needed space (given by other users);
    # they trust us - we should not take extra space from our friends,
    # so remove the oldest backups, but keep at least one for every folder - at least locally!
    # our suppliers will still remove our "extra" files via their "local_tester"
    if bytesNeeded <= bytesUsed:
        sizeOk = False
        for pathID, localPath, itemInfo in backup_fs.IterateIDs():
            if sizeOk:
                break
            pathID = global_id.CanonicalID(pathID)
            versions = itemInfo.list_versions(True, False)
            if len(versions) <= 1:
                continue
            for version in versions[1:]:
                backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                 version)
                versionInfo = itemInfo.get_version_info(version)
                if versionInfo[1] > 0:
                    if _Debug:
                        lg.out(
                            _DebugLevel,
                            'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                            % (bytesUsed, bytesNeeded, backupID, localPath))
                    backup_control.DeleteBackup(backupID,
                                                saveDB=False,
                                                calculate=False)
                    delete_count += 1
                    bytesUsed -= versionInfo[1]
                    if bytesNeeded > bytesUsed:
                        sizeOk = True
                        break
    if delete_count > 0:
        backup_fs.Scan()
        backup_fs.Calculate()
        backup_control.Save()
        from main import control
        control.request_update()
    collected = gc.collect()
    if self.backups_progress_last_iteration > 0:
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_monitor.doCleanUpBackups  sending "restart", backups_progress_last_iteration=%s'
                % self.backups_progress_last_iteration)
        reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
    if _Debug:
        lg.out(
            _DebugLevel,
            'backup_monitor.doCleanUpBackups collected %d objects' %
            collected)
Example #22
def doUpdate(self, *args, **kwargs):
    from main import control
    control.request_update()
Example #23
def doRepaint(self, arg):
    """
    Action method.
    """
    from main import control
    control.request_update([('contact', self.idurl)])