Example #1
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s is not valid" % pathID
            }
        }
    if not backup_fs.ExistsID(pathID):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % pathID
            }
        }
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
Example #2
def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {
            'result': {
                "success": False,
                "error": 'local path %s was not found' % path
            }
        }
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, read_stats=True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath,
                                                     read_stats=True)
            result.append('new file was added: %s' % localPath)
    pathID = global_id.CanonicalID(pathID)
    backup_control.StartSingle(pathID=pathID, localPath=localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([
        ('pathID', pathID),
    ])
    result.append('backup started: %s' % pathID)
    return {
        'result': result,
    }
Example #3
def _delete_version(params):
    lg.out(6, '_delete_version %s' % str(params))
    backupID = params['backupid']
    if not packetid.Valid(backupID):
        return {
            'result': {
                "success": False,
                "error": "backupID %s is not valid" % backupID
            }
        }
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    if not customerGlobalID:
        customerGlobalID = my_id.getGlobalID()
    if not backup_fs.ExistsID(
            remotePath,
            iterID=backup_fs.fsID(
                global_id.GlobalUserToIDURL(customerGlobalID))):
        return {
            'result': {
                "success": False,
                "error": "path %s not found" % remotePath
            }
        }
    if version:
        backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    backup_monitor.A('restart')
    control.request_update([
        ('backupID', backupID),
    ])
    return {'result': {"success": True, "error": None}}
Example #4
def on_cancelled_file_sending(proto,
                              host,
                              filename,
                              size,
                              description='',
                              error_message=None):
    """
    
    """
    pkt_out, work_item = packet_out.search(proto, host, filename)
    if pkt_out is None:
        if _Debug:
            lg.out(
                _DebugLevel,
                'gateway.on_cancelled_file_sending packet_out %s %s %s not found - IT IS OK'
                % (proto, host, os.path.basename(filename)))
        return True
    pkt_out.automat('item-cancelled',
                    (proto, host, filename, size, description, error_message))
    control.request_update([('packet', pkt_out.outpacket.PacketID)])
    if _Debug:
        lg.out(
            _DebugLevel, '>>> OUT >>>  {%s} CANCELLED via [%s] to %s : %s' %
            (os.path.basename(filename), proto, host, error_message))
    return True
Example #5
 def doUpdate(self, arg):
     if not settings.NewWebGUI():
         from web import webcontrol
         reactor.callLater(0, webcontrol.OnUpdateStartingPage)
     else:
         from web import control
         control.request_update()
Example #6
def on_unregister_file_sending(transfer_id,
                               status,
                               bytes_sent,
                               error_message=None):
    """
    Called from transport plug-in after finish sending a single file.
    """
    if transfer_id is None:
        return False
    if _Debug:
        lg.out(
            _DebugLevel,
            'gateway.on_unregister_file_sending %s %s' % (transfer_id, status))
    pkt_out, work_item = packet_out.search_by_transfer_id(transfer_id)
    if pkt_out is None:
        if _Debug:
            lg.out(_DebugLevel, '        %s is not found' % str(transfer_id))
        return False
    pkt_out.automat('unregister-item',
                    (transfer_id, status, bytes_sent, error_message))
    control.request_update([('stream', transfer_id)])
    if status == 'finished':
        if _Debug:
            lg.out(
                _DebugLevel, '>>> OUT >>> %s (%d) [%s://%s] %s with %d bytes' %
                (pkt_out.description, transfer_id, work_item.proto,
                 work_item.host, status.upper(), bytes_sent))
    else:
        if _Debug:
            lg.out(
                _DebugLevel, '>>> OUT >>> %s (%d) [%s://%s] %s : %s' %
                (pkt_out.description, transfer_id, work_item.proto,
                 work_item.host, str(status).upper(), error_message))
    return True
Example #7
def on_register_file_sending(proto, host, receiver_idurl, filename, size=0, description=""):
    """
    Called from transport plug-in when sending a single file were started to
    some remote peer. Must return a unique transfer ID so plug-in will know
    that ID.

    After finishing that given transfer - that ID is passed to `unregister_file_sending()`.
    """
    if _Debug:
        lg.out(_DebugLevel, "gateway.on_register_file_sending %s %s" % (filename, description))
    pkt_out, work_item = packet_out.search(proto, host, filename, remote_idurl=receiver_idurl)
    if pkt_out is None:
        if _Debug:
            lg.out(_DebugLevel, "    skip, packet_out not found: %r %r %r" % (proto, host, os.path.basename(filename)))
        return None
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel,
            "... OUT ... %s (%d) send {%s} via [%s] to %s at %s"
            % (
                pkt_out.description,
                transfer_id,
                os.path.basename(filename),
                proto,
                nameurl.GetName(receiver_idurl),
                host,
            ),
        )
    #    if pkt_out.remote_idurl != receiver_idurl and receiver_idurl:
    #        if _Debug:
    #            lg.out(_DebugLevel, 'gateway.on_register_file_sending [%s] receiver idurl is different [%s]' % (pkt_out.remote_idurl, receiver_idurl))
    pkt_out.automat("register-item", (proto, host, filename, transfer_id))
    control.request_update([("stream", transfer_id)])
    return transfer_id
Example #8
 def doUpdate(self, arg):
     # lg.out(4, 'installer.doUpdate')
     if not settings.NewWebGUI():
         from web import webcontrol
         reactor.callLater(0, webcontrol.OnUpdateInstallPage)
     else:
         from web import control
         control.request_update([{'state': self.state}, ])
Example #9
 def doRepaint(self, arg):
     """
     Action method.
     """
     if not settings.NewWebGUI():
         from web import webcontrol
         webcontrol.OnAliveStateChanged(self.idurl)
     else:
         from web import control
         control.request_update([('contact', self.idurl)])
Example #10
 def doUpdate(self, arg):
     # lg.out(4, 'installer.doUpdate')
     if not settings.NewWebGUI():
         from web import webcontrol
         reactor.callLater(0, webcontrol.OnUpdateInstallPage)
     else:
         from web import control
         control.request_update([
             {
                 'state': self.state
             },
         ])
Example #11
 def doCleanUpBackups(self, arg):
     # here we check all backups we have and remove the old one
     # user can set how many versions of that file or folder to keep
     # other versions (older) will be removed here
     versionsToKeep = settings.getBackupsMaxCopies()
     bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(), 0)
     lg.out(6, 'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d' % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so must be sorted may be
             while len(versions) > versionsToKeep:
                 backupID = pathID + '/' + versions.pop(0)
                 lg.out(6, 'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s' % (len(versions), versionsToKeep, localPath, backupID))
                 backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                 delete_count += 1
     # we need also to fit used space into needed space (given from other users)
     # they trust us - do not need to take extra space from our friends
     # so remove oldest backups, but keep at least one for every folder - at least locally!
     # still our suppliers will remove our "extra" files by their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = pathID + '/' + version
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     lg.out(6, 'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s' % (
                         bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from web import control
         control.request_update()
     collected = gc.collect()
     lg.out(6, 'backup_monitor.doCleanUpBackups collected %d objects' % collected)
Example #12
 def state_changed(self, oldstate, newstate, event, arg):
     if newstate == 'CONTACTS' and oldstate == 'STORAGE':
         self.event('next', {})
         # TODO:
         # here just skip Contacts page!
         # we do not need that now, but can back to that soon when add chat
     if not settings.NewWebGUI():
         from web import webcontrol
         reactor.callLater(0, webcontrol.OnUpdateInstallPage)
     else:
         from web import control
         control.request_update()
     installer.A('install_wizard.state', newstate)
Example #13
def DeleteBackup(backupID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes a single backup ID completely. Perform several operations:

    1) abort backup if it just started and is running at the moment
    2) if we requested for files for this backup we do not need it anymore - remove 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index data base
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index data base, soon it will be synchronized with "index_synchronizer()" state machine
    """
    backupID = global_id.CanonicalID(backupID)
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(8, 'backup_control.DeleteBackup %s is in process, stopping' % backupID)
        return True
    from customer import io_throttle
    import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested for files for this backup - we do not need it anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    # callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([('backupID', backupID), ])
    return True
Example #14
def DeleteBackup(backupID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes a single backup ID completely. Perform several operations:

    1) abort backup if it just started and is running at the moment
    2) if we requested for files for this backup we do not need it anymore - remove 'Data' requests
    3) remove interests in transport_control, see ``lib.transport_control.DeleteBackupInterest()``
    4) remove that ID from the index data base
    5) remove local files for this backup ID
    6) remove all remote info for this backup from the memory, see ``p2p.backup_matrix.EraseBackupRemoteInfo()``
    7) also remove local info from memory, see ``p2p.backup_matrix.EraseBackupLocalInfo()``
    8) stop any rebuilding, we will restart it soon
    9) check and calculate used space
    10) save the modified index data base, soon it will be synchronized with "index_synchronizer()" state machine
    """
    # if the user deletes a backup, make sure we remove any work we're doing on it
    # abort backup if it just started and is running at the moment
    if AbortRunningBackup(backupID):
        lg.out(8, 'backup_control.DeleteBackup %s is in process, stopping' % backupID)
        return True
    from customer import io_throttle
    import backup_rebuilder
    lg.out(8, 'backup_control.DeleteBackup ' + backupID)
    # if we requested for files for this backup - we do not need it anymore
    io_throttle.DeleteBackupRequests(backupID)
    io_throttle.DeleteBackupSendings(backupID)
    # remove interests in transport_control
    callback.delete_backup_interest(backupID)
    # mark it as being deleted in the db, well... just remove it from the index now
    if not backup_fs.DeleteBackupID(backupID):
        return False
    # finally remove local files for this backupID
    if removeLocalFilesToo:
        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
    # remove all remote info for this backup from the memory
    backup_matrix.EraseBackupRemoteInfo(backupID)
    # also remove local info
    backup_matrix.EraseBackupLocalInfo(backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # in some cases we want to save the DB later
    if saveDB:
        Save()
        control.request_update([('backupID', backupID), ])
    return True
Example #15
def IncomingSupplierBackupIndex(newpacket):
    """
    Called by ``p2p.p2p_service`` when a remote copy of our local index data
    base ( in the "Data" packet ) is received from one of our suppliers.

    The index is also stored on suppliers to be able to restore it.
    """
    b = encrypted.Unserialize(newpacket.Payload)
    if b is None:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        return
    try:
        session_key = key.DecryptLocalPrivateKey(b.EncryptedSessionKey)
        padded_data = key.DecryptWithSessionKey(session_key, b.EncryptedData)
        inpt = cStringIO.StringIO(padded_data[:int(b.Length)])
        supplier_revision = inpt.readline().rstrip('\n')
        if supplier_revision:
            supplier_revision = int(supplier_revision)
        else:
            supplier_revision = -1
        # inpt.seek(0)
    except:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        lg.out(2, '\n' + padded_data)
        lg.exc()
        try:
            inpt.close()
        except:
            pass
        return
    if driver.is_on('service_backup_db'):
        from storage import index_synchronizer
        index_synchronizer.A('index-file-received', (newpacket, supplier_revision))
    if revision() >= supplier_revision:
        inpt.close()
        lg.out(4, 'backup_control.IncomingSupplierBackupIndex SKIP, supplier %s revision=%d, local revision=%d' % (
            newpacket.RemoteID, supplier_revision, revision(), ))
        return
    raw_data = inpt.read()
    inpt.close()
    if ReadIndex(raw_data):
        commit(supplier_revision)
        backup_fs.Scan()
        backup_fs.Calculate()
        WriteIndex()
        control.request_update()
        lg.out(4, 'backup_control.IncomingSupplierBackupIndex updated to revision %d from %s' % (
            revision(), newpacket.RemoteID))
    else:
        lg.warn('failed to read catalog index from supplier')
Example #16
def DeletePathBackups(pathID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes all backups of given path ID
    Doing same operations as ``DeleteBackup()``.
    """
    import backup_rebuilder
    from customer import io_throttle
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
    if item is None:
        return False
    lg.out(8, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        lg.out(8, '        removing %s' % backupID)
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested for files for this backup - we do not need it anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
Example #17
def on_outbox_packet(outpacket, wide, callbacks, target=None, route=None):
    """
    """
    started_packets = packet_out.search_similar_packets(outpacket)
    if started_packets:
        for active_packet, active_item in started_packets:
            if callbacks:
                for command, cb in callbacks.items():
                    active_packet.set_callback(command, cb)
            return active_packet
    pkt_out = packet_out.create(outpacket, wide, callbacks, target, route)
    if _Debug and lg.is_debug(_DebugLevel):
        monitoring()
    control.request_update([('packet', outpacket.PacketID)])
    return pkt_out
Example #18
def on_outbox_packet(outpacket, wide, callbacks, target=None, route=None):
    """
    
    """
    started_packets = packet_out.search_similar_packets(outpacket)
    if started_packets:
        for active_packet, active_item in started_packets:
            if callbacks:
                for command, cb in callbacks.items():
                    active_packet.set_callback(command, cb)
            return active_packet
    pkt_out = packet_out.create(outpacket, wide, callbacks, target, route)
    if _Debug and lg.is_debug(_DebugLevel):
        monitoring()
    control.request_update([("packet", outpacket.PacketID)])
    return pkt_out
Example #19
def _delete_version(params):
    lg.out(6, '_delete_version %s' % str(params))
    backupID = params['backupid']
    if not packetid.Valid(backupID):
        return {'result': {"success": False, "error": "backupID %s is not valid" % backupID}}
    pathID, version = packetid.SplitBackupID(backupID)
    if not backup_fs.ExistsID(pathID):
        return {'result': {"success": False, "error": "path %s not found" % pathID}}
    if version:
        backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    backup_monitor.A('restart')
    control.request_update([('backupID', backupID), ])
    return {'result': {"success": True, "error": None}}
Example #20
def _delete(params):
    # localPath = params['path'].lstrip('/')
    pathID = params['id']
    if not packetid.Valid(pathID):
        return {'result': {"success": False, "error": "path %s is not valid" % pathID}}
    if not backup_fs.ExistsID(pathID):
        return {'result': {"success": False, "error": "path %s not found" % pathID}}
    backup_control.DeletePathBackups(pathID, saveDB=False, calculate=False)
    backup_fs.DeleteLocalDir(settings.getLocalBackupsDir(), pathID)
    backup_fs.DeleteByID(pathID)
    backup_fs.Scan()
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    backup_monitor.A('restart')
    return {'result': {"success": True, "error": None}}
Example #21
def OnJobDone(backupID, result):
    """
    A callback method fired when backup is finished.

    Here we need to save the index data base.
    """
    import backup_rebuilder
    from customer import io_throttle
    lg.out(4, '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    lg.out(4, 'backup_control.OnJobDone [%s] %s, %d more tasks' % (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = packetid.MakeBackupID(customerGlobalID, remotePath, version)
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        io_throttle.DeleteBackupRequests(backupID)
                        io_throttle.DeleteBackupSendings(backupID)
                        # callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([('pathID', remotePath), ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if we have a lot tasks started this will produce a lot unneeded actions
        # will be smarter to restart it once we finish all tasks
        # because user will probably leave BitDust working after starting a long running operations
        from storage import backup_monitor
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version, result)
Example #22
def OnJobDone(backupID, result):
    """
    A callback method fired when backup is finished.

    Here we need to save the index data base.
    """
    import backup_rebuilder
    from customer import io_throttle
    lg.out(4, '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    lg.out(4, 'backup_control.OnJobDone [%s] %s, %d more tasks' % (backupID, result, len(tasks())))
    jobs().pop(backupID)
    pathID, version = packetid.SplitBackupID(backupID)
    if result == 'done':
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(pathID)
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for version in versions[maxBackupsNum:]:
                        item.delete_version(version)
                        backupID = pathID + '/' + version
                        backup_rebuilder.RemoveBackupToWork(backupID)
                        io_throttle.DeleteBackupRequests(backupID)
                        io_throttle.DeleteBackupSendings(backupID)
                        callback.delete_backup_interest(backupID)
                        backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
                        backup_matrix.EraseBackupLocalInfo(backupID)
        backup_fs.ScanID(pathID)
        backup_fs.Calculate()
        Save()
        control.request_update([('pathID', pathID), ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if we have a lot tasks started this will produce a lot unneeded actions
        # will be smarter to restart it once we finish all tasks
        # because user will probably leave BitDust working after starting a long running operations
        from storage import backup_monitor
        backup_monitor.A('restart')
    RunTasks()
    reactor.callLater(0, FireTaskFinishedCallbacks, pathID, version, result)
Example #23
def DeletePathBackups(pathID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes all backups of given path ID.

    Doing same operations as ``DeleteBackup()``.
    """
    import backup_rebuilder
    from customer import io_throttle
    # get the working item
    item = backup_fs.GetByID(pathID)
    if item is None:
        return False
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = pathID + '/' + version
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested for files for this backup - we do not need it anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupLocalInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
Example #24
def on_register_file_receiving(proto, host, sender_idurl, filename, size=0):
    """
    Called from transport plug-in when receiving a single file were started
    from some peer.

    Must return a unique transfer ID, create a `FileTransferInfo` object
    and put it into "transfers" list. Plug-in's code must create a
    temporary file and write incoming data into that file.
    """
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel, '... IN ... %d receive {%s} via [%s] from %s at %s' %
            (transfer_id, os.path.basename(filename), proto,
             nameurl.GetName(sender_idurl), host))
    packet_in.create(transfer_id).automat(
        'register-item', (proto, host, sender_idurl, filename, size))
    control.request_update([('stream', transfer_id)])
    return transfer_id
Example #25
def DeleteAllBackups():
    """
    Remove all backup IDs from index data base, see ``DeleteBackup()`` method.
    """
    # prepare a list of all known backup IDs
    all_ids = set(backup_fs.ListAllBackupIDs())
    all_ids.update(backup_matrix.GetBackupIDs(remote=True, local=True))
    lg.out(4, 'backup_control.DeleteAllBackups %d ID\'s to kill' % len(all_ids))
    # delete one by one
    for backupID in all_ids:
        DeleteBackup(backupID, saveDB=False, calculate=False)
    # scan all files
    backup_fs.Scan()
    # check and calculate used space
    backup_fs.Calculate()
    # save the index
    Save()
    # refresh the GUI
    control.request_update()
Example #26
def on_register_file_receiving(proto, host, sender_idurl, filename, size=0):
    """
    Called from transport plug-in when receiving a single file were started
    from some peer.

    Must return a unique transfer ID, create a `FileTransferInfo` object
    and put it into "transfers" list. Plug-in's code must create a
    temporary file and write incoming data into that file.
    """
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel,
            "... IN ... %d receive {%s} via [%s] from %s at %s"
            % (transfer_id, os.path.basename(filename), proto, nameurl.GetName(sender_idurl), host),
        )
    packet_in.create(transfer_id).automat("register-item", (proto, host, sender_idurl, filename, size))
    control.request_update([("stream", transfer_id)])
    return transfer_id
Example #27
def IncomingSupplierBackupIndex(newpacket):
    """
    Called by ``p2p.p2p_service`` when a remote copy of our local index data
    base ( in the "Data" packet ) is received from one of our suppliers.

    The index is also stored on suppliers to be able to restore it.
    """
    b = encrypted.Unserialize(newpacket.Payload)
    if b is None:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        return
    try:
        session_key = key.DecryptLocalPK(b.EncryptedSessionKey)
        padded_data = key.DecryptWithSessionKey(session_key, b.EncryptedData)
        inpt = cStringIO.StringIO(padded_data[:int(b.Length)])
        supplier_revision = inpt.readline().rstrip('\n')
        if supplier_revision:
            supplier_revision = int(supplier_revision)
        else:
            supplier_revision = -1
        inpt.seek(0)
    except:
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex ERROR reading data from %s' % newpacket.RemoteID)
        lg.out(2, '\n' + padded_data)
        lg.exc()
        try:
            inpt.close()
        except:
            pass
        return
    if driver.is_started('service_backup_db'):
        from storage import index_synchronizer
        index_synchronizer.A('index-file-received', (newpacket, supplier_revision))
    if revision() < supplier_revision:
        ReadIndex(inpt)
        backup_fs.Scan()
        backup_fs.Calculate()
        WriteIndex()
        control.request_update()
        lg.out(2, 'backup_control.IncomingSupplierBackupIndex updated to revision %d from %s' % (
            revision(), newpacket.RemoteID))
    inpt.close()
Example #28
def on_register_file_sending(proto,
                             host,
                             receiver_idurl,
                             filename,
                             size=0,
                             description=''):
    """
    Called from transport plug-in when sending a single file were started to
    some remote peer. Must return a unique transfer ID so plug-in will know
    that ID.

    After finishing that given transfer - that ID is passed to `unregister_file_sending()`.
    """
    if _Debug:
        lg.out(
            _DebugLevel,
            'gateway.on_register_file_sending %s %s' % (filename, description))
    pkt_out, work_item = packet_out.search(proto,
                                           host,
                                           filename,
                                           remote_idurl=receiver_idurl)
    if pkt_out is None:
        if _Debug:
            lg.out(
                _DebugLevel, '    skip, packet_out not found: %r %r %r' %
                (proto, host, os.path.basename(filename)))
        return None
    transfer_id = make_transfer_ID()
    if _Debug:
        lg.out(
            _DebugLevel, '... OUT ... %s (%d) send {%s} via [%s] to %s at %s' %
            (pkt_out.description, transfer_id, os.path.basename(filename),
             proto, nameurl.GetName(receiver_idurl), host))


#    if pkt_out.remote_idurl != receiver_idurl and receiver_idurl:
#        if _Debug:
#            lg.out(_DebugLevel, 'gateway.on_register_file_sending [%s] receiver idurl is different [%s]' % (pkt_out.remote_idurl, receiver_idurl))
    pkt_out.automat('register-item', (proto, host, filename, transfer_id))
    control.request_update([('stream', transfer_id)])
    return transfer_id
Example #29
def _upload(params):
    path = params['path']
    if bpio.Linux() or bpio.Mac():
        path = '/' + (path.lstrip('/'))
    localPath = unicode(path)
    if not bpio.pathExist(localPath):
        return {'result': {"success": False, "error": 'local path %s was not found' % path}}
    result = []
    pathID = backup_fs.ToID(localPath)
    if pathID is None:
        if bpio.pathIsDir(localPath):
            pathID, iter, iterID = backup_fs.AddDir(localPath, True)
            result.append('new folder was added: %s' % localPath)
        else:
            pathID, iter, iterID = backup_fs.AddFile(localPath, True)
            result.append('new file was added: %s' % localPath)
    backup_control.StartSingle(pathID, localPath)
    backup_fs.Calculate()
    backup_control.Save()
    control.request_update([('pathID', pathID), ])
    result.append('backup started: %s' % pathID)
    return {'result': result, }
Example #30
def on_cancelled_file_sending(proto, host, filename, size, description="", error_message=None):
    """
    
    """
    pkt_out, work_item = packet_out.search(proto, host, filename)
    if pkt_out is None:
        if _Debug:
            lg.out(
                _DebugLevel,
                "gateway.on_cancelled_file_sending packet_out %s %s %s not found - IT IS OK"
                % (proto, host, os.path.basename(filename)),
            )
        return True
    pkt_out.automat("item-cancelled", (proto, host, filename, size, description, error_message))
    control.request_update([("packet", pkt_out.outpacket.PacketID)])
    if _Debug:
        lg.out(
            _DebugLevel,
            ">>> OUT >>>  {%s} CANCELLED via [%s] to %s : %s"
            % (os.path.basename(filename), proto, host, error_message),
        )
    return True
Example #31
def on_unregister_file_receiving(transfer_id, status, bytes_received, error_message=""):
    """
    Called from transport plug-in after finish receiving a single file.
    """
    pkt_in = packet_in.get(transfer_id)
    assert pkt_in is not None
    if status == "finished":
        if _Debug:
            lg.out(
                _DebugLevel,
                "<<< IN <<< (%d) [%s://%s] %s with %d bytes"
                % (transfer_id, pkt_in.proto, pkt_in.host, status.upper(), bytes_received),
            )
    else:
        if _Debug:
            lg.out(
                _DebugLevel,
                "<<< IN <<< (%d) [%s://%s] %s : %s"
                % (transfer_id, pkt_in.proto, pkt_in.host, status.upper(), error_message),
            )
    pkt_in.automat("unregister-item", (status, bytes_received, error_message))
    control.request_update([("stream", transfer_id)])
    return True
Example #32
def on_unregister_file_sending(transfer_id, status, bytes_sent, error_message=None):
    """
    Called from transport plug-in after finish sending a single file.
    """
    if transfer_id is None:
        return False
    if _Debug:
        lg.out(_DebugLevel, "gateway.on_unregister_file_sending %s %s" % (transfer_id, status))
    pkt_out, work_item = packet_out.search_by_transfer_id(transfer_id)
    if pkt_out is None:
        if _Debug:
            lg.out(_DebugLevel, "        %s is not found" % str(transfer_id))
        return False
    pkt_out.automat("unregister-item", (transfer_id, status, bytes_sent, error_message))
    control.request_update([("stream", transfer_id)])
    if status == "finished":
        if _Debug:
            lg.out(
                _DebugLevel,
                ">>> OUT >>> %s (%d) [%s://%s] %s with %d bytes"
                % (pkt_out.description, transfer_id, work_item.proto, work_item.host, status.upper(), bytes_sent),
            )
    else:
        if _Debug:
            lg.out(
                _DebugLevel,
                ">>> OUT >>> %s (%d) [%s://%s] %s : %s"
                % (
                    pkt_out.description,
                    transfer_id,
                    work_item.proto,
                    work_item.host,
                    str(status).upper(),
                    error_message,
                ),
            )
    return True
Example #33
def on_unregister_file_receiving(transfer_id,
                                 status,
                                 bytes_received,
                                 error_message=''):
    """
    Called from transport plug-in after finish receiving a single file.
    """
    pkt_in = packet_in.get(transfer_id)
    assert pkt_in is not None
    if status == 'finished':
        if _Debug:
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s with %d bytes' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 bytes_received))
    else:
        if _Debug:
            lg.out(
                _DebugLevel, '<<< IN <<< (%d) [%s://%s] %s : %s' %
                (transfer_id, pkt_in.proto, pkt_in.host, status.upper(),
                 error_message))
    pkt_in.automat('unregister-item', (status, bytes_received, error_message))
    control.request_update([('stream', transfer_id)])
    return True
Example #34
 def doCleanUpBackups(self, arg):
     # here we check all backups we have and remove the old one
     # user can set how many versions of that file or folder to keep
     # other versions (older) will be removed here
     versionsToKeep = settings.getBackupsMaxCopies()
     bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(),
                                                0)
     customerGlobID = my_id.getGlobalID()
     lg.out(
         6,
         'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
         % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             pathID = global_id.CanonicalID(pathID)
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so must be sorted may be
             while len(versions) > versionsToKeep:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  versions.pop(0))
                 lg.out(
                     6,
                     'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                     % (len(versions), versionsToKeep, localPath, backupID))
                 backup_control.DeleteBackup(backupID,
                                             saveDB=False,
                                             calculate=False)
                 delete_count += 1
     # we need also to fit used space into needed space (given from other users)
     # they trust us - do not need to take extra space from our friends
     # so remove oldest backups, but keep at least one for every folder - at least locally!
     # still our suppliers will remove our "extra" files by their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             pathID = global_id.CanonicalID(pathID)
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  version)
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     lg.out(
                         6,
                         'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                         % (bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID,
                                                 saveDB=False,
                                                 calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from web import control
         control.request_update()
     collected = gc.collect()
     lg.out(
         6,
         'backup_monitor.doCleanUpBackups collected %d objects' % collected)
Example #35
def inbox(info):
    """
    1) The protocol modules write to temporary files and give us that filename.
    2) We unserialize the data.
    3) We check that it is for us.
    4) We check that it is from one of our contacts.
    5) We use signed.validate() to check the signature and that number fields are numbers.
    6) Any other sanity checks we can do, and if anything is funny we toss out the packet.
    7) Then change the filename to the PacketID that it should be and call the right
       function(s) for this new packet (encryptedblock, scrubber, remotetester,
       customerservice, ...) to dispatch it to the right place(s).
    8) We have to keep track of bandwidth to/from everyone, and make a report every
       24 hours which we send to BitDust sometime in the 24 hours after that.
    """
    global _DoingShutdown
    global _LastInboxPacketTime
    if _DoingShutdown:
        if _Debug:
            lg.out(_DebugLevel - 4, "gateway.inbox ignoring input since _DoingShutdown ")
        return None
    if info.filename == "" or not os.path.exists(info.filename):
        lg.err("bad filename=" + info.filename)
        return None
    try:
        data = bpio.ReadBinaryFile(info.filename)
    except:
        lg.err("gateway.inbox ERROR reading file " + info.filename)
        return None
    if len(data) == 0:
        lg.err("gateway.inbox ERROR zero byte file from %s://%s" % (info.proto, info.host))
        return None
    try:
        newpacket = signed.Unserialize(data)
    except:
        lg.err("gateway.inbox ERROR during Unserialize data from %s://%s" % (info.proto, info.host))
        lg.exc()
        return None
    if newpacket is None:
        lg.warn("newpacket from %s://%s is None" % (info.proto, info.host))
        return None
    try:
        Command = newpacket.Command
        OwnerID = newpacket.OwnerID
        CreatorID = newpacket.CreatorID
        PacketID = newpacket.PacketID
        Date = newpacket.Date
        Payload = newpacket.Payload
        RemoteID = newpacket.RemoteID
        Signature = newpacket.Signature
        packet_sz = len(data)
    except:
        lg.err("gateway.inbox ERROR during Unserialize data from %s://%s" % (info.proto, info.host))
        lg.err("data length=" + str(len(data)))
        lg.exc()
        fd, filename = tmpfile.make("other", ".bad")
        os.write(fd, data)
        os.close(fd)
        return None
    _LastInboxPacketTime = time.time()
    if _Debug:
        lg.out(
            _DebugLevel - 8,
            "gateway.inbox [%s] signed by %s|%s (for %s) from %s://%s"
            % (
                Command,
                nameurl.GetName(OwnerID),
                nameurl.GetName(CreatorID),
                nameurl.GetName(RemoteID),
                info.proto,
                info.host,
            ),
        )
    if _Debug and lg.is_debug(_DebugLevel):
        monitoring()
    control.request_update([("packet", newpacket.PacketID)])
    return newpacket
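Taken together, the call sites above show two calling conventions for ``control.request_update()``: it is called either with no arguments, asking the web GUI to refresh everything, or with a list of hints describing what just changed. The sketch below only collects the argument shapes seen in these examples in one place; the helper function and its parameter names are hypothetical and do not exist in the BitDust sources.

from web import control

def _illustrate_request_update(pathID, backupID, transfer_id, packet_id, idurl, state):
    # Hypothetical helper, for illustration only: each call repeats an
    # argument shape that appears in the examples above.
    control.request_update()                              # refresh everything
    control.request_update([('pathID', pathID), ])        # a catalog path changed
    control.request_update([('backupID', backupID), ])    # a single backup version changed
    control.request_update([('stream', transfer_id)])     # a file transfer (in or out) changed
    control.request_update([('packet', packet_id)])       # a packet was sent or received
    control.request_update([('contact', idurl)])          # a contact's online state changed
    control.request_update([{'state': state}, ])          # the installer state machine changed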