Example #1
def OnJobDone(backupID, result):
    """
    A callback method fired when backup is finished.

    Here we need to save the index database.
    """
    from storage import backup_rebuilder
    # from customer import io_throttle
    lg.info('job done [%s] with result "%s", %d more tasks' %
            (backupID, result, len(tasks())))
    jobs().pop(backupID)
    customerGlobalID, remotePath, version = packetid.SplitBackupID(backupID)
    customer_idurl = global_id.GlobalUserToIDURL(customerGlobalID)
    if result == 'done':
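        # retention policy: keep only the newest `maxBackupsNum` copies of this path
        # and drop any older versions, both locally and from the rebuild queue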
        maxBackupsNum = settings.getBackupsMaxCopies()
        if maxBackupsNum:
            item = backup_fs.GetByID(remotePath,
                                     iterID=backup_fs.fsID(customer_idurl))
            if item:
                versions = item.list_versions(sorted=True, reverse=True)
                if len(versions) > maxBackupsNum:
                    for oldVersion in versions[maxBackupsNum:]:
                        item.delete_version(oldVersion)
                        oldBackupID = packetid.MakeBackupID(
                            customerGlobalID, remotePath, oldVersion)
                        backup_rebuilder.RemoveBackupToWork(oldBackupID)
                        # io_throttle.DeleteBackupRequests(oldBackupID)
                        # io_throttle.DeleteBackupSendings(oldBackupID)
                        # callback.delete_backup_interest(oldBackupID)
                        backup_fs.DeleteLocalBackup(
                            settings.getLocalBackupsDir(), oldBackupID)
                        backup_matrix.EraseBackupLocalInfo(oldBackupID)
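        # rescan this path in the catalog, recalculate totals and save the updated index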
        backup_fs.ScanID(remotePath)
        backup_fs.Calculate()
        Save()
        control.request_update([
            ('pathID', remotePath),
        ])
        # TODO: check used space, if we have over use - stop all tasks immediately
        backup_matrix.RepaintBackup(backupID)
    elif result == 'abort':
        DeleteBackup(backupID)
    if len(tasks()) == 0:
        # do we really need to restart backup_monitor after each backup?
        # if a lot of tasks were started this would produce a lot of unneeded actions;
        # it would be smarter to restart it once all tasks are finished,
        # because the user will probably leave BitDust running after starting a long-running operation
        from storage import backup_monitor
        if _Debug:
            lg.out(
                _DebugLevel,
                'backup_control.OnJobDone restarting backup_monitor() machine because no tasks left'
            )
        backup_monitor.A('restart')
    reactor.callLater(0, RunTask)  # @UndefinedVariable
    reactor.callLater(0, FireTaskFinishedCallbacks, remotePath, version,
                      result)  # @UndefinedVariable
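
The retention step above depends on several BitDust modules (backup_fs, backup_matrix, settings). The following is a minimal, self-contained sketch of the same idea: keep only the newest max_copies version labels of an item. The prune_versions helper and the version strings are hypothetical stand-ins for illustration, not part of the BitDust API.

def prune_versions(versions, max_copies):
    """Return (kept, deleted) lists, keeping only the newest `max_copies` versions."""
    # newest first, mirroring item.list_versions(sorted=True, reverse=True)
    ordered = sorted(versions, reverse=True)
    if not max_copies or len(ordered) <= max_copies:
        return ordered, []
    return ordered[:max_copies], ordered[max_copies:]


kept, deleted = prune_versions(['v001', 'v002', 'v003'], 2)
print(kept)     # ['v003', 'v002']
print(deleted)  # ['v001']
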
def _itemRestored(backupID, result):
    customerGlobalID, remotePath, _ = packetid.SplitBackupID(backupID)
    backup_fs.ScanID(
        remotePath,
        customer_idurl=global_id.GlobalUserToIDURL(customerGlobalID))
    backup_fs.Calculate()
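
BitDust drives completion callbacks through the Twisted reactor (OnJobDone above uses reactor.callLater). The sketch below shows one way a restore-completion callback like _itemRestored could be wired with a Twisted Deferred; it is an illustration only, assuming just that Twisted is installed, and the on_restored stub and the backup ID string are hypothetical, not the project's actual restore API.

from twisted.internet.defer import Deferred

def on_restored(backupID, result):
    # stand-in for _itemRestored(); just reports the outcome in this sketch
    print('restored %s with result %r' % (backupID, result))

d = Deferred()
d.addCallback(lambda result: on_restored('example_backup_id', result))
d.callback('restore done')  # simulates the restore machinery finishing
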