Example #1
def GetBackupStats(backupID):
    """
    Collect needed info from "remote" matrix and create a detailed report about
    given backup.
    """
    if backupID not in remote_files():
        return 0, 0, [(0, 0)] * contactsdb.num_suppliers()
    percentPerSupplier = 100.0 / contactsdb.num_suppliers()
    # ??? maxBlockNum = remote_max_block_numbers().get(backupID, -1)
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    fileNumbers = [0] * contactsdb.num_suppliers()
    totalNumberOfFiles = 0
    for blockNum in remote_files()[backupID].keys():
        for supplierNum in xrange(len(fileNumbers)):
            if supplierNum < contactsdb.num_suppliers():
                if remote_files()[backupID][blockNum]["D"][supplierNum] == 1:
                    fileNumbers[supplierNum] += 1
                    totalNumberOfFiles += 1
                if remote_files()[backupID][blockNum]["P"][supplierNum] == 1:
                    fileNumbers[supplierNum] += 1
                    totalNumberOfFiles += 1
    statsArray = []
    for supplierNum in xrange(contactsdb.num_suppliers()):
        if maxBlockNum > -1:
            # 0.5 because we count both Parity and Data.
            percent = percentPerSupplier * 0.5 * fileNumbers[supplierNum] / (maxBlockNum + 1)
        else:
            percent = 0.0
        statsArray.append((percent, fileNumbers[supplierNum]))
    del fileNumbers
    return totalNumberOfFiles, maxBlockNum, statsArray
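A minimal usage sketch for the function above (the backup ID string and the printed format are hypothetical, they only illustrate the shape of the returned tuple):

totalFiles, maxBlockNum, statsArray = GetBackupStats("0/0/1/F20090709034221PM")
# statsArray holds one (percent, filesCount) pair per supplier
for supplierNum, (percent, filesCount) in enumerate(statsArray):
    print("supplier %d keeps %d files, %.1f%%" % (supplierNum, filesCount, percent))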
Example #2
 def doDecideToDismiss(self, arg):
     """
     Action method.
     """
     global _SuppliersToFire
     result = set(_SuppliersToFire)
     _SuppliersToFire = []
     # if there are some empty suppliers we need to get rid of them,
     # but there is no need to dismiss anyone at the moment.
     if '' in contactsdb.suppliers():
         lg.out(10, 'fire_hire.doDecideToDismiss found empty supplier, SKIP')
         self.automat('made-decision', [])
         return
     for supplier_idurl in contactsdb.suppliers():
         if not supplier_idurl:
             continue
         sc = supplier_connector.by_idurl(supplier_idurl)
         if not sc:
             continue
         if sc.state == 'NO_SERVICE':
             result.add(supplier_idurl)
     if contactsdb.num_suppliers() > settings.getSuppliersNumberDesired():
         for supplier_index in range(
                 settings.getSuppliersNumberDesired(),
                 contactsdb.num_suppliers()):
             idurl = contactsdb.supplier(supplier_index)
             if idurl:
                 result.add(idurl)
     result = list(result)
     lg.out(10, 'fire_hire.doDecideToDismiss %s' % result)
     self.automat('made-decision', result)
Example #3
def RemoteFileReport(backupID, blockNum, supplierNum, dataORparity, result):
    """
    Writes info for a single piece of data into "remote" matrix.

    May be called when you get an Ack packet from a remote supplier after
    you sent him some Data packet.
    """
    blockNum = int(blockNum)
    supplierNum = int(supplierNum)
    if supplierNum >= contactsdb.num_suppliers():
        lg.out(4, "backup_matrix.RemoteFileReport got too big supplier number, possible this is an old packet")
        return
    if backupID not in remote_files():
        remote_files()[backupID] = {}
        lg.out(8, "backup_matrix.RemoteFileReport new remote entry for %s created in the memory" % backupID)
    if blockNum not in remote_files()[backupID]:
        remote_files()[backupID][blockNum] = {
            "D": [0] * contactsdb.num_suppliers(),
            "P": [0] * contactsdb.num_suppliers(),
        }
    # save backed up block info into remote info structure, synchronize on hand info
    flag = 1 if result else 0
    if dataORparity == "Data":
        remote_files()[backupID][blockNum]["D"][supplierNum] = flag
    elif dataORparity == "Parity":
        remote_files()[backupID][blockNum]["P"][supplierNum] = flag
    else:
        lg.warn("incorrect backup ID: %s" % backupID)
    # if we know only 5 blocks stored on remote machine
    # but we have backed up 6th block - remember this
    remote_max_block_numbers()[backupID] = max(remote_max_block_numbers().get(backupID, -1), blockNum)
    # mark to repaint this backup in gui
    RepaintBackup(backupID)
Example #4
def GetRemoteMatrix(backupID, blockNum):
    """
    
    """
    if backupID not in remote_files():
        return {"D": [0] * contactsdb.num_suppliers(), "P": [0] * contactsdb.num_suppliers()}
    if blockNum not in remote_files()[backupID]:
        return {"D": [0] * contactsdb.num_suppliers(), "P": [0] * contactsdb.num_suppliers()}
    return remote_files()[backupID][blockNum]
Example #5
def GetRemoteParityArray(backupID, blockNum):
    """
    Get "remote" info for a single block of given backup, this is for "Parity"
    surface.
    """
    if backupID not in remote_files():
        return [0] * contactsdb.num_suppliers()
    if blockNum not in remote_files()[backupID]:
        return [0] * contactsdb.num_suppliers()
    return remote_files()[backupID][blockNum]["P"]
Example #6
def GetLocalDataArray(backupID, blockNum):
    """
    Get "local" info for a single block of given backup, this is for "Data"
    surface.
    """
    if backupID not in local_files():
        return [0] * contactsdb.num_suppliers()
    if blockNum not in local_files()[backupID]:
        return [0] * contactsdb.num_suppliers()
    return local_files()[backupID][blockNum]["D"]
Example #7
def LocalBlockReport(backupID, blockNumber, result):
    """
    This updates "local" matrix - a several pieces corresponding to given block of data.
    """
    # if contactsdb.num_suppliers() != num_suppliers:
    #     lg.out(6, 'backup_matrix.LocalBlockReport %s skipped, because number of suppliers were changed' % str(newblock))
    #     return
    if result is None:
        lg.warn("result is None")
        return
    try:
        blockNum = int(blockNumber)
    except:
        lg.exc()
        return
    repaint_flag = False
    for supplierNum in xrange(contactsdb.num_suppliers()):
        for dataORparity in ("Data", "Parity"):
            packetID = packetid.MakePacketID(backupID, blockNum, supplierNum, dataORparity)
            local_file = os.path.join(settings.getLocalBackupsDir(), packetID)
            if backupID not in local_files():
                local_files()[backupID] = {}
                repaint_flag = True
                # lg.out(14, 'backup_matrix.LocalFileReport new local entry for %s created in the memory' % backupID)
            if blockNum not in local_files()[backupID]:
                local_files()[backupID][blockNum] = {
                    "D": [0] * contactsdb.num_suppliers(),
                    "P": [0] * contactsdb.num_suppliers(),
                }
                repaint_flag = True
            if not os.path.isfile(local_file):
                local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 0
                repaint_flag = True
                continue
            local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 1
            # lg.out(6, 'backup_matrix.LocalFileReport %s max block num is %d' % (backupID, local_max_block_numbers()[backupID]))
            if backupID not in local_backup_size():
                local_backup_size()[backupID] = 0
                repaint_flag = True
            try:
                local_backup_size()[backupID] += os.path.getsize(local_file)
                repaint_flag = True
            except:
                lg.exc()
    if backupID not in local_max_block_numbers():
        local_max_block_numbers()[backupID] = -1
    if local_max_block_numbers()[backupID] < blockNum:
        local_max_block_numbers()[backupID] = blockNum
    if repaint_flag:
        RepaintBackup(backupID)
Example #8
def LocalFileReport(packetID=None, backupID=None, blockNum=None, supplierNum=None, dataORparity=None):
    """
    Writes info for a single piece of data into "local" matrix.

    You can use two forms:
    * pass ``packetID`` parameter only
    * pass all other parameters and do not use ``packetID``

    This is called when a new local file is created, for example during the rebuilding process.
    """
    if packetID is not None:
        backupID, blockNum, supplierNum, dataORparity = packetid.Split(packetID)
        if backupID is None:
            lg.warn("incorrect filename: " + packetID)
            return
    else:
        blockNum = int(blockNum)
        supplierNum = int(supplierNum)
        packetID = packetid.MakePacketID(backupID, blockNum, supplierNum, dataORparity)
    filename = packetID
    if dataORparity not in ["Data", "Parity"]:
        lg.warn("Data or Parity? " + filename)
        return
    if supplierNum >= contactsdb.num_suppliers():
        # lg.warn('supplier number? %d > %d : %s' % (supplierNum, contactsdb.num_suppliers(), filename))
        return
    localDest = os.path.join(settings.getLocalBackupsDir(), filename)
    if backupID not in local_files():
        local_files()[backupID] = {}
    if blockNum not in local_files()[backupID]:
        local_files()[backupID][blockNum] = {
            "D": [0] * contactsdb.num_suppliers(),
            "P": [0] * contactsdb.num_suppliers(),
        }
    if not os.path.isfile(localDest):
        local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 0
        return
    local_files()[backupID][blockNum][dataORparity[0]][supplierNum] = 1
    if backupID not in local_max_block_numbers():
        local_max_block_numbers()[backupID] = -1
    if local_max_block_numbers()[backupID] < blockNum:
        local_max_block_numbers()[backupID] = blockNum
    if backupID not in local_backup_size():
        local_backup_size()[backupID] = 0
    try:
        local_backup_size()[backupID] += os.path.getsize(localDest)
    except:
        lg.exc()
    RepaintBackup(backupID)
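A small sketch of the two call forms described in the docstring above (the backup ID, block and supplier numbers are hypothetical, and the packet ID is assumed to follow the format produced by packetid.MakePacketID()):

# form 1: pass only the packetID, it gets split into its parts internally
LocalFileReport(packetID="0/0/1/F20090709034221PM/5-2-Data")
# form 2: pass all parts explicitly and leave packetID empty
LocalFileReport(backupID="0/0/1/F20090709034221PM", blockNum=5, supplierNum=2, dataORparity="Data")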
Example #9
 def isAllReady(self, arg):
     """
     Condition method.
     """
     lg.out(14, 'fire_hire.isAllReady %d %d' % (
         len(self.connect_list), contactsdb.num_suppliers()))
     return len(self.connect_list) == 0  # contactsdb.num_suppliers()
Example #10
def GetWeakRemoteBlock(backupID):
    """
    Scan all "remote" blocks for given backup and find the most "weak" block -
    less suppliers keeps the data and stay online.
    """
    supplierCount = contactsdb.num_suppliers()
    if backupID not in remote_files():
        return -1, 0, supplierCount
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    weakBlockNum = -1
    lessSuppliers = supplierCount
    activeArray = GetActiveArray()
    for blockNum in xrange(maxBlockNum + 1):
        if blockNum not in remote_files()[backupID].keys():
            return blockNum, 0, supplierCount
        goodSuppliers = supplierCount
        for supplierNum in xrange(supplierCount):
            if activeArray[supplierNum] != 1:
                goodSuppliers -= 1
                continue
            if (
                remote_files()[backupID][blockNum]["D"][supplierNum] != 1
                or remote_files()[backupID][blockNum]["P"][supplierNum] != 1
            ):
                goodSuppliers -= 1
        if goodSuppliers < lessSuppliers:
            lessSuppliers = goodSuppliers
            weakBlockNum = blockNum
    return weakBlockNum, lessSuppliers, supplierCount
Example #11
def GetActiveArray():
    """
    Loops over all suppliers and returns who is alive at the moment.

    Return a list of integers: 0 for an offline supplier and 1 if he is
    available right now. Uses ``p2p.contact_status.isOnline()`` to see
    the current state of each supplier.
    """
    from p2p import contact_status

    activeArray = [0] * contactsdb.num_suppliers()
    for i in xrange(contactsdb.num_suppliers()):
        supplier_idurl = contactsdb.supplier(i)
        if not supplier_idurl:
            continue
        if contact_status.isOnline(supplier_idurl):
            activeArray[i] = 1
        else:
            activeArray[i] = 0
    return activeArray
Example #12
def GetBackupBlocksAndPercent(backupID):
    """
    Another method to get details about a backup.
    """
    if backupID not in remote_files():
        return 0, 0
    # get max block number
    # ??? maxBlockNum = remote_max_block_numbers().get(backupID, -1)
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    if maxBlockNum == -1:
        return 0, 0
    # we count all remote files for this backup
    fileCounter = 0
    for blockNum in remote_files()[backupID].keys():
        for supplierNum in xrange(contactsdb.num_suppliers()):
            if remote_files()[backupID][blockNum]["D"][supplierNum] == 1:
                fileCounter += 1
            if remote_files()[backupID][blockNum]["P"][supplierNum] == 1:
                fileCounter += 1
    # +1 since zero based and *0.5 because Data and Parity
    return maxBlockNum + 1, 100.0 * 0.5 * fileCounter / ((maxBlockNum + 1) * contactsdb.num_suppliers())
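A hypothetical worked example of the formula above: with 4 suppliers and maxBlockNum = 9 there are 10 blocks, so a fully stored backup counts 10 * 4 * 2 = 80 remote files; if fileCounter comes out as 60, the returned pair is (10, 100.0 * 0.5 * 60 / (10 * 4)) = (10, 75.0).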
Example #13
 def doCleanUpBackups(self, arg):
     # here we check all backups we have and remove the old one
     # user can set how many versions of that file or folder to keep
     # other versions (older) will be removed here
     versionsToKeep = settings.getBackupsMaxCopies()
     bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(), 0)
     lg.out(6, 'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d' % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so must be sorted may be
             while len(versions) > versionsToKeep:
                 backupID = pathID + '/' + versions.pop(0)
                 lg.out(6, 'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s' % (len(versions), versionsToKeep, localPath, backupID))
                 backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                 delete_count += 1
     # we need also to fit used space into needed space (given from other users)
     # they trust us - do not need to take extra space from our friends
     # so remove oldest backups, but keep at least one for every folder - at least locally!
     # still our suppliers will remove our "extra" files by their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = pathID + '/' + version
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     lg.out(6, 'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s' % (
                         bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID, saveDB=False, calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from web import control
         control.request_update()
     collected = gc.collect()
     lg.out(6, 'backup_monitor.doCleanUpBackups collected %d objects' % collected)
Example #14
def _stats(params):
    from contacts import contactsdb
    from p2p import contact_status
    from lib import diskspace
    result = {}
    result['suppliers'] = contactsdb.num_suppliers()
    result['max_suppliers'] = settings.getSuppliersNumberDesired()
    result['online_suppliers'] = contact_status.countOnlineAmong(contactsdb.suppliers())
    result['customers'] = contactsdb.num_customers()
    result['bytes_donated'] = settings.getDonatedBytes()
    result['value_donated'] = diskspace.MakeStringFromBytes(settings.getDonatedBytes())
    result['bytes_needed'] = settings.getNeededBytes()
    result['value_needed'] = diskspace.MakeStringFromBytes(settings.getNeededBytes())
    result['bytes_used_total'] = backup_fs.sizebackups()
    result['value_used_total'] = diskspace.MakeStringFromBytes(backup_fs.sizebackups())
    result['bytes_used_supplier'] = 0 if (contactsdb.num_suppliers() == 0) else (int(backup_fs.sizebackups() / contactsdb.num_suppliers()))
    result['bytes_indexed'] = backup_fs.sizefiles() + backup_fs.sizefolders()
    result['files_count'] = backup_fs.numberfiles()
    result['folders_count'] = backup_fs.numberfolders()
    result['items_count'] = backup_fs.counter()
    result['timestamp'] = time.time()
    return {'result': result, }
Example #15
def ScanBlocksToRemove(backupID, check_all_suppliers=True):
    """
    This method compares both matrixes and finds pieces which are present on both
    sides.

    If the remote supplier already got that file it can be removed from the local
    HDD.
    """
    from customer import io_throttle

    lg.out(10, "backup_matrix.ScanBlocksToRemove for %s" % backupID)
    packets = []
    localMaxBlockNum = local_max_block_numbers().get(backupID, -1)
    if backupID not in remote_files() or backupID not in local_files():
        # no info about this backup yet - skip
        return packets
    for blockNum in xrange(localMaxBlockNum + 1):
        localArray = {"Data": GetLocalDataArray(backupID, blockNum), "Parity": GetLocalParityArray(backupID, blockNum)}
        remoteArray = {
            "Data": GetRemoteDataArray(backupID, blockNum),
            "Parity": GetRemoteParityArray(backupID, blockNum),
        }
        if (0 in remoteArray["Data"]) or (0 in remoteArray["Parity"]):
            # if some supplier does not have some data for that block - do not remove any local files for that block!
            # we only remove the local files when we are sure all suppliers got all the data pieces
            continue
        if (-1 in remoteArray["Data"]) or (-1 in remoteArray["Parity"]):
            # also if we do not have any info about this block for some supplier do not remove other local pieces
            continue
        for supplierNum in xrange(contactsdb.num_suppliers()):
            supplierIDURL = contactsdb.supplier(supplierNum)
            if not supplierIDURL:
                # supplier is unknown - skip
                continue
            for dataORparity in ["Data", "Parity"]:
                packetID = packetid.MakePacketID(backupID, blockNum, supplierNum, dataORparity)
                if io_throttle.HasPacketInSendQueue(supplierIDURL, packetID):
                    # if we do sending the packet at the moment - skip
                    continue
                if localArray[dataORparity][supplierNum] == 1:
                    packets.append(packetID)
                    # lg.out(10, '    mark to remove %s, blockNum:%d remote:%s local:%s' % (packetID, blockNum, str(remoteArray), str(localArray)))
    #                if check_all_suppliers:
    #                    if localArray[dataORparity][supplierNum] == 1:
    #                        packets.append(packetID)
    #                else:
    #                    if remoteArray[dataORparity][supplierNum] == 1 and localArray[dataORparity][supplierNum] == 1:
    #                        packets.append(packetID)
    return packets
Example #16
 def isMoreNeeded(self, arg):
     """
     Condition method.
     """
     # lg.out(10, 'fire_hire.isMoreNeeded current=%d dismiss=%d needed=%d' % (
     # contactsdb.num_suppliers(), len(self.dismiss_list),
     # settings.getSuppliersNumberDesired()))
     if '' in contactsdb.suppliers():
         lg.out(4, 'fire_hire.isMoreNeeded found empty suppliers!!!')
         return True
     if isinstance(arg, list):
         dismissed = arg
     else:
         dismissed = self.dismiss_list
     s = set(contactsdb.suppliers())
     s.difference_update(set(dismissed))
     result = len(s) < settings.getSuppliersNumberDesired()
     lg.out(14, 'fire_hire.isMoreNeeded %d %d %d %d, result=%s' % (
         contactsdb.num_suppliers(), len(dismissed), len(s),
         settings.getSuppliersNumberDesired(), result))
     return result
Example #17
 def isMoreNeeded(self, *args, **kwargs):
     """
     Condition method.
     """
     # if _Debug:
     #     lg.out(_DebugLevel, 'fire_hire.isMoreNeeded current=%d dismiss=%d needed=%d' % (
     #         contactsdb.num_suppliers(), len(self.dismiss_list),
     #         settings.getSuppliersNumberDesired()))
     if id_url.is_some_empty(contactsdb.suppliers()):
         if _Debug:
             lg.out(_DebugLevel, 'fire_hire.isMoreNeeded found empty supplier!!!')
         return True
     if isinstance(args[0], list):
         dismissed = args[0]
     else:
         dismissed = self.dismiss_list
     s = set(id_url.to_bin_list(contactsdb.suppliers()))
     s.difference_update(set(id_url.to_bin_list(dismissed)))
     result = len(s) < settings.getSuppliersNumberDesired()
     if _Debug:
         lg.out(_DebugLevel, 'fire_hire.isMoreNeeded %d %d %d %d, result=%s' % (
             contactsdb.num_suppliers(), len(dismissed), len(s),
             settings.getSuppliersNumberDesired(), result))
     return result
Example #18
def report_consumed_storage():
    result = {}
    result['suppliers_num'] = contactsdb.num_suppliers()
    result['needed'] = settings.getNeededBytes()
    result['needed_str'] = diskspace.MakeStringFromBytes(result['needed'])
    result['used'] = int(backup_fs.sizebackups() / 2)
    result['used_str'] = diskspace.MakeStringFromBytes(result['used'])
    result['available'] = result['needed'] - result['used']
    result['available_str'] = diskspace.MakeStringFromBytes(result['available'])
    result['needed_per_supplier'] = 0
    result['used_per_supplier'] = 0
    result['available_per_supplier'] = 0
    if result['suppliers_num'] > 0:
        result['needed_per_supplier'] = int(math.ceil(2.0 * result['needed'] / result['suppliers_num']))
        result['used_per_supplier'] = int(math.ceil(2.0 * result['used'] / result['suppliers_num']))
        result['available_per_supplier'] = result['needed_per_supplier'] - result['used_per_supplier']
    result['needed_per_supplier_str'] = diskspace.MakeStringFromBytes(result['needed_per_supplier'])
    result['used_per_supplier_str'] = diskspace.MakeStringFromBytes(result['used_per_supplier'])
    result['available_per_supplier_str'] = diskspace.MakeStringFromBytes(result['available_per_supplier'])
    try:
        result['used_percent'] = misc.value2percent(float(result['used']), float(result['needed']))
    except:
        result['used_percent'] = '0%'
    return result
Example #19
def report_consumed_storage():
    result = {}
    result['suppliers_num'] = contactsdb.num_suppliers()
    result['needed'] = settings.getNeededBytes()
    result['needed_str'] = diskspace.MakeStringFromBytes(result['needed'])
    result['used'] = int(backup_fs.sizebackups() / 2)
    result['used_str'] = diskspace.MakeStringFromBytes(result['used'])
    result['available'] = result['needed'] - result['used']
    result['available_str'] = diskspace.MakeStringFromBytes(result['available'])
    result['needed_per_supplier'] = 0
    result['used_per_supplier'] = 0
    result['available_per_supplier'] = 0
    if result['suppliers_num'] > 0:
        result['needed_per_supplier'] = int(math.ceil(2.0 * result['needed'] / result['suppliers_num']))
        result['used_per_supplier'] = int(math.ceil(2.0 * result['used'] / result['suppliers_num']))
        result['available_per_supplier'] = result['needed_per_supplier'] - result['used_per_supplier']
    result['needed_per_supplier_str'] = diskspace.MakeStringFromBytes(result['needed_per_supplier'])
    result['used_per_supplier_str'] = diskspace.MakeStringFromBytes(result['used_per_supplier'])
    result['available_per_supplier_str'] = diskspace.MakeStringFromBytes(result['available_per_supplier'])
    try:
        result['used_percent'] = misc.percent2string(float(result['used']) / float(result['needed']))
    except:
        result['used_percent'] = '0%'
    return result
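A hypothetical worked example of the per-supplier arithmetic above, with suppliers_num = 4, needed = 8 GB and backup_fs.sizebackups() = 2 GB: 'used' becomes 1 GB (sizebackups() counts Data plus Parity, hence the division by 2), needed_per_supplier = ceil(2.0 * 8 GB / 4) = 4 GB and used_per_supplier = ceil(2.0 * 1 GB / 4) = 0.5 GB; the 2.0 factor presumably reflects that every supplier stores both Data and Parity pieces, so the reserved space per supplier is double its share of the raw payload.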
Example #20
def GetWeakLocalBlock(backupID):
    """
    Scan all "local" blocks for given backup and find the most "weak" block.
    """
    supplierCount = contactsdb.num_suppliers()
    if backupID not in local_files():
        return -1, 0, supplierCount
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    weakBlockNum = -1
    lessSuppliers = supplierCount
    for blockNum in xrange(maxBlockNum + 1):
        if blockNum not in local_files()[backupID].keys():
            return blockNum, 0, supplierCount
        goodSuppliers = supplierCount
        for supplierNum in xrange(supplierCount):
            if (
                local_files()[backupID][blockNum]["D"][supplierNum] != 1
                or local_files()[backupID][blockNum]["P"][supplierNum] != 1
            ):
                goodSuppliers -= 1
        if goodSuppliers < lessSuppliers:
            lessSuppliers = goodSuppliers
            weakBlockNum = blockNum
    return weakBlockNum, lessSuppliers, supplierCount
Example #21
def GetBackupLocalStats(backupID):
    """
    Provide detailed info about local files for that backup. Return a tuple::

    (totalPercent, totalNumberOfFiles, totalSize, maxBlockNum,
    statsArray)
    """
    # ??? maxBlockNum = local_max_block_numbers().get(backupID, -1)
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    if backupID not in local_files():
        return 0, 0, 0, maxBlockNum, [(0, 0)] * contactsdb.num_suppliers()
    percentPerSupplier = 100.0 / contactsdb.num_suppliers()
    totalNumberOfFiles = 0
    fileNumbers = [0] * contactsdb.num_suppliers()
    for blockNum in xrange(maxBlockNum + 1):
        if blockNum not in local_files()[backupID].keys():
            continue
        #    for blockNum in local_files()[backupID].keys():
        for supplierNum in xrange(len(fileNumbers)):
            if supplierNum < contactsdb.num_suppliers():
                if local_files()[backupID][blockNum]["D"][supplierNum] == 1:
                    fileNumbers[supplierNum] += 1
                    totalNumberOfFiles += 1
                if local_files()[backupID][blockNum]["P"][supplierNum] == 1:
                    fileNumbers[supplierNum] += 1
                    totalNumberOfFiles += 1
    statsArray = []
    for supplierNum in xrange(contactsdb.num_suppliers()):
        if maxBlockNum > -1:
            # 0.5 because we count both Parity and Data.
            percent = percentPerSupplier * 0.5 * fileNumbers[supplierNum] / (maxBlockNum + 1)
        else:
            percent = 0.0
        statsArray.append((percent, fileNumbers[supplierNum]))
    del fileNumbers
    totalPercent = 100.0 * 0.5 * totalNumberOfFiles / ((maxBlockNum + 1) * contactsdb.num_suppliers())
    return totalPercent, totalNumberOfFiles, local_backup_size().get(backupID, 0), maxBlockNum, statsArray
Example #22
 def _block_finished(self, result, params):
     if not result:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_rebuilder._block_finished FAILED, blockIndex=%d' %
                 self.blockIndex)
         reactor.callLater(0, self._finish_rebuilding)  # @UndefinedVariable
         return
     try:
         newData, localData, localParity, reconstructedData, reconstructedParity = result
         _backupID = params[0]
         _blockNumber = params[1]
     except:
         lg.exc()
         reactor.callLater(0, self._finish_rebuilding)  # @UndefinedVariable
         return
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_rebuilder._block_finished   backupID=%r  blockNumber=%r  newData=%r'
             % (_backupID, _blockNumber, newData))
     if _Debug:
         lg.out(
             _DebugLevel, '        localData=%r  localParity=%r' %
             (localData, localParity))
     err = False
     if newData:
         from storage import backup_matrix
         from stream import data_sender
         count = 0
         customer_idurl = packetid.CustomerIDURL(_backupID)
         for supplierNum in range(
                 contactsdb.num_suppliers(customer_idurl=customer_idurl)):
             try:
                 localData[supplierNum]
                 localParity[supplierNum]
                 reconstructedData[supplierNum]
                 reconstructedParity[supplierNum]
             except:
                 err = True
                 lg.err('invalid result from the task: %s' % repr(params))
                 if _Debug:
                     lg.out(_DebugLevel, 'result is %s' % repr(result))
                 break
             if localData[supplierNum] == 1 and reconstructedData[
                     supplierNum] == 1:
                 backup_matrix.LocalFileReport(None, _backupID,
                                               _blockNumber, supplierNum,
                                               'Data')
                 count += 1
             if localParity[supplierNum] == 1 and reconstructedParity[
                     supplierNum] == 1:
                 backup_matrix.LocalFileReport(None, _backupID,
                                               _blockNumber, supplierNum,
                                               'Parity')
                 count += 1
         if err:
             lg.err('seems suppliers were changed, stop rebuilding')
             reactor.callLater(
                 0, self._finish_rebuilding)  # @UndefinedVariable
             return
         self.blocksSucceed.append(_blockNumber)
         data_sender.A('new-data')
         if _Debug:
             lg.out(
                 _DebugLevel,
                 '        !!!!!! %d NEW DATA segments reconstructed, blockIndex=%d'
                 % (
                     count,
                     self.blockIndex,
                 ))
     else:
         if _Debug:
             lg.out(_DebugLevel,
                    '        NO CHANGES, blockIndex=%d' % self.blockIndex)
     self.blockIndex -= 1
     reactor.callLater(0, self._start_one_block)  # @UndefinedVariable
Example #23
 def isSuppliersNumberChanged(self, arg):
     """
     Condition method.
     """
     return contactsdb.num_suppliers() != len(self.current_suppliers)
Example #24
 def isExistSomeSuppliers(self, *args, **kwargs):
     """
     Condition method.
     """
     sup_list = contactsdb.suppliers()
     return contactsdb.num_suppliers() > 0 and sup_list.count(id_url.field(b'')) < contactsdb.num_suppliers()
Example #25
 def isExistSomeSuppliers(self, arg):
     """
     Condition method.
     """
     return contactsdb.num_suppliers() > 0 and contactsdb.suppliers().count(
         '') < contactsdb.num_suppliers()
Example #26
 def isExistSomeSuppliers(self, *args, **kwargs):
     """
     Condition method.
     """
     return contactsdb.num_suppliers() > 0 and (contactsdb.suppliers().count(b'') + contactsdb.suppliers().count('')) < contactsdb.num_suppliers()
Example #27
def ReadRawListFiles(supplierNum, listFileText):
    """
    Read ListFiles packet for given supplier and build a "remote" matrix. All
    lines are something like that::

      Findex 5456
      D0 -1
      D0/1 -1
      V0/1/F20090709034221PM 3 0-1000 7463434
      V0/1/F20090709034221PM 3 0-1000 7463434
      D0/0/123/4567 -1
      V0/0/123/4567/F20090709034221PM 3 0-11 434353 missing Data:1,3
      V0/0/123/4/F20090709012331PM 3 0-5 434353 missing Data:1,3 Parity:0,1,2

    First character can be::

      "F" for files
      "D" for folders
      "V" for backed up data
    """
    from storage import backup_control

    if driver.is_started("service_backup_db"):
        from storage import index_synchronizer

        is_in_sync = index_synchronizer.is_synchronized() and backup_control.revision() > 0
    else:
        is_in_sync = False
    backups2remove = set()
    paths2remove = set()
    oldfiles = ClearSupplierRemoteInfo(supplierNum)
    newfiles = 0
    lg.out(
        8,
        "backup_matrix.ReadRawListFiles %d bytes to read from supplier #%d, rev:%d, %s, is_in_sync=%s"
        % (len(listFileText), supplierNum, backup_control.revision(), index_synchronizer.A(), is_in_sync),
    )
    inpt = cStringIO.StringIO(listFileText)
    while True:
        line = inpt.readline()
        if line == "":
            break
        typ = line[0]
        line = line[1:]
        line = line.rstrip("\n")
        if line.strip() == "":
            continue
        # also don't consider the identity a backup,
        if line.find("http://") != -1 or line.find(".xml") != -1:
            continue
        lg.out(8, "    %s:{%s}" % (typ, line))
        if typ == "F":
            # we don't have this path in the index
            # so we have several cases:
            #    1. this is an old file and we need to remove it and all its backups
            #    2. we lost our local index and did not restore it from one of the suppliers yet
            #    3. we did restore our account and did not restore the index yet
            #    4. we lost our index completely and have neither a local nor a remote copy
            # what to do now:
            #    - in the first case we just need to remove the file from the remote supplier
            #    - in other cases we must keep all remote data and believe we can restore the index
            #         and get all file names and backed up data
            # how to recognize that? how to be sure we have the correct index?
            # because it should be empty right after we recover our account
            # or we may lose it if the local index file was lost
            # the first idea:  check index_synchronizer() state - IN_SYNC means index is fine
            # the second idea: check revision number of the local index - 0 means we have no index yet
            try:
                pth, filesz = line.split(" ")
                filesz = int(filesz)
            except:
                pth = line
                filesz = -1
            if not backup_fs.IsFileID(pth):  # remote supplier has some file - but we don't have it in the index
                if pth.strip("/") in [settings.BackupIndexFileName()]:
                    # this is the index file saved on remote supplier
                    # let's remember its size and put it in the backup_fs
                    item = backup_fs.FSItemInfo(pth.strip("/"), pth.strip("/"), backup_fs.FILE)
                    item.size = filesz
                    backup_fs.SetFile(item)
                else:
                    if is_in_sync:
                        # so we have some modifications in the index - it is not empty!
                        # index_synchronizer() did his job - so we have up to date index on hands
                        # now we are sure that this file is old and must be removed from remote site
                        paths2remove.add(pth)
                        lg.out(8, "        F%s - remove, not found in the index" % pth)
                # what to do now? let's hope we still can restore our index and this file is our remote data
        elif typ == "D":
            try:
                pth = line.split(" ")[0]
            except:
                pth = line
            if not backup_fs.ExistsID(pth):
                if is_in_sync:
                    paths2remove.add(pth)
                    lg.out(8, "        D%s - remove, not found in the index" % pth)
        elif typ == "V":
            # minimum is 4 words: "0/0/F20090709034221PM", "3", "0-1000" "123456"
            words = line.split(" ")
            if len(words) < 4:
                lg.warn("incorrect line:[%s]" % line)
                continue
            try:
                pathID, versionName = packetid.SplitBackupID(words[0])
                backupID = pathID + "/" + versionName
                lineSupplierNum = int(words[1])
                minBlockNum, maxBlockNum = words[2].split("-")
                maxBlockNum = int(maxBlockNum)
            except:
                lg.warn("incorrect line:[%s]" % line)
                continue
            if lineSupplierNum != supplierNum:
                # this means the supplier has old files and we do not need those files
                backups2remove.add(backupID)
                lg.out(8, "        V%s - remove, different supplier number" % backupID)
                continue
            iter_path = backup_fs.WalkByID(pathID)
            if iter_path is None:
                # this version is not found in the index
                if is_in_sync:
                    backups2remove.add(backupID)
                    paths2remove.add(pathID)
                    lg.out(8, "        V%s - remove, path not found in the index" % pathID)
                continue
            item, localPath = iter_path
            if isinstance(item, dict):
                try:
                    item = item[backup_fs.INFO_KEY]
                except:
                    item = None
            if not item or not item.has_version(versionName):
                if is_in_sync:
                    backups2remove.add(backupID)
                    lg.out(8, "        V%s - remove, version is not found in the index" % backupID)
                continue
            missingBlocksSet = {"Data": set(), "Parity": set()}
            if len(words) > 4:
                # "0/0/123/4567/F20090709034221PM/0-Data" "3" "0-5" "434353" "missing" "Data:1,3" "Parity:0,1,2"
                if words[4].strip() != "missing":
                    lg.warn("incorrect line:[%s]" % line)
                    continue
                for missingBlocksString in words[5:]:
                    try:
                        dp, blocks = missingBlocksString.split(":")
                        missingBlocksSet[dp] = set(blocks.split(","))
                    except:
                        lg.exc()
                        break
            if backupID not in remote_files():
                remote_files()[backupID] = {}
                # lg.out(6, 'backup_matrix.ReadRawListFiles new remote entry for %s created in the memory' % backupID)
            # +1 because range(2) give us [0,1] but we want [0,1,2]
            for blockNum in xrange(maxBlockNum + 1):
                if blockNum not in remote_files()[backupID]:
                    remote_files()[backupID][blockNum] = {
                        "D": [0] * contactsdb.num_suppliers(),
                        "P": [0] * contactsdb.num_suppliers(),
                    }
                for dataORparity in ["Data", "Parity"]:
                    # we set -1 if the file is missing and 1 if exist, so 0 mean "no info yet" ... smart!
                    bit = -1 if str(blockNum) in missingBlocksSet[dataORparity] else 1
                    remote_files()[backupID][blockNum][dataORparity[0]][supplierNum] = bit
                    newfiles += int((bit + 1) / 2)  # this should switch -1 or 1 to 0 or 1
            # save max block number for this backup
            if backupID not in remote_max_block_numbers():
                remote_max_block_numbers()[backupID] = -1
            if maxBlockNum > remote_max_block_numbers()[backupID]:
                remote_max_block_numbers()[backupID] = maxBlockNum
            # mark this backup to be repainted
            RepaintBackup(backupID)
    inpt.close()
    lg.out(
        8,
        "            old:%d, new:%d, backups2remove:%d, paths2remove:%d"
        % (oldfiles, newfiles, len(backups2remove), len(paths2remove)),
    )
    # return list of backupID's which is too old but stored on suppliers machines
    return backups2remove, paths2remove
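A minimal sketch of how one "V" line from the docstring above breaks down into the fields the loop consumes (the sample line comes from the docstring; the parsed values in the comments assume packetid.SplitBackupID() simply splits off the version name, as the code implies):

line = "0/0/123/4567/F20090709034221PM 3 0-11 434353 missing Data:1,3"  # leading "V" already stripped
words = line.split(" ")
pathID, versionName = packetid.SplitBackupID(words[0])   # "0/0/123/4567", "F20090709034221PM"
lineSupplierNum = int(words[1])                          # 3 - which supplier position sent this report
minBlockNum, maxBlockNum = words[2].split("-")           # "0", "11" - known block range
missingBlocksSet = {"Data": set(), "Parity": set()}
for missingBlocksString in words[5:]:                     # ["Data:1,3"]
    dp, blocks = missingBlocksString.split(":")
    missingBlocksSet[dp] = set(blocks.split(","))         # {"Data": set(["1", "3"]), "Parity": set()}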
Example #28
 def _request_files(self):
     from storage import backup_matrix
     from customer import io_throttle
     from customer import data_sender
     self.missingPackets = 0
     # here we want to request some packets before we start working to
     # rebuild the missed blocks
     availableSuppliers = backup_matrix.GetActiveArray(customer_idurl=self.currentCustomerIDURL)
     # remember how many requests we did on this iteration
     total_requests_count = 0
     # at the moment I do download everything I have available and needed
     if '' in contactsdb.suppliers(customer_idurl=self.currentCustomerIDURL):
         lg.out(8, 'backup_rebuilder._request_files SKIP - empty supplier')
         self.automat('no-requests')
         return
     for supplierNum in range(contactsdb.num_suppliers(customer_idurl=self.currentCustomerIDURL)):
         supplierID = contactsdb.supplier(supplierNum, customer_idurl=self.currentCustomerIDURL)
         if not supplierID:
             continue
         requests_count = 0
         # we do requests in reverse order because we start rebuilding from
         # the last block
         for blockIndex in range(len(self.workingBlocksQueue) - 1, -1, -1):
             blockNum = self.workingBlocksQueue[blockIndex]
             # do not keep too many requests in the queue
             if io_throttle.GetRequestQueueLength(supplierID) >= 16:
                 break
             # also don't do too many requests at once
             if requests_count > 16:
                 break
             remoteData = backup_matrix.GetRemoteDataArray(
                 self.currentBackupID, blockNum)
             remoteParity = backup_matrix.GetRemoteParityArray(
                 self.currentBackupID, blockNum)
             localData = backup_matrix.GetLocalDataArray(
                 self.currentBackupID, blockNum)
             localParity = backup_matrix.GetLocalParityArray(
                 self.currentBackupID, blockNum)
             if supplierNum >= len(remoteData) or supplierNum >= len(remoteParity):
                 break
             if supplierNum >= len(localData) or supplierNum >= len(localParity):
                 break
             # if remote Data exist and is available because supplier is on-line,
             # but we do not have it on hand - do request
             if localData[supplierNum] == 0:
                 PacketID = packetid.MakePacketID(
                     self.currentBackupID, blockNum, supplierNum, 'Data')
                 if remoteData[supplierNum] == 1:
                     if availableSuppliers[supplierNum]:
                         # if supplier is not alive - we can't request from him
                         if not io_throttle.HasPacketInRequestQueue(supplierID, PacketID):
                             customer, remotePath = packetid.SplitPacketID(PacketID)
                             filename = os.path.join(
                                 settings.getLocalBackupsDir(),
                                 customer,
                                 remotePath,
                             )
                             if not os.path.exists(filename):
                                 if io_throttle.QueueRequestFile(
                                         self._file_received,
                                         my_id.getLocalID(),
                                         PacketID,
                                         my_id.getLocalID(),
                                         supplierID):
                                     requests_count += 1
                 else:
                     # count this packet as missing
                     self.missingPackets += 1
                      # also mark this guy as one who doesn't have any data - neither local nor remote
             else:
                 # but if local Data already exists, but was not sent - do it now
                 if remoteData[supplierNum] != 1:
                     data_sender.A('new-data')
             # same for Parity
             if localParity[supplierNum] == 0:
                 PacketID = packetid.MakePacketID(
                     self.currentBackupID, blockNum, supplierNum, 'Parity')
                 if remoteParity[supplierNum] == 1:
                     if availableSuppliers[supplierNum]:
                         if not io_throttle.HasPacketInRequestQueue(
                                 supplierID, PacketID):
                             customer, remotePath = packetid.SplitPacketID(PacketID)
                             filename = os.path.join(
                                 settings.getLocalBackupsDir(),
                                 customer,
                                 remotePath,
                             )
                             if not os.path.exists(filename):
                                 if io_throttle.QueueRequestFile(
                                     self._file_received,
                                     my_id.getLocalID(),
                                     PacketID,
                                     my_id.getLocalID(),
                                     supplierID,
                                 ):
                                     requests_count += 1
                 else:
                     self.missingPackets += 1
             else:
                 # but if local Parity already exists, but was not sent - do it now
                 if remoteParity[supplierNum] != 1:
                     data_sender.A('new-data')
         total_requests_count += requests_count
     if total_requests_count > 0:
         lg.out(8, 'backup_rebuilder._request_files : %d chunks requested' % total_requests_count)
         self.automat('requests-sent', total_requests_count)
     else:
         if self.missingPackets:
             lg.out(8, 'backup_rebuilder._request_files : found %d missing packets' % self.missingPackets)
             self.automat('found-missing')
         else:
             lg.out(8, 'backup_rebuilder._request_files : nothing was requested')
             self.automat('no-requests')
Example #29
 def isSuppliersNumberChanged(self, *args, **kwargs):
     """
     Condition method.
     """
     return contactsdb.num_suppliers() != len(self.current_suppliers)
Example #30
 def doCleanUpBackups(self, *args, **kwargs):
     # here we check all backups we have and remove the old one
     # user can set how many versions of that file or folder to keep
     # other versions (older) will be removed here
     from storage import backup_rebuilder
     try:
         self.backups_progress_last_iteration = len(
             backup_rebuilder.A().backupsWasRebuilt)
     except:
         self.backups_progress_last_iteration = 0
     versionsToKeep = settings.getBackupsMaxCopies()
     if not contactsdb.num_suppliers():
         bytesUsed = 0
     else:
         bytesUsed = backup_fs.sizebackups() / contactsdb.num_suppliers()
     bytesNeeded = diskspace.GetBytesFromString(settings.getNeededString(),
                                                0)
     customerGlobID = my_id.getGlobalID()
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups backupsToKeep=%d used=%d needed=%d'
             % (versionsToKeep, bytesUsed, bytesNeeded))
     delete_count = 0
     if versionsToKeep > 0:
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             pathID = global_id.CanonicalID(pathID)
             if backup_control.IsPathInProcess(pathID):
                 continue
             versions = itemInfo.list_versions()
             # TODO: do we need to sort the list? it comes from a set, so must be sorted may be
             while len(versions) > versionsToKeep:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  versions.pop(0))
                 if _Debug:
                     lg.out(
                         _DebugLevel,
                         'backup_monitor.doCleanUpBackups %d of %d backups for %s, so remove older %s'
                         % (len(versions), versionsToKeep, localPath,
                            backupID))
                 backup_control.DeleteBackup(backupID,
                                             saveDB=False,
                                             calculate=False)
                 delete_count += 1
     # we need also to fit used space into needed space (given from other users)
     # they trust us - do not need to take extra space from our friends
     # so remove oldest backups, but keep at least one for every folder - at least locally!
     # still our suppliers will remove our "extra" files by their "local_tester"
     if bytesNeeded <= bytesUsed:
         sizeOk = False
         for pathID, localPath, itemInfo in backup_fs.IterateIDs():
             if sizeOk:
                 break
             pathID = global_id.CanonicalID(pathID)
             versions = itemInfo.list_versions(True, False)
             if len(versions) <= 1:
                 continue
             for version in versions[1:]:
                 backupID = packetid.MakeBackupID(customerGlobID, pathID,
                                                  version)
                 versionInfo = itemInfo.get_version_info(version)
                 if versionInfo[1] > 0:
                     if _Debug:
                         lg.out(
                             _DebugLevel,
                             'backup_monitor.doCleanUpBackups over use %d of %d, so remove %s of %s'
                             %
                             (bytesUsed, bytesNeeded, backupID, localPath))
                     backup_control.DeleteBackup(backupID,
                                                 saveDB=False,
                                                 calculate=False)
                     delete_count += 1
                     bytesUsed -= versionInfo[1]
                     if bytesNeeded > bytesUsed:
                         sizeOk = True
                         break
     if delete_count > 0:
         backup_fs.Scan()
         backup_fs.Calculate()
         backup_control.Save()
         from main import control
         control.request_update()
     collected = gc.collect()
     if self.backups_progress_last_iteration > 0:
         if _Debug:
             lg.out(
                 _DebugLevel,
                 'backup_monitor.doCleanUpBackups  sending "restart", backups_progress_last_iteration=%s'
                 % self.backups_progress_last_iteration)
         reactor.callLater(1, self.automat, 'restart')  # @UndefinedVariable
     if _Debug:
         lg.out(
             _DebugLevel,
             'backup_monitor.doCleanUpBackups collected %d objects' %
             collected)
Example #31
def RequestListFilesAll():
    r = []
    for supi in range(contactsdb.num_suppliers()):
        r.append(SendRequestListFiles(supi))
    return r
Example #32
    def doDecideToDismiss(self, *args, **kwargs):
        """
        Action method.
        """
        global _SuppliersToFire
        from p2p import p2p_connector
        from p2p import network_connector
        from customer import supplier_connector
        from p2p import online_status
        # take any actions only if I am connected to the network
        if not p2p_connector.A() or not network_connector.A():
            if _Debug:
                lg.out(
                    _DebugLevel,
                    'fire_hire.doDecideToDismiss    p2p_connector() is not ready yet, SKIP'
                )
            self.automat('made-decision', [])
            return
        if not network_connector.A():
            if _Debug:
                lg.out(
                    _DebugLevel,
                    'fire_hire.doDecideToDismiss    network_connector() is not ready yet, SKIP'
                )
            self.automat('made-decision', [])
            return
        if p2p_connector.A().state != 'CONNECTED' or network_connector.A(
        ).state != 'CONNECTED':
            if _Debug:
                lg.out(
                    _DebugLevel,
                    'fire_hire.doDecideToDismiss    p2p/network is not connected at the moment, SKIP'
                )
            self.automat('made-decision', [])
            return
        # if certain suppliers need to be removed by a manual/external request, just do that
        to_be_fired = id_url.to_list(set(_SuppliersToFire))
        _SuppliersToFire = []
        if to_be_fired:
            lg.info('going to fire %d suppliers from external request' %
                    len(to_be_fired))
            self.automat('made-decision', to_be_fired)
            return
        # make sure to not go too far when i just want to decrease number of my suppliers
        number_desired = settings.getSuppliersNumberDesired()
        redundant_suppliers = set()
        if contactsdb.num_suppliers() > number_desired:
            for supplier_index in range(number_desired,
                                        contactsdb.num_suppliers()):
                idurl = contactsdb.supplier(supplier_index)
                if idurl:
                    lg.info('found REDUNDANT supplier %s at position %d' % (
                        idurl,
                        supplier_index,
                    ))
                    redundant_suppliers.add(idurl)
        if redundant_suppliers:
            result = list(redundant_suppliers)
            lg.info('will replace redundant suppliers: %s' % result)
            self.automat('made-decision', result)
            return
        # now I need to look more careful at my suppliers
        potentialy_fired = set()
        connected_suppliers = set()
        disconnected_suppliers = set()
        requested_suppliers = set()
        online_suppliers = set()
        offline_suppliers = set()
        # if there are some empty suppliers we need to get rid of them,
        # but there is no need to dismiss anyone at the moment.
        my_suppliers = contactsdb.suppliers()
        if _Debug:
            lg.args(_DebugLevel, my_suppliers=my_suppliers)
        if id_url.is_some_empty(my_suppliers):
            lg.warn('SKIP, found empty supplier')
            self.automat('made-decision', [])
            return
        for supplier_idurl in my_suppliers:
            sc = supplier_connector.by_idurl(supplier_idurl)
            if not sc:
                lg.warn('SKIP, supplier connector for supplier %s not exist' %
                        supplier_idurl)
                continue
            if sc.state == 'NO_SERVICE':
                lg.warn('found "NO_SERVICE" supplier: %s' % supplier_idurl)
                disconnected_suppliers.add(supplier_idurl)
                potentialy_fired.add(supplier_idurl)
            elif sc.state == 'CONNECTED':
                connected_suppliers.add(supplier_idurl)
            elif sc.state in [
                    'DISCONNECTED',
                    'REFUSE',
            ]:
                disconnected_suppliers.add(supplier_idurl)
#             elif sc.state in ['QUEUE?', 'REQUEST', ]:
#                 requested_suppliers.add(supplier_idurl)
            if online_status.isOffline(supplier_idurl):
                offline_suppliers.add(supplier_idurl)
            elif online_status.isOnline(supplier_idurl):
                online_suppliers.add(supplier_idurl)
            elif online_status.isCheckingNow(supplier_idurl):
                requested_suppliers.add(supplier_idurl)
        if not connected_suppliers or not online_suppliers:
            lg.warn('SKIP, no ONLINE suppliers found at the moment')
            self.automat('made-decision', [])
            return
        if requested_suppliers:
            lg.warn('SKIP, still waiting response from some of suppliers')
            self.automat('made-decision', [])
            return
        if not disconnected_suppliers:
            if _Debug:
                lg.out(
                    _DebugLevel,
                    'fire_hire.doDecideToDismiss    SKIP, no OFFLINE suppliers found at the moment'
                )
            # TODO: add more conditions to fire "slow" suppliers - they are still connected but useless
            self.automat('made-decision', [])
            return
        if len(offline_suppliers) + len(online_suppliers) != number_desired:
            lg.warn('SKIP, offline + online != total count: %s %s %s' %
                    (offline_suppliers, online_suppliers, number_desired))
            self.automat('made-decision', [])
            return
        max_offline_suppliers_count = eccmap.GetCorrectableErrors(
            number_desired)
        if len(offline_suppliers) > max_offline_suppliers_count:
            lg.warn(
                'SKIP, too many OFFLINE suppliers at the moment : %d > %d' % (
                    len(offline_suppliers),
                    max_offline_suppliers_count,
                ))
            self.automat('made-decision', [])
            return
        critical_offline_suppliers_count = eccmap.GetFireHireErrors(
            number_desired)
        if len(offline_suppliers) >= critical_offline_suppliers_count and len(
                offline_suppliers) > 0:
            if config.conf().getBool(
                    'services/employer/replace-critically-offline-enabled'):
                # TODO: check that issue
                # replacing suppliers too aggressively while they still hold the data is very dangerous !!!
                one_dead_supplier = offline_suppliers.pop()
                lg.warn(
                    'found "CRITICALLY_OFFLINE" supplier %s, max offline limit is %d'
                    % (
                        one_dead_supplier,
                        critical_offline_suppliers_count,
                    ))
                potentialy_fired.add(one_dead_supplier)
        if not potentialy_fired:
            if _Debug:
                lg.out(
                    _DebugLevel,
                    'fire_hire.doDecideToDismiss   found no "bad" suppliers, all is good !!!!!'
                )
            self.automat('made-decision', [])
            return
        # only replace suppliers one by one at the moment
        result = list(potentialy_fired)
        lg.info('will replace supplier %s' % result[0])
        self.automat('made-decision', [
            result[0],
        ])
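Note: the dismissal decision above hinges on two tolerances taken from the ecc map, eccmap.GetCorrectableErrors(number_desired) and eccmap.GetFireHireErrors(number_desired), and it never fires more than one supplier at a time. The following is a minimal standalone sketch of that same threshold check over plain data; the helper decide_if_safe_to_fire, the status strings and the sample values are hypothetical illustrations, not part of the BitDust API.

def decide_if_safe_to_fire(statuses, number_desired, correctable_errors, fire_hire_errors):
    """
    statuses: dict mapping supplier idurl -> 'online' / 'offline' / 'checking'.
    Returns a list with at most one supplier idurl that is safe to dismiss.
    """
    online = [i for i, s in statuses.items() if s == 'online']
    offline = [i for i, s in statuses.items() if s == 'offline']
    checking = [i for i, s in statuses.items() if s == 'checking']
    if checking or not online:
        return []  # still waiting for some replies, or nobody is reachable
    if len(online) + len(offline) != number_desired:
        return []  # supplier list looks inconsistent, better not to act
    if len(offline) > correctable_errors:
        return []  # too many pieces already unreachable, firing now is too risky
    if offline and len(offline) >= fire_hire_errors:
        return [offline[0]]  # replace a single "critically offline" supplier
    return []

# example: 4 desired suppliers, one offline, ecc map tolerating one missing piece
print(decide_if_safe_to_fire(
    {'s1': 'online', 's2': 'online', 's3': 'online', 's4': 'offline'},
    number_desired=4, correctable_errors=1, fire_hire_errors=1,
))  # ['s4']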
Example #33
def GetBackupRemoteStats(backupID, only_available_files=True):
    """
    This method finds the "weakest" block of the given backup: the block whose
    pieces are kept by the fewest suppliers compared to all other blocks.

    This is needed to estimate the availability of the whole backup,
    because losing even a single block of the backup means losing the whole backup.

    The backup condition is equal to the condition of its "worst" block.
    Return a tuple::

      (blocks, percent, weakBlock, weakBlockPercent)
    """
    if backupID not in remote_files():
        return 0, 0, 0, 0
    # get max block number
    # ??? maxBlockNum = remote_max_block_numbers().get(backupID, -1)
    maxBlockNum = GetKnownMaxBlockNum(backupID)
    if maxBlockNum == -1:
        return 0, 0, 0, 0
    supplierCount = contactsdb.num_suppliers()
    fileCounter = 0
    weakBlockNum = -1
    lessSuppliers = supplierCount
    activeArray = GetActiveArray()
    # we count all remote files for this backup - scan all blocks
    for blockNum in xrange(maxBlockNum + 1):
        if blockNum not in remote_files()[backupID].keys():
            lessSuppliers = 0
            weakBlockNum = blockNum
            continue
        goodSuppliers = supplierCount
        for supplierNum in xrange(supplierCount):
            if activeArray[supplierNum] != 1 and only_available_files:
                goodSuppliers -= 1
                continue
            try:
                # probe both the Data and the Parity entries for this supplier
                remote_files()[backupID][blockNum]["D"][supplierNum]
                remote_files()[backupID][blockNum]["P"][supplierNum]
            except (KeyError, IndexError):
                goodSuppliers -= 1
                continue
            if (
                remote_files()[backupID][blockNum]["D"][supplierNum] != 1
                or remote_files()[backupID][blockNum]["P"][supplierNum] != 1
            ):
                goodSuppliers -= 1
            if remote_files()[backupID][blockNum]["D"][supplierNum] == 1:
                fileCounter += 1
            if remote_files()[backupID][blockNum]["P"][supplierNum] == 1:
                fileCounter += 1
        if goodSuppliers < lessSuppliers:
            lessSuppliers = goodSuppliers
            weakBlockNum = blockNum
    # +1 since zero based and *0.5 because Data and Parity
    return (
        maxBlockNum + 1,
        100.0 * 0.5 * fileCounter / ((maxBlockNum + 1) * supplierCount),
        weakBlockNum,
        100.0 * float(lessSuppliers) / float(supplierCount),
    )
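Note: a short usage sketch of GetBackupRemoteStats(); the backup ID string and the 50% threshold below are hypothetical illustrations only. The weakest-block percentage is the figure that matters for overall availability, because losing any single block loses the whole backup.

# the backup ID below is a made-up example value
blocks, percent, weakBlock, weakBlockPercent = GetBackupRemoteStats('alice@idhost.org:0/F20230101010101AM')
print('%d blocks, %.1f%% of all pieces delivered' % (blocks, percent))
print('weakest block is #%d with %.1f%% of its pieces on suppliers' % (weakBlock, weakBlockPercent))
if weakBlockPercent < 50.0:
    # illustrative threshold only; the real limit depends on the ecc map in use
    print('the weakest block may not be reconstructable, the whole backup is at risk')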
Example #34
 def doRequestMyListFiles(self, *args, **kwargs):
     """
     Action method.
     """
     self._do_request_list_files(contactsdb.num_suppliers())
Example #35
 def isExistSomeSuppliers(self, arg):
     """
     Condition method.
     """
     return contactsdb.num_suppliers() > 0 and contactsdb.suppliers().count(
         '') < contactsdb.num_suppliers()
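Note: the condition above is True only when at least one supplier slot is filled. A tiny pure-Python equivalent (the helper name and the sample IDURLs are hypothetical) makes the intent explicit:

def exist_some_suppliers(suppliers):
    # an empty string marks an unfilled supplier slot
    return len(suppliers) > 0 and suppliers.count('') < len(suppliers)

assert exist_some_suppliers(['http://id.example.com/alice.xml', '']) is True
assert exist_some_suppliers(['', '']) is False
assert exist_some_suppliers([]) is False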
Example #36
    def doDecideToDismiss(self, arg):
        """
        Action method.
        """
        global _SuppliersToFire
        to_be_fired = list(set(_SuppliersToFire))
        _SuppliersToFire = []
        if to_be_fired:
            lg.warn('going to fire %d suppliers from external request' %
                    len(to_be_fired))
            self.automat('made-decision', to_be_fired)
            return
        potentialy_fired = set()
        connected_suppliers = set()
        disconnected_suppliers = set()
        requested_suppliers = set()
        online_suppliers = set()
        offline_suppliers = set()
        redundant_suppliers = set()
        # if some supplier slots are empty they must be filled first,
        # so there is no need to dismiss anyone at the moment.
        if '' in contactsdb.suppliers() or None in contactsdb.suppliers():
            lg.warn('SKIP, found empty supplier')
            self.automat('made-decision', [])
            return
        number_desired = settings.getSuppliersNumberDesired()
        for supplier_idurl in contactsdb.suppliers():
            sc = supplier_connector.by_idurl(supplier_idurl)
            if not sc:
                lg.warn('SKIP, supplier connector for supplier %s does not exist' %
                        supplier_idurl)
                continue
            if sc.state == 'NO_SERVICE':
                lg.warn('found "NO_SERVICE" supplier: %s' % supplier_idurl)
                disconnected_suppliers.add(supplier_idurl)
                potentialy_fired.add(supplier_idurl)
            elif sc.state == 'CONNECTED':
                connected_suppliers.add(supplier_idurl)
            elif sc.state in [
                    'DISCONNECTED',
                    'REFUSE',
            ]:
                disconnected_suppliers.add(supplier_idurl)
#             elif sc.state in ['QUEUE?', 'REQUEST', ]:
#                 requested_suppliers.add(supplier_idurl)
            if contact_status.isOffline(supplier_idurl):
                offline_suppliers.add(supplier_idurl)
            elif contact_status.isOnline(supplier_idurl):
                online_suppliers.add(supplier_idurl)
            elif contact_status.isCheckingNow(supplier_idurl):
                requested_suppliers.add(supplier_idurl)
        if contactsdb.num_suppliers() > number_desired:
            for supplier_index in range(number_desired,
                                        contactsdb.num_suppliers()):
                idurl = contactsdb.supplier(supplier_index)
                if idurl:
                    lg.warn('found "REDUNDANT" supplier %s at position %d' % (
                        idurl,
                        supplier_index,
                    ))
                    potentialy_fired.add(idurl)
                    redundant_suppliers.add(idurl)
                else:
                    lg.warn('supplier at position %d does not exist' %
                            supplier_index)
        if not connected_suppliers or not online_suppliers:
            lg.warn('SKIP, no ONLINE suppliers found at the moment')
            self.automat('made-decision', [])
            return
        if requested_suppliers:
            lg.warn('SKIP, still waiting response from some of suppliers')
            self.automat('made-decision', [])
            return
        if redundant_suppliers:
            result = list(redundant_suppliers)
            lg.info('will replace redundant suppliers: %s' % result)
            self.automat('made-decision', result)
            return
        if not disconnected_suppliers:
            lg.warn('SKIP, no OFFLINE suppliers found at the moment')
            # TODO: add more conditions to fire "slow" suppliers
            self.automat('made-decision', [])
            return
        if len(offline_suppliers) + len(online_suppliers) != number_desired:
            lg.warn('SKIP, offline + online != total count: %s %s %s' %
                    (offline_suppliers, online_suppliers, number_desired))
            self.automat('made-decision', [])
            return
        from raid import eccmap
        max_offline_suppliers_count = eccmap.GetCorrectableErrors(
            number_desired)
        if len(offline_suppliers) > max_offline_suppliers_count:
            lg.warn(
                'SKIP, too many OFFLINE suppliers at the moment : %d > %d' % (
                    len(offline_suppliers),
                    max_offline_suppliers_count,
                ))
            self.automat('made-decision', [])
            return
        critical_offline_suppliers_count = eccmap.GetFireHireErrors(
            number_desired)
        # TODO: temporarily disabled because of an issue: replacing suppliers who still hold the data too aggressively is dangerous
        if False:  # len(offline_suppliers) >= critical_offline_suppliers_count:
            one_dead_supplier = offline_suppliers.pop()
            lg.warn(
                'found "CRITICALLY_OFFLINE" supplier %s, max offline limit is %d'
                % (
                    one_dead_supplier,
                    critical_offline_suppliers_count,
                ))
            potentialy_fired.add(one_dead_supplier)
        if not potentialy_fired:
            lg.out(
                6,
                'fire_hire.doDecideToDismiss   found no "bad" suppliers, all is good !!!!!'
            )
            self.automat('made-decision', [])
            return
        # only replace suppliers one by one at the moment
        result = list(potentialy_fired)
        lg.info('will replace supplier %s' % result[0])
        self.automat('made-decision', [
            result[0],
        ])
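Note: in this older variant the suppliers occupying positions beyond settings.getSuppliersNumberDesired() are treated as redundant and dismissed before any offline analysis. A minimal standalone sketch of that trimming rule (the helper name and the sample data are hypothetical):

def find_redundant_suppliers(suppliers, number_desired):
    # every non-empty supplier slot at position >= number_desired is redundant
    return [idurl for idurl in suppliers[number_desired:] if idurl]

# e.g. six suppliers assigned while only four are desired: the last two get dismissed
print(find_redundant_suppliers(['s1', 's2', 's3', 's4', 's5', 's6'], 4))  # ['s5', 's6']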