def set_key_id(self, key_id):
    """
    Store a new `key_id` on this item and refresh the cached identifiers
    that are derived from it: the key alias, the full global path and the
    full customer ID.
    """
    alias = packetid.KeyAlias(key_id)
    self.keyID = key_id
    self.keyAlias = alias
    # both global IDs share the same customer and key alias; only the
    # first one carries the remote path component
    self.fullGlobPath = global_id.MakeGlobalID(
        customer=self.customerGlobID,
        key_alias=alias,
        path=self.remotePath,
    )
    self.fullCustomerID = global_id.MakeGlobalID(
        customer=self.customerGlobID,
        key_alias=alias,
    )
def DeletePathBackups(pathID, removeLocalFilesToo=True, saveDB=True, calculate=True):
    """
    This removes all backups of given path ID
    Doing same operations as ``DeleteBackup()``.

    :param pathID: global path ID whose backup versions must be erased
    :param removeLocalFilesToo: also delete the locally stored backup files
    :param saveDB: write the updated index to disk and notify the UI
    :param calculate: re-scan the catalog and recalculate used space afterwards
    :return: True if the item was found and processed, False otherwise
    """
    from storage import backup_rebuilder
    from stream import io_throttle
    # normalize the incoming path ID to its canonical form first
    pathID = global_id.CanonicalID(pathID)
    # get the working item
    customer, remotePath = packetid.SplitPacketID(pathID)
    key_alias = packetid.KeyAlias(customer)
    customer_idurl = global_id.GlobalUserToIDURL(customer)
    item = backup_fs.GetByID(remotePath, iterID=backup_fs.fsID(customer_idurl, key_alias))
    if item is None:
        # nothing known about this path in the catalog
        return False
    if _Debug:
        lg.out(_DebugLevel, 'backup_control.DeletePathBackups ' + pathID)
    # this is a list of all known backups of this path
    versions = item.list_versions()
    for version in versions:
        backupID = packetid.MakeBackupID(customer, remotePath, version)
        if _Debug:
            lg.out(_DebugLevel, ' removing %s' % backupID)
        # abort backup if it just started and is running at the moment
        AbortRunningBackup(backupID)
        # if we requested for files for this backup - we do not need it anymore
        io_throttle.DeleteBackupRequests(backupID)
        io_throttle.DeleteBackupSendings(backupID)
        # remove interests in transport_control
        # callback.delete_backup_interest(backupID)
        # remove local files for this backupID
        if removeLocalFilesToo:
            backup_fs.DeleteLocalBackup(settings.getLocalBackupsDir(), backupID)
        # remove remote info for this backup from the memory
        backup_matrix.EraseBackupRemoteInfo(backupID)
        # also remove local info
        backup_matrix.EraseBackupLocalInfo(backupID)
        # finally remove this backup from the index
        item.delete_version(version)
        # lg.out(8, 'backup_control.DeletePathBackups ' + backupID)
    # stop any rebuilding, we will restart it soon
    backup_rebuilder.RemoveAllBackupsToWork()
    backup_rebuilder.SetStoppedFlag()
    # check and calculate used space
    if calculate:
        backup_fs.Scan()
        backup_fs.Calculate()
    # save the index if needed
    if saveDB:
        Save()
        control.request_update()
    return True
def OnFoundFolderSize(pth, sz, arg):
    """
    This is a callback, fired from ``lib.dirsize.ask()`` method after finish
    calculating of folder size.

    Stores the discovered size on the catalog item, recalculates and saves
    the index, and — when a backup version was given — updates the
    ``totalSize`` of the matching running backup job.

    :param pth: filesystem path whose size was measured (unused here)
    :param sz: calculated folder size in bytes
    :param arg: tuple of ``(pathID, version)`` identifying the catalog item
    """
    try:
        pathID, version = arg
        customerGlobID, pathID = packetid.SplitPacketID(pathID)
        customerIDURL = global_id.GlobalUserToIDURL(customerGlobID)
        keyAlias = packetid.KeyAlias(customerGlobID)
        item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customerIDURL, keyAlias))
        if item:
            item.set_size(sz)
            backup_fs.Calculate()
            Save()
        if version:
            backupID = packetid.MakeBackupID(customerGlobID, pathID, version)
            job = GetRunningBackupObject(backupID)
            if job:
                job.totalSize = sz
            if _Debug:
                lg.out(_DebugLevel, 'backup_control.OnFoundFolderSize %s %d' % (backupID, sz))
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; a callback must not block interpreter shutdown.
        # Still best-effort: log the traceback and carry on.
        lg.exc()
def doScanAndQueue(self, *args, **kwargs):
    """
    Action method.

    Scans the local files matrix for every known customer and queues any
    backup pieces that still need to be delivered to suppliers via
    ``io_throttle.QueueSendFile()``. Finishes by firing the ``scan-done``
    event with the number of pieces queued during this pass.
    """
    global _ShutdownFlag
    if _ShutdownFlag:
        # shutting down: report an empty pass and bail out immediately
        if _Debug:
            lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag is True\n')
        self.automat('scan-done', 0)
        return
    from storage import backup_matrix
    from storage import backup_fs
    # refresh the in-memory info about locally stored backup files
    backup_matrix.ReadLocalFiles()
    progress = 0
    # if _Debug:
    #     lg.out(_DebugLevel, 'data_sender.doScanAndQueue with %d known customers' % len(contactsdb.known_customers()))
    for customer_idurl in contactsdb.known_customers():
        if customer_idurl != my_id.getIDURL():
            # TODO: check that later
            # currently only sends data owned by this node itself
            if _Debug:
                lg.out(_DebugLevel + 2, 'data_sender.doScanAndQueue skip sending to another customer: %r' % customer_idurl)
            continue
        known_suppliers = contactsdb.suppliers(customer_idurl)
        # cannot send anything while the supplier list has gaps
        if not known_suppliers or id_url.is_some_empty(known_suppliers):
            if _Debug:
                lg.out(_DebugLevel, 'data_sender.doScanAndQueue found empty supplier(s) for customer %r, SKIP' % customer_idurl)
            continue
        known_backups = misc.sorted_backup_ids(list(backup_matrix.local_files().keys()), True)
        if _Debug:
            lg.out(_DebugLevel, 'data_sender.doScanAndQueue found %d known suppliers for customer %r with %d backups' % (len(known_suppliers), customer_idurl, len(known_backups)))
        for backupID in known_backups:
            # only handle backups belonging to the current customer
            this_customer_idurl = packetid.CustomerIDURL(backupID)
            if this_customer_idurl != customer_idurl:
                continue
            customerGlobalID, pathID, _ = packetid.SplitBackupID(backupID, normalize_key_alias=True)
            keyAlias = packetid.KeyAlias(customerGlobalID)
            item = backup_fs.GetByID(pathID, iterID=backup_fs.fsID(customer_idurl, keyAlias))
            if not item:
                # backup exists in the matrix but not in the catalog anymore
                if _Debug:
                    lg.out(_DebugLevel, 'data_sender.doScanAndQueue skip sending backup %r path not exist in catalog' % backupID)
                continue
            if item.key_id and customerGlobalID and customerGlobalID != item.key_id:
                # key mismatch between backup ID and catalog item - do not send
                if _Debug:
                    lg.out(_DebugLevel, 'data_sender.doScanAndQueue skip sending backup %r key is different in the catalog: %r ~ %r' % (
                        backupID, customerGlobalID, item.key_id,
                    ))
                continue
            # map: supplier position -> list of packet IDs still to be sent
            packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID, limit_per_supplier=None)
            total_for_customer = sum([len(v) for v in packetsBySupplier.values()])
            if total_for_customer:
                if _Debug:
                    lg.out(_DebugLevel, 'data_sender.doScanAndQueue sending %r for customer %r with %d pieces' % (item.name(), customer_idurl, total_for_customer))
                for supplierNum in packetsBySupplier.keys():
                    # supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
                    # resolve supplier position to its IDURL, guarding the index range
                    if supplierNum >= 0 and supplierNum < len(known_suppliers):
                        supplier_idurl = known_suppliers[supplierNum]
                    else:
                        supplier_idurl = None
                    if not supplier_idurl:
                        lg.warn('skip sending, unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%r' % (supplierNum, backupID, customer_idurl))
                        continue
                    for packetID in packetsBySupplier[supplierNum]:
                        # sanity-check the packet ID really belongs to this backup/supplier
                        backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
                        if backupID_ != backupID:
                            lg.warn('skip sending, unexpected backupID supplierNum=%s for %s, customer_idurl=%r' % (packetID, backupID, customer_idurl))
                            continue
                        if supplierNum_ != supplierNum:
                            lg.warn('skip sending, unexpected supplierNum %s for %s, customer_idurl=%r' % (packetID, backupID, customer_idurl))
                            continue
                        if io_throttle.HasPacketInSendQueue(supplier_idurl, packetID):
                            # already queued - do not enqueue a duplicate
                            if _Debug:
                                lg.out(_DebugLevel, 'data_sender.doScanAndQueue %s already in sending queue for %r' % (packetID, supplier_idurl))
                            continue
                        # '---' tail in the per-supplier statistic marks repeated recent failures
                        latest_progress = self.statistic.get(supplier_idurl, {}).get('latest', '')
                        if len(latest_progress) >= 3 and latest_progress.endswith('---'):
                            if _Debug:
                                lg.out(_DebugLevel + 2, 'data_sender.doScanAndQueue skip sending to supplier %r because multiple packets already failed' % supplier_idurl)
                            continue
                        if not io_throttle.OkToSend(supplier_idurl):
                            # outgoing queue for that supplier is full/busy right now
                            if _Debug:
                                lg.out(_DebugLevel + 2, 'data_sender.doScanAndQueue skip sending, queue is busy for %r' % supplier_idurl)
                            continue
                        # locate the piece on disk: <backups dir>/<customer>/<path ID>
                        customerGlobalID, pathID = packetid.SplitPacketID(packetID)
                        filename = os.path.join(
                            settings.getLocalBackupsDir(),
                            customerGlobalID,
                            pathID,
                        )
                        if not os.path.isfile(filename):
                            if _Debug:
                                lg.out(_DebugLevel, 'data_sender.doScanAndQueue %s is not a file' % filename)
                            continue
                        # NOTE: itemInfo is captured per-iteration so the ack/fail
                        # callbacks below see the snapshot for this very item
                        itemInfo = item.to_json()
                        if io_throttle.QueueSendFile(
                            filename,
                            packetID,
                            supplier_idurl,
                            my_id.getIDURL(),
                            lambda packet, ownerID, packetID: self._packetAcked(packet, ownerID, packetID, itemInfo),
                            lambda remoteID, packetID, why: self._packetFailed(remoteID, packetID, why, itemInfo),
                        ):
                            progress += 1
                            if _Debug:
                                lg.out(_DebugLevel, 'data_sender.doScanAndQueue for %r put %s in the queue progress=%d' % (
                                    item.name(), packetID, progress,
                                ))
                        else:
                            if _Debug:
                                lg.out(_DebugLevel, 'data_sender.doScanAndQueue io_throttle.QueueSendFile FAILED %s' % packetID)
    if _Debug:
        lg.out(_DebugLevel, 'data_sender.doScanAndQueue progress=%s' % progress)
    self.automat('scan-done', progress)