def _processRecoveryInterest(self, interest, syncDigest, face):
    """
    Reply to a recovery interest: if syncDigest is found in the digest log,
    send back one UPDATE sync state for every producer currently in the
    digest tree, signed and packaged as a single Data packet.

    :param Interest interest: The incoming recovery interest; its name is
      reused as the reply Data name.
    :param str syncDigest: The digest component from the interest name,
      looked up in the digest log via _logFind.
    :param Face face: The face used to send the reply Data packet.
    """
    logging.getLogger(__name__).info("processRecoveryInterest")
    if self._logFind(syncDigest) != -1:
        # We recognize this digest: build a full snapshot of the digest tree.
        tempContent = SyncStateMsg()
        for i in range(self._digestTree.size()):
            # Use getattr to avoid pylint errors.
            content = getattr(tempContent, "ss").add()
            content.name = self._digestTree.get(i).getDataPrefix()
            content.type = SyncState_UPDATE
            content.seqno.seq = self._digestTree.get(i).getSequenceNo()
            content.seqno.session = self._digestTree.get(i).getSessionNo()

        if len(getattr(tempContent, "ss")) != 0:
            # TODO: Check if this works in Python 3.
            #pylint: disable=E1103
            array = tempContent.SerializeToString()
            #pylint: enable=E1103
            data = Data(interest.getName())
            data.setContent(Blob(array))
            if interest.getName().get(-1).toEscapedString() == "00":
                # Limit the lifetime of replies to interest for "00" since
                # they can be different.
                data.getMetaInfo().setFreshnessPeriod(1000)

            self._keyChain.sign(data, self._certificateName)
            try:
                face.putData(data)
            except Exception as ex:
                logging.getLogger(__name__).error(
                  "Error in face.putData: %s", str(ex))
                # Nothing more to do if the send failed.
                return

            logging.getLogger(__name__).info("send recovery data back")
            logging.getLogger(__name__).info(
              "%s", interest.getName().toUri())
def _onData(self, interest, data):
    """
    Process Sync Data.

    Parse the SyncStateMsg from the Data content, update the digest tree
    (dispatching to _initialOndata when we are still at the initial "00"
    digest), report the UPDATE sync states to the application via
    onReceivedSyncState, then express the next sync interest under the new
    root digest.
    """
    if not self._enabled:
        # Ignore callbacks after the application calls shutdown().
        return
    logging.getLogger(__name__).info(
      "Sync ContentObject received in callback")
    logging.getLogger(__name__).info("name: %s", data.getName().toUri())
    # TODO: Check if this works in Python 3.
    tempContent = SyncStateMsg()
    #pylint: disable=E1103
    tempContent.ParseFromString(data.getContent().toBytes())
    #pylint: enable=E1103
    content = getattr(tempContent, "ss")
    if self._digestTree.getRoot() == "00":
        isRecovery = True
        # Processing initial sync data.
        self._initialOndata(content)
    else:
        self._update(content)
        # Broadcast prefix + digest is size + 1; one extra component
        # presumably marks a recovery reply — TODO confirm against the
        # interest construction in the recovery path.
        if (interest.getName().size() ==
            self._applicationBroadcastPrefix.size() + 2):
            # Assume this is a recovery interest.
            isRecovery = True
        else:
            isRecovery = False

    # Send the interests to fetch the application data.
    syncStates = []
    for i in range(len(content)):
        syncState = content[i]

        # Only report UPDATE sync states.
        if syncState.type == SyncState_UPDATE:
            if len(syncState.application_info) > 0:
                applicationInfo = Blob(syncState.application_info, True)
            else:
                applicationInfo = Blob()

            syncStates.append(self.SyncState(
              syncState.name, syncState.seqno.session,
              syncState.seqno.seq, applicationInfo))

    try:
        self._onReceivedSyncState(syncStates, isRecovery)
    except:
        logging.exception("Error in onReceivedSyncState")

    # Express the next sync interest under the (possibly new) root digest.
    name = Name(self._applicationBroadcastPrefix)
    name.append(self._digestTree.getRoot())
    syncInterest = Interest(name)
    syncInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
    self._face.expressInterest(syncInterest, self._onData, self._syncTimeout)
    logging.getLogger(__name__).info("Syncinterest expressed:")
    logging.getLogger(__name__).info("%s", name.toUri())
def publishNextSequenceNo(self, applicationInfo=None):
    """
    Increment the sequence number, create a sync message with the new
    sequence number and publish a data packet where the name is the
    applicationBroadcastPrefix + the root digest of the current digest
    tree. Then add the sync message to the digest tree and digest log which
    creates a new root digest. Finally, express an interest for the next
    sync update with the name applicationBroadcastPrefix + the new root
    digest. After this, your application should publish the content for the
    new sequence number. You can get the new sequence number with
    getSequenceNo().

    Note: Your application must call processEvents. Since processEvents
    modifies the internal ChronoSync data structures, your application
    should make sure that it calls processEvents in the same thread as
    publishNextSequenceNo() (which also modifies the data structures).

    :param Blob applicationInfo: (optional) This appends applicationInfo to
      the content of the sync messages. This same info is provided to the
      receiving application in the SyncState state object provided to the
      onReceivedSyncState callback.
    """
    # Coerce to a Blob. Blob(None) yields a null Blob, skipped below.
    if not isinstance(applicationInfo, Blob):
        applicationInfo = Blob(applicationInfo)

    self._sequenceNo += 1

    # Build the one-entry sync message for our own new sequence number.
    message = SyncStateMsg()
    # Use getattr to avoid pylint errors.
    state = getattr(message, "ss").add()
    state.name = self._applicationDataPrefixUri
    state.type = SyncState_UPDATE
    state.seqno.seq = self._sequenceNo
    state.seqno.session = self._sessionNo
    if not applicationInfo.isNull() and applicationInfo.size() > 0:
        state.application_info = applicationInfo.toBytes()

    # Publish the sync message under the current (pre-update) root digest.
    self._broadcastSyncState(self._digestTree.getRoot(), message)

    if not self._update(getattr(message, "ss")):
        # Since we incremented the sequence number, we expect there to be a
        # new digest log entry.
        raise RuntimeError(
          "ChronoSync: update did not create a new digest log entry")

    # TODO: Should we have an option to not express an interest if this is
    #   the final publish of the session?

    # Express an interest for the next sync update with the new root digest.
    syncInterest = Interest(self._applicationBroadcastPrefix)
    syncInterest.getName().append(self._digestTree.getRoot())
    syncInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
    self._face.expressInterest(syncInterest, self._onData, self._syncTimeout)
def _initialTimeOut(self, interest):
    """
    Handle the time-out of the initial sync interest, which means there are
    no other publishers yet: record our own initial state, notify the
    application, and retry the sync interest under the new root digest.
    """
    if not self._enabled:
        # Ignore callbacks after the application calls shutdown().
        return
    log = logging.getLogger(__name__)
    log.info("initial sync timeout")
    log.info("no other people")

    self._sequenceNo += 1
    if self._sequenceNo != 0:
        # Since there were no other users, we expect sequence no 0.
        raise RuntimeError(
          "ChronoSync: sequenceNo_ is not the expected value of 0 for first use.")

    # Add our own initial sync state to the digest tree and digest log.
    message = SyncStateMsg()
    # Use getattr to avoid pylint errors.
    state = getattr(message, "ss").add()
    state.name = self._applicationDataPrefixUri
    state.type = SyncState_UPDATE
    state.seqno.seq = self._sequenceNo
    state.seqno.session = self._sessionNo
    self._update(getattr(message, "ss"))

    try:
        self._onInitialized()
    except:
        logging.exception("Error in onInitialized")

    # Retry the sync interest, now under the new root digest.
    name = Name(self._applicationBroadcastPrefix)
    name.append(self._digestTree.getRoot())
    retryInterest = Interest(name)
    retryInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
    self._face.expressInterest(retryInterest, self._onData, self._syncTimeout)
    log.info("Syncinterest expressed:")
    log.info("%s", name.toUri())
def __init__(self, onReceivedSyncState, onInitialized, applicationDataPrefix,
  applicationBroadcastPrefix, sessionNo, face, keyChain, certificateName,
  syncLifetime, onRegisterFailed):
    """
    Store the callbacks and configuration, seed the digest log with the
    initial "00" entry, register the broadcast prefix with the content
    cache, and express the initial sync interest for digest "00".
    """
    # Application callbacks.
    self._onReceivedSyncState = onReceivedSyncState
    self._onInitialized = onInitialized
    # Configuration (copy Names so later caller mutation can't affect us).
    self._applicationDataPrefixUri = applicationDataPrefix.toUri()
    self._applicationBroadcastPrefix = Name(applicationBroadcastPrefix)
    self._sessionNo = sessionNo
    self._face = face
    self._keyChain = keyChain
    self._certificateName = Name(certificateName)
    self._syncLifetime = syncLifetime
    self._contentCache = MemoryContentCache(face)
    # Sync state.
    self._digestTree = DigestTree()
    self._sequenceNo = -1
    self._enabled = True

    # Seed the digest log with an empty entry for the initial digest "00".
    emptyContent = SyncStateMsg()
    # Use getattr to avoid pylint errors.
    self._digestLog = [
      self._DigestLogEntry("00", getattr(emptyContent, "ss"))]  # of _DigestLogEntry

    # Register the prefix with the contentCache_ and use our own onInterest
    # as the onDataNotFound fallback.
    self._contentCache.registerPrefix(
      self._applicationBroadcastPrefix, onRegisterFailed, self._onInterest)

    # Express the initial sync interest for digest "00".
    initialInterest = Interest(self._applicationBroadcastPrefix)
    initialInterest.getName().append("00")
    initialInterest.setInterestLifetimeMilliseconds(1000)
    initialInterest.setMustBeFresh(True)
    face.expressInterest(initialInterest, self._onData, self._initialTimeOut)
    log = logging.getLogger(__name__)
    log.info("initial sync expressed")
    log.info("%s", initialInterest.getName().toUri())
def _initialOndata(self, content):
    """
    Process initial data which usually includes all other publisher's info,
    and send back the new comer's own info.
    """
    # The user is a new comer and receive data of all other people in the group.
    self._update(content)
    # Remember the root digest before the updates below change it; it is
    # used when broadcasting our own state further down.
    digest = self._digestTree.getRoot()
    for i in range(len(content)):
        syncState = content[i]
        if (syncState.name == self._applicationDataPrefixUri and
            syncState.seqno.session == self._sessionNo):
            # If the user was an old comer, after add the static log he
            # needs to increase his sequence number by 1.
            tempContent = SyncStateMsg()
            # Use getattr to avoid pylint errors.
            content2 = getattr(tempContent, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = syncState.seqno.seq + 1
            content2.seqno.session = self._sessionNo
            if self._update(getattr(tempContent, "ss")):
                try:
                    self._onInitialized()
                except:
                    logging.exception("Error in onInitialized")

    # Broadcast our own state (sequence number, or 0 if we have not yet
    # published) under the pre-update digest captured above.
    tempContent2 = SyncStateMsg()
    if self._sequenceNo >= 0:
        # Send the data packet with the new sequence number back.
        content2 = getattr(tempContent2, "ss").add()
        content2.name = self._applicationDataPrefixUri
        content2.type = SyncState_UPDATE
        content2.seqno.seq = self._sequenceNo
        content2.seqno.session = self._sessionNo
    else:
        content2 = getattr(tempContent2, "ss").add()
        content2.name = self._applicationDataPrefixUri
        content2.type = SyncState_UPDATE
        content2.seqno.seq = 0
        content2.seqno.session = self._sessionNo

    self._broadcastSyncState(digest, tempContent2)

    if (self._digestTree.find(self._applicationDataPrefixUri,
                              self._sessionNo) == -1):
        # The user hasn't put himself in the digest tree.
        logging.getLogger(__name__).info("initial state")
        self._sequenceNo += 1
        tempContent = SyncStateMsg()
        content2 = getattr(tempContent, "ss").add()
        content2.name = self._applicationDataPrefixUri
        content2.type = SyncState_UPDATE
        content2.seqno.seq = self._sequenceNo
        content2.seqno.session = self._sessionNo
        if self._update(getattr(tempContent, "ss")):
            try:
                self._onInitialized()
            except:
                logging.exception("Error in onInitialized")
def _processSyncInterest(self, index, syncDigest, face):
    """
    Common interest processing, using digest log to find the difference
    after syncDigest.

    Scan the digest log entries after index, collapse them to the latest
    UPDATE per producer still present in the digest tree, and if any remain
    send them back as a signed Data packet named broadcastPrefix + syncDigest.

    :param int index: The digest log index after which to scan for updates.
    :param str syncDigest: The digest from the sync interest, appended to the
      broadcast prefix to form the reply Data name.
    :param Face face: The face used to send the reply Data packet.
    :return: True if sent a data packet to satisfy the interest, otherwise
      False.
    :rtype: bool
    """
    nameList = []       # of str
    sequenceNoList = [] # of int
    sessionNoList = []  # of int
    # Map a producer name to its index in nameList so repeated updates keep
    # only the latest state. (Replaces the original O(n^2) rescan of
    # nameList for every sync state; append order is preserved.)
    nameToIndex = {}
    for j in range(index + 1, len(self._digestLog)):
        temp = self._digestLog[j].getData() # array of sync_state_pb2.SyncState.
        for i in range(len(temp)):
            syncState = temp[i]
            if syncState.type != SyncState_UPDATE:
                continue

            # Only include producers currently present in the digest tree.
            if self._digestTree.find(
                  syncState.name, syncState.seqno.session) != -1:
                n = nameToIndex.get(syncState.name, -1)
                if n == -1:
                    nameToIndex[syncState.name] = len(nameList)
                    nameList.append(syncState.name)
                    sequenceNoList.append(syncState.seqno.seq)
                    sessionNoList.append(syncState.seqno.session)
                else:
                    # A later log entry supersedes the earlier one.
                    sequenceNoList[n] = syncState.seqno.seq
                    sessionNoList[n] = syncState.seqno.session

    tempContent = SyncStateMsg()
    for i in range(len(nameList)):
        # Use getattr to avoid pylint errors.
        content = getattr(tempContent, "ss").add()
        content.name = nameList[i]
        content.type = SyncState_UPDATE
        content.seqno.seq = sequenceNoList[i]
        content.seqno.session = sessionNoList[i]

    sent = False
    if len(getattr(tempContent, "ss")) != 0:
        name = Name(self._applicationBroadcastPrefix)
        name.append(syncDigest)
        # TODO: Check if this works in Python 3.
        #pylint: disable=E1103
        array = tempContent.SerializeToString()
        #pylint: enable=E1103
        data = Data(name)
        data.setContent(Blob(array))
        self._keyChain.sign(data, self._certificateName)
        try:
            face.putData(data)
        except Exception as ex:
            logging.getLogger(__name__).error("Error in face.putData: %s",
              str(ex))
            # Fix: return False, not a bare return. The docstring promises
            # :rtype: bool but the original returned None on a send error.
            return False

        sent = True
        logging.getLogger(__name__).info("Sync Data send")
        logging.getLogger(__name__).info("%s", name.toUri())

    return sent