Example No. 1
    def setupComplete(self):
        # The cache will clear old values every 100s
        self._dataCache = MemoryContentCache(self.face, 100000)
        self._dataPrefix = Name(self.prefix).append('data')
        self.registerCachePrefix()
        print("Serving data at {}".format(self._dataPrefix.toUri()))
        self.loop.call_soon(self.publishData)
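For context, here is a minimal self-contained sketch of the same MemoryContentCache pattern (a sketch only, assuming the pyndn package is installed and a local NDN forwarder is running; the prefix and content are illustrative):

import time
from pyndn import Data, Face, Name
from pyndn.security import KeyChain
from pyndn.util.memory_content_cache import MemoryContentCache

face = Face()  # connects to the local forwarder by default
keyChain = KeyChain()
face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

# The second constructor argument is the cleanup interval in milliseconds,
# so 100000 matches the "every 100s" comment above.
cache = MemoryContentCache(face, 100000)

def onRegisterFailed(prefix):
    print("Register failed for " + prefix.toUri())

cache.registerPrefix(Name("/example/data"), onRegisterFailed)

# Publish one versioned, signed Data packet into the cache.
data = Data(Name("/example/data").appendVersion(int(time.time() * 1000)))
data.setContent("hello")
data.getMetaInfo().setFreshnessPeriod(10000)
keyChain.sign(data, keyChain.getDefaultCertificateName())
cache.add(data)

while True:
    face.processEvents()
    time.sleep(0.01)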
Example No. 2
    def __init__(self, onReceivedSyncState, onInitialized,
      applicationDataPrefix, applicationBroadcastPrefix, sessionNo, face,
      keyChain, certificateName, syncLifetime, onRegisterFailed):
        self._onReceivedSyncState = onReceivedSyncState
        self._onInitialized = onInitialized
        self._applicationDataPrefixUri = applicationDataPrefix.toUri()
        self._applicationBroadcastPrefix = Name(applicationBroadcastPrefix)
        self._sessionNo = sessionNo
        self._face = face
        self._keyChain = keyChain
        self._certificateName = Name(certificateName)
        self._syncLifetime = syncLifetime
        self._contentCache = MemoryContentCache(face)

        self._digestLog = [] # of _DigestLogEntry
        self._digestTree = DigestTree()
        self._sequenceNo = -1
        self._enabled = True

        emptyContent = sync_state_pb2.SyncStateMsg()
        # Use getattr to avoid pylint errors.
        self._digestLog.append(self._DigestLogEntry("00", getattr(emptyContent, "ss")))

        # Register the prefix with the contentCache_ and use our own onInterest
        #   as the onDataNotFound fallback.
        self._contentCache.registerPrefix(
          self._applicationBroadcastPrefix, onRegisterFailed, self._onInterest)

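        # Express the initial sync interest for root digest "00"; if no other
        #   publisher answers within the lifetime, the _initialTimeOut callback fires.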
        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append("00")
        interest.setInterestLifetimeMilliseconds(1000)
        interest.setMustBeFresh(True)
        face.expressInterest(interest, self._onData, self._initialTimeOut)
        logging.getLogger(__name__).info("initial sync expressed")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())
Example No. 3
def main():
    # Params parsing
    parser = argparse.ArgumentParser(
        description=
        'BMS gateway node to parse or follow a Cascade Datahub log and publish to MiniNdn.'
    )
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f',
                        dest='follow',
                        action='store_true',
                        help='follow (tail -f) the log file')
    parser.add_argument('--namespace',
                        default='/ndn/nist/bms',
                        help='root of ndn name, no trailing slash')

    parser.add_argument('--image',
                        dest='image',
                        default='../simulator/res/floor2.jpg',
                        help='the floor plan to publish')
    parser.add_argument('--location',
                        dest='location',
                        default='../simulator/res/locations.txt',
                        help='the sensor locations file to publish')

    args = parser.parse_args()

    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop)

    keyChain = KeyChain(IdentityManager(BasicIdentityStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    certificateName = keyChain.createIdentityAndCertificate(
        Name("/ndn/nist/gateway"))
    face.setCommandSigningInfo(keyChain, certificateName)
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace,
                                  args.image, args.location)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed,
                         dataPublisher.onDataNotFound)
    loop.run_until_complete(dataPublisher.publishFloorImage())

    if args.follow:
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))

    loop.run_forever()
    face.shutdown()
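DataPublisher above is application code that does not appear on this page. A hypothetical skeleton showing only the callback signatures that MemoryContentCache.registerPrefix expects (every name and body here is an assumption, not the original class) could look like:

class DataPublisher(object):
    # Hypothetical skeleton; the real class is defined elsewhere.
    def __init__(self, face, keyChain, loop, cache, namespace, image, location):
        self._face = face
        self._keyChain = keyChain
        self._loop = loop
        self._cache = cache
        self._namespace = namespace

    def onRegisterFailed(self, prefix):
        # MemoryContentCache calls this with the prefix that failed to register.
        print("Register failed for " + prefix.toUri())

    def onDataNotFound(self, prefix, interest, face, interestFilterId, filter):
        # Called when no cached Data packet matches an incoming interest.
        print("Data not found for " + interest.getName().toUri())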
Example No. 4
class AppProducer():
    def __init__(self, face, certificateName, keyChain, dataPrefix, security = False):
        self._keyChain = keyChain
        self._certificateName = certificateName
        self._face = face
        self._dataPrefix = dataPrefix
        self._security = security
        return

    def start(self):
        self._dataCache = MemoryContentCache(self._face, 100000)
        self.registerCachePrefix()
        print "Serving data at {}".format(self._dataPrefix.toUri())
        return

    def registerCachePrefix(self):
        self._dataCache.registerPrefix(self._dataPrefix, self.cacheRegisterFail, self.onDataMissing)

    def cacheRegisterFail(self, prefix):
        # just try again
        self.log.warn('Could not register data cache')
        self.registerCachePrefix()

    def onDataMissing(self, prefix, interest, face, interestFilterId, filter):
        print "data not found for " + interest.getName().toUri()
        # let it timeout

    def onBtleData(self, data):
        # expect data format like "0.2,0.1,0.3"
        content = data.getContent().toRawStr()
        print "got data: " + data.getName().toUri() + " : " + content

        
        if self._security:
            # Hmac verify the data we receive
            pass

        pyr = content.split(',')
        if len(pyr) >= 3:
            resultingContent = "{\"p\":" + pyr[0] + ",\"y\":" + pyr[1] + ",\"r\":" + pyr[2] + "}"
            timestamp = time.time() * 1000
            dataOut = Data(Name(self._dataPrefix).appendVersion(int(timestamp)))
            dataOut.setContent(resultingContent)
            dataOut.getMetaInfo().setFreshnessPeriod(10000)
            self._keyChain.sign(dataOut, self._certificateName)

            self._dataCache.add(dataOut)
            print "data added: " + dataOut.getName().toUri()

    def makePublicKeyInterest(self):
        interest = Interest(Name("/"))
        interest.getName().append(self._keyChain.getCertificate(self._certificateName).getPublicKeyInfo().getKeyDer())
        return interest
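A short sketch of wiring AppProducer to a Face (assuming a default identity already exists in the KeyChain; the data prefix is illustrative):

import time
from pyndn import Face, Name
from pyndn.security import KeyChain

face = Face()
keyChain = KeyChain()
certificateName = keyChain.getDefaultCertificateName()
face.setCommandSigningInfo(keyChain, certificateName)

producer = AppProducer(face, certificateName, keyChain, Name("/home/btle/pyr"))
producer.start()

while True:
    face.processEvents()
    time.sleep(0.01)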
Example No. 5
def main():
    # Params parsing
    parser = argparse.ArgumentParser(
        description=
        'BMS gateway node to parse or follow a Cascade Datahub log and publish to MiniNdn.'
    )
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f',
                        dest='follow',
                        action='store_true',
                        help='follow (tail -f) the log file')
    parser.add_argument('--namespace',
                        default='/ndn/edu/ucla/remap/bms',
                        help='root of ndn name, no trailing slash')
    args = parser.parse_args()

    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "128.97.98.7")

    keyChain = KeyChain(
        IdentityManager(BasicIdentityStorage(), FilePrivateKeyStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    #certificateName = keyChain.createIdentityAndCertificate(Name("/ndn/bms/gateway-publisher"))
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())
    print "Using certificate name " + keyChain.getDefaultCertificateName(
    ).toUri()
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed,
                         dataPublisher.onDataNotFound)

    # Parse csv to decide the mapping between sensor JSON -> <NDN name, data type>
    dataPublisher.populateSensorNDNDictFromCSV(
        'bms-sensor-data-types-sanitized.csv')

    loop.call_later(dataPublisher._restartInterval, dataPublisher.checkAlive)
    if args.follow:
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))

    loop.run_forever()
    face.shutdown()
Example No. 6
class AppProducer():
    def __init__(self, face, certificateName, keyChain, dataPrefix):
        self._keyChain = keyChain
        self._certificateName = certificateName
        self._face = face
        self._dataPrefix = dataPrefix
        return

    def start(self):
        self._dataCache = MemoryContentCache(self._face, 100000)
        self.registerCachePrefix()
        print "Serving data at {}".format(self._dataPrefix.toUri())
        self._face.callLater(5000, self.publishData)
        return

    def registerCachePrefix(self):
        self._dataCache.registerPrefix(self._dataPrefix, self.cacheRegisterFail, self.onDataMissing)

    def cacheRegisterFail(self, prefix):
        # just try again
        self.log.warn('Could not register data cache')
        self.registerCachePrefix()

    def onDataMissing(self, prefix, interest, face, interestFilterId, filter):
        print "data not found for " + interest.getName().toUri()
        # let it timeout

    def publishData(self):
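        # 'ps' is presumably psutil (e.g. "import psutil as ps" in the original
        #   module); 'json' and 'time' are also assumed to be imported there.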
        timestamp = time.time() 
        cpu_use = ps.cpu_percent()
        users = [u.name for u in ps.users()]
        nProcesses = len(ps.pids())
        memUse = ps.virtual_memory().percent
        swapUse = ps.swap_memory().percent

        info = {'cpu_usage':cpu_use, 'users':users, 'processes':nProcesses,
                 'memory_usage':memUse, 'swap_usage':swapUse}
    
        dataOut = Data(Name(self._dataPrefix).appendVersion(int(timestamp)))
        dataOut.setContent(json.dumps(info))
        dataOut.getMetaInfo().setFreshnessPeriod(10000)
        self._keyChain.sign(dataOut, self._certificateName)

        self._dataCache.add(dataOut)
        print "data added: " + dataOut.getName().toUri()

        # repeat every 5 seconds
        self._face.callLater(5000, self.publishData)
Example No. 7
def main():

    # COMMAND LINE ARGS
    parser = argparse.ArgumentParser(
        description='Parse or follow Cascade Datahub log and publish to NDN.')
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f',
                        dest='follow',
                        action='store_true',
                        help='follow (tail -f) the log file')
    parser.add_argument('--namespace',
                        default='/ndn/edu/ucla/remap/bms',
                        help='root ndn name, no trailing slash')
    args = parser.parse_args()

    # NDN
    global face, keychain
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "localhost")

    keychain = KeyChain(
        IdentityManager(
            BasicIdentityStorage(),
            FilePrivateKeyStorage()))  # override default even for MacOS
    cache = MemoryContentCache(face)

    # READ THE FILE (MAIN LOOP)
    if args.follow:
        loop.run_until_complete(
            followfile(args.filename, args.namespace, cache))
    else:
        loop.run_until_complete(readfile(args.filename, args.namespace, cache))

    face.shutdown()
Example No. 8
    def __init__(self, onReceivedSyncState, onInitialized,
      applicationDataPrefix, applicationBroadcastPrefix, sessionNo, face,
      keyChain, certificateName, syncLifetime, onRegisterFailed):
        self._onReceivedSyncState = onReceivedSyncState
        self._onInitialized = onInitialized
        self._applicationDataPrefixUri = applicationDataPrefix.toUri()
        self._applicationBroadcastPrefix = Name(applicationBroadcastPrefix)
        self._sessionNo = sessionNo
        self._face = face
        self._keyChain = keyChain
        self._certificateName = Name(certificateName)
        self._syncLifetime = syncLifetime
        self._contentCache = MemoryContentCache(face)

        self._digestLog = [] # of _DigestLogEntry
        self._digestTree = DigestTree()
        self._sequenceNo = -1
        self._enabled = True

        emptyContent = SyncStateMsg()
        # Use getattr to avoid pylint errors.
        self._digestLog.append(self._DigestLogEntry("00", getattr(emptyContent, "ss")))

        # Register the prefix with the contentCache_ and use our own onInterest
        #   as the onDataNotFound fallback.
        self._contentCache.registerPrefix(
          self._applicationBroadcastPrefix, onRegisterFailed, self._onInterest)

        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append("00")
        interest.setInterestLifetimeMilliseconds(1000)
        interest.setMustBeFresh(True)
        face.expressInterest(interest, self._onData, self._initialTimeOut)
        logging.getLogger(__name__).info("initial sync expressed")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())
Example No. 9
    def setupComplete(self):
        # The cache will clear old values every 100s
        self._dataCache = MemoryContentCache(self.face, 100000)
        self._dataPrefix = Name(self.prefix).append('data')
        self.registerCachePrefix()
        print("Serving data at {}".format(self._dataPrefix.toUri()))
        self.loop.call_soon(self.publishData)
Example No. 10
    def start(self):
        self.loop = asyncio.new_event_loop()
        self.face = ThreadsafeFace(self.loop, "")
        self.dataCache = MemoryContentCache(self.face, 100)

        asyncio.set_event_loop(self.loop)
        self.face.setCommandSigningInfo(self.keychain, self.certificateName)

        self.dataCache.registerPrefix(self.dataPrefix, self.onRegisterFailed)
        self.loop.run_until_complete(self.insertNewVersion())
Example No. 11
def main(): 
    # Params parsing
    parser = argparse.ArgumentParser(description='BMS gateway node to parse or follow a Cascade Datahub log and publish to MiniNdn.')
    parser.add_argument('filename', help='datahub log file')
    parser.add_argument('-f', dest='follow', action='store_true', help='follow (tail -f) the log file')  
    parser.add_argument('--namespace', default='/ndn/edu/ucla/remap/bms', help='root of ndn name, no trailing slash')
    args = parser.parse_args()
    
    # Setup logging
    logger = Logger()
    logger.prepareLogging()

    # Face, KeyChain, memoryContentCache and asio event loop initialization
    loop = asyncio.get_event_loop()
    face = ThreadsafeFace(loop, "128.97.98.7")

    keyChain = KeyChain(IdentityManager(BasicIdentityStorage(), FilePrivateKeyStorage()))
    # For the gateway publisher, we create one identity for it to sign nfd command interests
    #certificateName = keyChain.createIdentityAndCertificate(Name("/ndn/bms/gateway-publisher"))
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())
    print "Using certificate name " + keyChain.getDefaultCertificateName().toUri()
    cache = MemoryContentCache(face)

    dataPublisher = DataPublisher(face, keyChain, loop, cache, args.namespace)
    cache.registerPrefix(Name(args.namespace), dataPublisher.onRegisterFailed, dataPublisher.onDataNotFound)
    
    # Parse csv to decide the mapping between sensor JSON -> <NDN name, data type>
    dataPublisher.populateSensorNDNDictFromCSV('bms-sensor-data-types-sanitized.csv')

    loop.call_later(dataPublisher._restartInterval, dataPublisher.checkAlive)
    if args.follow: 
        #asyncio.async(loop.run_in_executor(executor, followfile, args.filename, args.namespace, cache))
        loop.run_until_complete(dataPublisher.followfile(args.filename))
    else:
        loop.run_until_complete(dataPublisher.readfile(args.filename))
        
    loop.run_forever()
    face.shutdown()
Example No. 12
class MCCPublisher:
    def __init__(self, dataPrefix, dataSuffix, keychain=None):
        self.currentInsertion = -1
        self.currentStatus = -1
        self.face = None
        self.loop = None
        self.dataName = Name(dataPrefix).append(dataSuffix)
        self.dataPrefix = Name(dataPrefix)

        if keychain is not None:
            self.keychain = keychain
        else:
            self.keychain = KeyChain()

        self.certificateName = self.keychain.getDefaultCertificateName()

        self.fakeSignature = Sha256WithRsaSignature()

        self.failureCount = 0
        self.successCount = 0

        self.dataCache = None

    def onRegisterFailed(self, prefix):
        logger.error("Could not register data publishing face!")
        self.stop()

    def stop(self):
        self.loop.close()
        self.face.shutdown()

    def generateVersionedName(self):
        fullName = Name(self.dataName)
        # currently we need to provide the version ourselves when we
        # poke the repo
        ts = int(time.time()*1000)
        fullName.appendVersion(int(ts))
        return fullName

    def generateData(self, baseName):
        '''
           This appends the segment number to the data name, since repo-ng tends to expect it
        '''
        # just make up some data and return it
        ts = (time.time())
        segmentId = 0 # compatible with repo-ng test: may change to test segmented data

        versionStr = baseName.get(-1).toEscapedString()
        dataName = Name(baseName)
        dataName.appendSegment(segmentId)

        d = Data(dataName)
        content = "(" + str(ts) +  ") Data named " + dataName.toUri()
        d.setContent(content)
        d.getMetaInfo().setFinalBlockID(segmentId)
        d.getMetaInfo().setFreshnessPeriod(-1)
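        # 'shouldSign' and 'stats' are module-level globals in the original
        #   script; they are not defined in this excerpt.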
        if shouldSign:
            self.keychain.sign(d, self.certificateName)
        else:
            d.setSignature(self.fakeSignature)

        stats.insertDataForVersion(versionStr, {'publish_time':time.time()})
        logger.debug('Publishing: '+d.getName().toUri())
        return d

    def onTimeout(self, prefix):
        logger.warn('Timeout waiting for '+prefix.toUri())

    @asyncio.coroutine
    def insertNewVersion(self, interval=20):
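        # Note: "yield From(...)" below is trollius-style; the original module
        #   presumably uses "import trollius as asyncio" and "from trollius import From".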
        #while True:
            newVersion = self.generateVersionedName()
            versionStr = newVersion.get(-1).toEscapedString()
            logger.info('Inserting: '+versionStr)
            stats.insertDataForVersion(versionStr, {'insert_request':time.time()})

            newData = self.generateData(newVersion)

            self.dataCache.add(newData)
            stats.insertDataForVersion(versionStr, {'insert_complete':time.time()})
            yield From (self.insertNewVersion())

    def start(self):
        self.loop = asyncio.new_event_loop()
        self.face = ThreadsafeFace(self.loop, "")
        self.dataCache = MemoryContentCache(self.face, 100)

        asyncio.set_event_loop(self.loop)
        self.face.setCommandSigningInfo(self.keychain, self.certificateName)

        self.dataCache.registerPrefix(self.dataPrefix, self.onRegisterFailed)
        self.loop.run_until_complete(self.insertNewVersion())
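A hypothetical way to run MCCPublisher (assuming a default identity exists and the module-level logger, stats, and shouldSign globals referenced above are defined; start() blocks in the insertion loop):

publisher = MCCPublisher('/example/repo', 'testdata')
publisher.start()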
Example No. 13
    def start(self):
        self._dataCache = MemoryContentCache(self._face, 100000)
        self.registerCachePrefix()
        print("Serving data at {}".format(self._dataPrefix.toUri()))
        return
Example No. 14
class ChronoSync2013(object):
    """
    Create a new ChronoSync2013 to communicate using the given face. Initialize
    the digest log with a digest of "00" and an empty content. Register the
    applicationBroadcastPrefix to receive interests for sync state messages and
    express an interest for the initial root digest "00".
    Note: Your application must call processEvents. Since processEvents
    modifies the internal ChronoSync data structures, your application should
    make sure that it calls processEvents in the same thread as this
    constructor (which also modifies the data structures).

    :param onReceivedSyncState: When ChronoSync receives a sync state message,
      this calls onReceivedSyncState(syncStates, isRecovery) where syncStates is
      the list of SyncState messages and isRecovery is true if this is the initial
      list of SyncState messages or from a recovery interest. (For example, if
      isRecovery is true, a chat application would not want to re-display all
      the associated chat messages.) The callback should send interests to fetch
      the application data for the sequence numbers in the sync state.
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onReceivedSyncState: function object
    :param onInitialized: This calls onInitialized() when the first sync data is
      received (or the interest times out because there are no other publishers
      yet).
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onInitialized: function object
    :param Name applicationDataPrefix: The prefix used by this application instance
      for application data. For example, "/my/local/prefix/ndnchat4/0K4wChff2v".
      This is used when sending a sync message for a new sequence number.
      In the sync message, this uses applicationDataPrefix.toUri().
    :param Name applicationBroadcastPrefix: The broadcast name prefix including the
      application name. For example, "/ndn/broadcast/ChronoChat-0.3/ndnchat1".
      This makes a copy of the name.
    :param int sessionNo: The session number used with the applicationDataPrefix
      in sync state messages.
    :param Face face: The Face for calling registerPrefix and expressInterest. The
       Face object must remain valid for the life of this ChronoSync2013 object.
    :param KeyChain keyChain: To sign a data packet containing a sync state
      message, this calls keyChain.sign(data, certificateName).
    :param Name certificateName: The certificate name of the key to use for
      signing a data packet containing a sync state message.
    :param float syncLifetime: The interest lifetime in milliseconds for sending
      sync interests.
    :param onRegisterFailed: If failed to register the prefix to receive
      interests for the applicationBroadcastPrefix, this calls
      onRegisterFailed(applicationBroadcastPrefix).
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onRegisterFailed: function object
    """
    def __init__(self, onReceivedSyncState, onInitialized,
      applicationDataPrefix, applicationBroadcastPrefix, sessionNo, face,
      keyChain, certificateName, syncLifetime, onRegisterFailed):
        self._onReceivedSyncState = onReceivedSyncState
        self._onInitialized = onInitialized
        self._applicationDataPrefixUri = applicationDataPrefix.toUri()
        self._applicationBroadcastPrefix = Name(applicationBroadcastPrefix)
        self._sessionNo = sessionNo
        self._face = face
        self._keyChain = keyChain
        self._certificateName = Name(certificateName)
        self._syncLifetime = syncLifetime
        self._contentCache = MemoryContentCache(face)

        self._digestLog = [] # of _DigestLogEntry
        self._digestTree = DigestTree()
        self._sequenceNo = -1
        self._enabled = True

        emptyContent = SyncStateMsg()
        # Use getattr to avoid pylint errors.
        self._digestLog.append(self._DigestLogEntry("00", getattr(emptyContent, "ss")))

        # Register the prefix with the contentCache_ and use our own onInterest
        #   as the onDataNotFound fallback.
        self._contentCache.registerPrefix(
          self._applicationBroadcastPrefix, onRegisterFailed, self._onInterest)

        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append("00")
        interest.setInterestLifetimeMilliseconds(1000)
        interest.setMustBeFresh(True)
        face.expressInterest(interest, self._onData, self._initialTimeOut)
        logging.getLogger(__name__).info("initial sync expressed")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())

    class SyncState(object):
        """
        A SyncState holds the values of a sync state message which is passed to
        the onReceivedSyncState callback which was given to the ChronoSync2013
        constructor. Note: this has the same info as the Protobuf class
        sync_state_pb2.SyncState, but we make a separate class so
        that we don't need the Protobuf definition in the ChronoSync API.
        """
        def __init__(self, dataPrefixUri, sessionNo, sequenceNo, applicationInfo):
            self._dataPrefixUri = dataPrefixUri
            self._sessionNo = sessionNo
            self._sequenceNo = sequenceNo
            self._applicationInfo = applicationInfo

        def getDataPrefix(self):
            """
            Get the application data prefix for this sync state message.

            :return: The application data prefix as a Name URI string.
            :rtype: str
            """
            return self._dataPrefixUri

        def getSessionNo(self):
            """
            Get the session number associated with the application data prefix
            for this sync state message.

            :return: The session number.
            :rtype: int
            """
            return self._sessionNo

        def getSequenceNo(self):
            """
            Get the sequence number for this sync state message.

            :return: The sequence number.
            :rtype: int
            """
            return self._sequenceNo

        def getApplicationInfo(self):
            """
            Get the application info which was included when the sender
            published the next sequence number.

            :return: The applicationInfo Blob. If the sender did not provide
              any, return an isNull Blob.
            :rtype: Blob
            """
            return self._applicationInfo

    class PrefixAndSessionNo(object):
        """
        A PrefixAndSessionNo holds a user's data prefix and session number (used
        to return a list from getProducerPrefixes).
        """
        def __init__(self, dataPrefixUri, sessionNo):
            self._dataPrefixUri = dataPrefixUri
            self._sessionNo = sessionNo

        def getDataPrefix(self):
            """
            Get the application data prefix.

            :return: The application data prefix as a Name URI string.
            :rtype: str
            """
            return self._dataPrefixUri

        def getSessionNo(self):
            """
            Get the session number associated with the application data prefix.

            :return: The session number.
            :rtype: int
            """
            return self._sessionNo

    def getProducerPrefixes(self):
        """
        Get a copy of the current list of producer data prefixes, and the
        associated session number. You can use these in getProducerSequenceNo().
        This includes the prefix for this user.

        :return: A copy of the list of each producer prefix and session number.
        :rtype: array of ChronoSync2013.PrefixAndSessionNo
        """
        prefixes = []

        for i in range(self._digestTree.size()):
            node = self._digestTree.get(i)
            prefixes.append(ChronoSync2013.PrefixAndSessionNo
              (node.getDataPrefix(), node.getSessionNo()))

        return prefixes

    def getProducerSequenceNo(self, dataPrefix, sessionNo):
        """
        Get the current sequence number in the digest tree for the given
        producer dataPrefix and sessionNo.

        :param str dataPrefix: The producer data prefix as a Name URI string.
        :param int sessionNo: The producer session number.
        :return: The current producer sequence number, or -1 if the producer
          namePrefix and sessionNo are not in the digest tree.
        :rtype: int
        """
        index = self._digestTree.find(dataPrefix, sessionNo)
        if index < 0:
          return -1
        else:
          return self._digestTree.get(index).getSequenceNo()

    def publishNextSequenceNo(self, applicationInfo = None):
        """
        Increment the sequence number, create a sync message with the new
        sequence number and publish a data packet where the name is
        the applicationBroadcastPrefix + the root digest of the current digest
        tree. Then add the sync message to the digest tree and digest log which
        creates a new root digest. Finally, express an interest for the next sync
        update with the name applicationBroadcastPrefix + the new root digest.
        After this, your application should publish the content for the new
        sequence number. You can get the new sequence number with getSequenceNo().
        Note: Your application must call processEvents. Since processEvents
        modifies the internal ChronoSync data structures, your application should
        make sure that it calls processEvents in the same thread as
        publishNextSequenceNo() (which also modifies the data structures).

        :param Blob applicationInfo: (optional) This appends applicationInfo to
          the content of the sync messages. This same info is provided to the
          receiving application in the SyncState state object provided to the
          onReceivedSyncState callback.
        """
        applicationInfo = (applicationInfo if isinstance(applicationInfo, Blob)
          else Blob(applicationInfo))

        self._sequenceNo += 1

        syncMessage = SyncStateMsg()
        content = getattr(syncMessage, "ss").add()
        content.name = self._applicationDataPrefixUri
        content.type = SyncState_UPDATE
        content.seqno.seq = self._sequenceNo
        content.seqno.session = self._sessionNo
        if not applicationInfo.isNull() and applicationInfo.size() > 0:
            content.application_info = applicationInfo.toBytes()

        self._broadcastSyncState(self._digestTree.getRoot(), syncMessage)

        if not self._update(getattr(syncMessage, "ss")):
          # Since we incremented the sequence number, we expect there to be a
          #   new digest log entry.
          raise RuntimeError(
            "ChronoSync: update did not create a new digest log entry")

        # TODO: Should we have an option to not express an interest if this is the
        #   final publish of the session?
        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append(self._digestTree.getRoot())
        interest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(interest, self._onData, self._syncTimeout)

    def getSequenceNo(self):
        """
        Get the sequence number of the latest data published by this application
        instance.

        :return: The sequence number.
        :rtype: int
        """
        return self._sequenceNo

    def shutdown(self):
        """
        Unregister callbacks so that this does not respond to interests anymore.
        If you will discard this ChronoSync2013 object while your application is
        still running, you should call shutdown() first.  After calling this, you
        should not call publishNextSequenceNo() again since the behavior will be
        undefined.
        Note: Because this modifies internal ChronoSync data structures, your
        application should make sure that it calls processEvents in the same
        thread as shutdown() (which also modifies the data structures).
        """
        self._enabled = False
        self._contentCache.unregisterAll()

    class _DigestLogEntry(object):
        def __init__(self, digest, data):
            self._digest = digest
            # Copy.
            self._data = data[:]

        def getDigest(self):
            return self._digest

        def getData(self):
            """
            Get the data.

            :return: The data as a list.
            :rtype: array of sync_state_pb2.SyncState.
            """
            return self._data

    def _broadcastSyncState(self, digest, syncMessage):
        """
        Make a data packet with the syncMessage and with name
        applicationBroadcastPrefix_ + digest. Sign and send.

        :param str digest: The root digest as a hex string for the data packet
          name.
        :param sync_state_pb2.SyncState syncMessage:
        """
        data = Data(self._applicationBroadcastPrefix)
        data.getName().append(digest)
        # TODO: Check if this works in Python 3.
        data.setContent(Blob(syncMessage.SerializeToString()))
        self._keyChain.sign(data, self._certificateName)
        self._contentCache.add(data)

    def _update(self, content):
        """
        Update the digest tree with the messages in content. If the digest tree
        root is not in the digest log, also add a log entry with the content.

        :param content: The list of SyncState.
        :type content: array of sync_state_pb2.SyncState
        :return: True if added a digest log entry (because the updated digest
          tree root was not in the log), False if didn't add a log entry.
        :rtype: bool
        """
        for i in range(len(content)):
            syncState = content[i]

            if syncState.type == SyncState_UPDATE:
                if self._digestTree.update(
                  syncState.name, syncState.seqno.session,
                  syncState.seqno.seq):
                    # The digest tree was updated.
                    if self._applicationDataPrefixUri == syncState.name:
                        self._sequenceNo = syncState.seqno.seq

        if self._logFind(self._digestTree.getRoot()) == -1:
            self._digestLog.append(
              self._DigestLogEntry(self._digestTree.getRoot(), content))
            return True
        else:
            return False

    def _logFind(self, digest):
        """
        Search the digest log by digest.
        """
        for i in range(len(self._digestLog)):
            if digest == self._digestLog[i].getDigest():
                return i

        return -1

    def _onInterest(self, prefix, interest, face, interestFilterId, filter):
        """
        Process the sync interest from the applicationBroadcastPrefix. If we
        can't satisfy the interest, add it to the pending interest table in
        the _contentCache so that a future call to contentCacheAdd may satisfy it.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        # Search if the digest already exists in the digest log.
        logging.getLogger(__name__).info("Sync Interest received in callback.")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())

        syncDigest = interest.getName().get(
          self._applicationBroadcastPrefix.size()).toEscapedString()
        if interest.getName().size() == self._applicationBroadcastPrefix.size() + 2:
            # Assume this is a recovery interest.
            syncDigest = interest.getName().get(
              self._applicationBroadcastPrefix.size() + 1).toEscapedString()
        logging.getLogger(__name__).info("syncDigest: %s", syncDigest)
        if (interest.getName().size() == self._applicationBroadcastPrefix.size() + 2 or
             syncDigest == "00"):
            # Recovery interest or newcomer interest.
            self._processRecoveryInterest(interest, syncDigest, face)
        else:
            self._contentCache.storePendingInterest(interest, face)

            if syncDigest != self._digestTree.getRoot():
                index = self._logFind(syncDigest)
                if index == -1:
                    # To see whether there is any data packet coming back, wait
                    #   2 seconds using the Interest timeout mechanism.
                    # TODO: Are we sure using a "/local/timeout" interest is the
                    #   best future call approach?
                    timeout = Interest(Name("/local/timeout"))
                    timeout.setInterestLifetimeMilliseconds(2000)
                    self._face.expressInterest(
                      timeout, self._dummyOnData,
                      self._makeJudgeRecovery(syncDigest, face))
                    logging.getLogger(__name__).info("set timer recover")
                else:
                    # common interest processing
                    self._processSyncInterest(index, syncDigest, face)

    def _onData(self, interest, data):
        """
        Process Sync Data.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info(
          "Sync ContentObject received in callback")
        logging.getLogger(__name__).info(
            "name: %s", data.getName().toUri())
        # TODO: Check if this works in Python 3.
        tempContent = SyncStateMsg()
#pylint: disable=E1103
        tempContent.ParseFromString(data.getContent().toBytes())
#pylint: enable=E1103
        content = getattr(tempContent, "ss")
        if self._digestTree.getRoot() == "00":
            isRecovery = True
            #processing initial sync data
            self._initialOndata(content)
        else:
            self._update(content)
            if (interest.getName().size() ==
                self._applicationBroadcastPrefix.size() + 2):
                # Assume this is a recovery interest.
                isRecovery = True
            else:
                isRecovery = False

        # Send the interests to fetch the application data.
        syncStates = []
        for i in range(len(content)):
            syncState = content[i]

            # Only report UPDATE sync states.
            if syncState.type == SyncState_UPDATE:
                if len(syncState.application_info) > 0:
                    applicationInfo = Blob(syncState.application_info, True)
                else:
                    applicationInfo = Blob()

                syncStates.append(self.SyncState(
                  syncState.name, syncState.seqno.session,
                  syncState.seqno.seq, applicationInfo))

        try:
            self._onReceivedSyncState(syncStates, isRecovery)
        except:
            logging.exception("Error in onReceivedSyncState")

        name = Name(self._applicationBroadcastPrefix)
        name.append(self._digestTree.getRoot())
        syncInterest = Interest(name)
        syncInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(syncInterest, self._onData, self._syncTimeout)
        logging.getLogger(__name__).info("Syncinterest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _initialTimeOut(self, interest):
        """
        Initial sync interest timeout, which means there are no other publishers
        yet.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info("initial sync timeout")
        logging.getLogger(__name__).info("no other people")
        self._sequenceNo += 1
        if self._sequenceNo != 0:
            # Since there were no other users, we expect sequence no 0.
            raise RuntimeError(
              "ChronoSync: sequenceNo_ is not the expected value of 0 for first use.")

        tempContent = SyncStateMsg()
        content = getattr(tempContent, "ss").add()
        content.name = self._applicationDataPrefixUri
        content.type = SyncState_UPDATE
        content.seqno.seq = self._sequenceNo
        content.seqno.session = self._sessionNo
        self._update(getattr(tempContent, "ss"))

        try:
            self._onInitialized()
        except:
            logging.exception("Error in onInitialized")

        name = Name(self._applicationBroadcastPrefix)
        name.append(self._digestTree.getRoot())
        retryInterest = Interest(name)
        retryInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(retryInterest, self._onData, self._syncTimeout)
        logging.getLogger(__name__).info("Syncinterest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _processRecoveryInterest(self, interest, syncDigest, face):
        logging.getLogger(__name__).info("processRecoveryInterest")
        if self._logFind(syncDigest) != -1:
            tempContent = SyncStateMsg()
            for i in range(self._digestTree.size()):
                content = getattr(tempContent, "ss").add()
                content.name = self._digestTree.get(i).getDataPrefix()
                content.type = SyncState_UPDATE
                content.seqno.seq = self._digestTree.get(i).getSequenceNo()
                content.seqno.session = self._digestTree.get(i).getSessionNo()

            if len(getattr(tempContent, "ss")) != 0:
                # TODO: Check if this works in Python 3.
#pylint: disable=E1103
                array = tempContent.SerializeToString()
#pylint: enable=E1103
                data = Data(interest.getName())
                data.setContent(Blob(array))
                if interest.getName().get(-1).toEscapedString() == "00":
                    # Limit the lifetime of replies to interest for "00" since
                    # they can be different.
                    data.getMetaInfo().setFreshnessPeriod(1000)

                self._keyChain.sign(data, self._certificateName)
                try:
                    face.putData(data)
                except Exception as ex:
                    logging.getLogger(__name__).error(
                      "Error in face.putData: %s", str(ex))
                    return

                logging.getLogger(__name__).info("send recovery data back")
                logging.getLogger(__name__).info("%s", interest.getName().toUri())

    def _processSyncInterest(self, index, syncDigest, face):
        """
        Common interest processing, using digest log to find the difference
        after syncDigest.

        :return: True if sent a data packet to satisfy the interest, otherwise
          False.
        :rtype: bool
        """
        nameList = []       # of str
        sequenceNoList = [] # of int
        sessionNoList = []  # of int
        for j in range(index + 1, len(self._digestLog)):
            temp = self._digestLog[j].getData() # array of sync_state_pb2.SyncState.
            for i in range(len(temp)):
                syncState = temp[i]
                if syncState.type != SyncState_UPDATE:
                    continue

                if self._digestTree.find(
                      syncState.name, syncState.seqno.session) != -1:
                    n = -1
                    for k in range(len(nameList)):
                        if nameList[k] == syncState.name:
                            n = k
                            break

                    if n == -1:
                        nameList.append(syncState.name)
                        sequenceNoList.append(syncState.seqno.seq)
                        sessionNoList.append(syncState.seqno.session)
                    else:
                        sequenceNoList[n] = syncState.seqno.seq
                        sessionNoList[n] = syncState.seqno.session

        tempContent = SyncStateMsg()
        for i in range(len(nameList)):
            content = getattr(tempContent, "ss").add()
            content.name = nameList[i]
            content.type = SyncState_UPDATE
            content.seqno.seq = sequenceNoList[i]
            content.seqno.session = sessionNoList[i]

        sent = False
        if len(getattr(tempContent, "ss")) != 0:
            name = Name(self._applicationBroadcastPrefix)
            name.append(syncDigest)
            # TODO: Check if this works in Python 3.
#pylint: disable=E1103
            array = tempContent.SerializeToString()
#pylint: enable=E1103
            data = Data(name)
            data.setContent(Blob(array))
            self._keyChain.sign(data, self._certificateName)

            try:
                face.putData(data)
            except Exception as ex:
                logging.getLogger(__name__).error(
                  "Error in face.putData: %s", str(ex))
                return

            sent = True
            logging.getLogger(__name__).info("Sync Data send")
            logging.getLogger(__name__).info("%s", name.toUri())

        return sent

    def _sendRecovery(self, syncDigest):
        """
        Send Recovery Interest.
        """
        logging.getLogger(__name__).info("unknown digest: ")
        name = Name(self._applicationBroadcastPrefix)
        name.append("recovery").append(syncDigest)
        interest = Interest(name)
        interest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(interest, self._onData, self._syncTimeout)
        logging.getLogger(__name__).info("Recovery Syncinterest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _makeJudgeRecovery(self, syncDigest, face):
        """
        Return a function for onTimeout which calls _judgeRecovery.
        """
        def f(interest):
            self._judgeRecovery(interest, syncDigest, face)
        return f

    def _judgeRecovery(self, interest, syncDigest, face):
        """
        This is called by _onInterest after a timeout to check if a recovery is
        needed.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        index2 = self._logFind(syncDigest)
        if index2 != -1:
            if syncDigest != self._digestTree.getRoot():
                self._processSyncInterest(index2, syncDigest, face)
        else:
            self._sendRecovery(syncDigest)

    def _syncTimeout(self, interest):
        """
        Sync interest time out.  If the interest is the static one send again.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info("Sync Interest time out.")
        logging.getLogger(__name__).info(
          "Sync Interest name: %s", interest.getName().toUri())
        component = interest.getName().get(
          self._applicationBroadcastPrefix.size()).toEscapedString()
        if component == self._digestTree.getRoot():
            name = Name(interest.getName())
            retryInterest = Interest(interest.getName())
            retryInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
            self._face.expressInterest(
              retryInterest, self._onData, self._syncTimeout)

            logging.getLogger(__name__).info("Syncinterest expressed:")
            logging.getLogger(__name__).info("%s", name.toUri())

    def _initialOndata(self, content):
        """
        Process initial data, which usually includes all the other publishers'
        info, and send back the newcomer's own info.
        """
        # The user is a newcomer and receives data about all the other people in the group.
        self._update(content)
        digest = self._digestTree.getRoot()
        for i in range(len(content)):
            syncState = content[i]
            if (syncState.name == self._applicationDataPrefixUri and
                  syncState.seqno.session == self._sessionNo):
                # If the user is a returning participant, then after adding the
                #   saved log entry, he needs to increase his sequence number by 1.
                tempContent = SyncStateMsg()
                # Use getattr to avoid pylint errors.
                content2 = getattr(tempContent, "ss").add()
                content2.name = self._applicationDataPrefixUri
                content2.type = SyncState_UPDATE
                content2.seqno.seq = syncState.seqno.seq + 1
                content2.seqno.session = self._sessionNo

                if self._update(getattr(tempContent, "ss")):
                    try:
                        self._onInitialized()
                    except:
                        logging.exception("Error in onInitialized")

        tempContent2 = SyncStateMsg()
        if self._sequenceNo >= 0:
            # Send the data packet with the new sequence number back.
            content2 = getattr(tempContent2, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = self._sequenceNo
            content2.seqno.session = self._sessionNo
        else:
            content2 = getattr(tempContent2, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = 0
            content2.seqno.session = self._sessionNo

        self._broadcastSyncState(digest, tempContent2)

        if (self._digestTree.find(self._applicationDataPrefixUri, self._sessionNo)
             == -1):
            # The user hasn't put himself in the digest tree.
            logging.getLogger(__name__).info("initial state")
            self._sequenceNo += 1
            tempContent = SyncStateMsg()
            content2 = getattr(tempContent, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = self._sequenceNo
            content2.seqno.session = self._sessionNo

            if self._update(getattr(tempContent, "ss")):
                try:
                    self._onInitialized()
                except:
                    logging.exception("Error in onInitialized")

    @staticmethod
    def _dummyOnData(interest, data):
        """
        This is a do-nothing onData for using expressInterest for timeouts.
        This should never be called.
        """
        pass
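To tie the pieces together, here is a minimal sketch of constructing the ChronoSync2013 class above (a sketch only, assuming PyNDN2 is installed, a local forwarder is running, and a default identity exists; the data and broadcast prefixes are illustrative):

import time
from pyndn import Face, Name
from pyndn.security import KeyChain
from pyndn.sync import ChronoSync2013

face = Face()
keyChain = KeyChain()
face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

def onReceivedSyncState(syncStates, isRecovery):
    # Fetch the application data for each reported sequence number here.
    for state in syncStates:
        print("Sync state: %s session %d seq %d" % (
            state.getDataPrefix(), state.getSessionNo(), state.getSequenceNo()))

def onInitialized():
    print("ChronoSync initialized")

def onRegisterFailed(prefix):
    print("Register failed for " + prefix.toUri())

sync = ChronoSync2013(
    onReceivedSyncState, onInitialized,
    Name("/my/local/prefix/app/instance1"),  # applicationDataPrefix
    Name("/ndn/broadcast/myapp"),            # applicationBroadcastPrefix
    0,                                       # sessionNo
    face, keyChain, keyChain.getDefaultCertificateName(),
    5000.0,                                  # syncLifetime in milliseconds
    onRegisterFailed)

while True:
    face.processEvents()
    time.sleep(0.01)

After the constructor completes, calling sync.publishNextSequenceNo() announces new data, and getSequenceNo() gives the sequence number to use in the published name.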
Example No. 15
class ChronoSync2013(object):
    """
    Create a new ChronoSync2013 to communicate using the given face. Initialize
    the digest log with a digest of "00" and an empty content. Register the
    applicationBroadcastPrefix to receive interests for sync state messages and
    express an interest for the initial root digest "00".
    Note: Your application must call processEvents. Since processEvents
    modifies the internal ChronoSync data structures, your application should
    make sure that it calls processEvents in the same thread as this
    constructor (which also modifies the data structures).

    :param onReceivedSyncState: When ChronoSync receives a sync state message,
      this calls onReceivedSyncState(syncStates, isRecovery) where syncStates is
      the list of SyncState messages and isRecovery is true if this is the initial
      list of SyncState messages or from a recovery interest. (For example, if
      isRecovery is true, a chat application would not want to re-display all
      the associated chat messages.) The callback should send interests to fetch
      the application data for the sequence numbers in the sync state.
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onReceivedSyncState: function object
    :param onInitialized: This calls onInitialized() when the first sync data is
      received (or the interest times out because there are no other publishers
      yet).
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onInitialized: function object
    :param Name applicationDataPrefix: The prefix used by this application instance
      for application data. For example, "/my/local/prefix/ndnchat4/0K4wChff2v".
      This is used when sending a sync message for a new sequence number.
      In the sync message, this uses applicationDataPrefix.toUri().
    :param Name applicationBroadcastPrefix: The broadcast name prefix including the
      application name. For example, "/ndn/broadcast/ChronoChat-0.3/ndnchat1".
      This makes a copy of the name.
    :param int sessionNo: The session number used with the applicationDataPrefix
      in sync state messages.
    :param Face face: The Face for calling registerPrefix and expressInterest. The
       Face object must remain valid for the life of this ChronoSync2013 object.
    :param KeyChain keyChain: To sign a data packet containing a sync state
      message, this calls keyChain.sign(data, certificateName).
    :param Name certificateName: The certificate name of the key to use for
      signing a data packet containing a sync state message.
    :param float syncLifetime: The interest lifetime in milliseconds for sending
      sync interests.
    :param onRegisterFailed: If failed to register the prefix to receive
      interests for the applicationBroadcastPrefix, this calls
      onRegisterFailed(applicationBroadcastPrefix).
      NOTE: The library will log any exceptions raised by this callback, but
      for better error handling the callback should catch and properly
      handle any exceptions.
    :type onRegisterFailed: function object
    """
    def __init__(self, onReceivedSyncState, onInitialized,
                 applicationDataPrefix, applicationBroadcastPrefix, sessionNo,
                 face, keyChain, certificateName, syncLifetime,
                 onRegisterFailed):
        self._onReceivedSyncState = onReceivedSyncState
        self._onInitialized = onInitialized
        self._applicationDataPrefixUri = applicationDataPrefix.toUri()
        self._applicationBroadcastPrefix = Name(applicationBroadcastPrefix)
        self._sessionNo = sessionNo
        self._face = face
        self._keyChain = keyChain
        self._certificateName = Name(certificateName)
        self._syncLifetime = syncLifetime
        self._contentCache = MemoryContentCache(face)

        self._digestLog = []  # of _DigestLogEntry
        self._digestTree = DigestTree()
        self._sequenceNo = -1
        self._enabled = True

        emptyContent = SyncStateMsg()
        # Use getattr to avoid pylint errors.
        self._digestLog.append(
            self._DigestLogEntry("00", getattr(emptyContent, "ss")))

        # Register the prefix with the contentCache_ and use our own onInterest
        #   as the onDataNotFound fallback.
        self._contentCache.registerPrefix(self._applicationBroadcastPrefix,
                                          onRegisterFailed, self._onInterest)

        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append("00")
        interest.setInterestLifetimeMilliseconds(1000)
        interest.setMustBeFresh(True)
        face.expressInterest(interest, self._onData, self._initialTimeOut)
        logging.getLogger(__name__).info("initial sync expressed")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())

    class SyncState(object):
        """
        A SyncState holds the values of a sync state message which is passed to
        the onReceivedSyncState callback which was given to the ChronoSync2013
        constructor. Note: this has the same info as the Protobuf class
        sync_state_pb2.SyncState, but we make a separate class so
        that we don't need the Protobuf definition in the ChronoSync API.
        """
        def __init__(self, dataPrefixUri, sessionNo, sequenceNo,
                     applicationInfo):
            self._dataPrefixUri = dataPrefixUri
            self._sessionNo = sessionNo
            self._sequenceNo = sequenceNo
            self._applicationInfo = applicationInfo

        def getDataPrefix(self):
            """
            Get the application data prefix for this sync state message.

            :return: The application data prefix as a Name URI string.
            :rtype: str
            """
            return self._dataPrefixUri

        def getSessionNo(self):
            """
            Get the session number associated with the application data prefix
            for this sync state message.

            :return: The session number.
            :rtype: int
            """
            return self._sessionNo

        def getSequenceNo(self):
            """
            Get the sequence number for this sync state message.

            :return: The sequence number.
            :rtype: int
            """
            return self._sequenceNo

        def getApplicationInfo(self):
            """
            Get the application info which was included when the sender
            published the next sequence number.

            :return: The applicationInfo Blob. If the sender did not provide
              any, return an isNull Blob.
            :rtype: Blob
            """
            return self._applicationInfo

    class PrefixAndSessionNo(object):
        """
        A PrefixAndSessionNo holds a user's data prefix and session number (used
        to return a list from getProducerPrefixes).
        """
        def __init__(self, dataPrefixUri, sessionNo):
            self._dataPrefixUri = dataPrefixUri
            self._sessionNo = sessionNo

        def getDataPrefix(self):
            """
            Get the application data prefix.

            :return: The application data prefix as a Name URI string.
            :rtype: str
            """
            return self._dataPrefixUri

        def getSessionNo(self):
            """
            Get the session number associated with the application data prefix.

            :return: The session number.
            :rtype: int
            """
            return self._sessionNo

    def getProducerPrefixes(self):
        """
        Get a copy of the current list of producer data prefixes, and the
        associated session number. You can use these in getProducerSequenceNo().
        This includes the prefix for this user.

        :return: A copy of the list of each producer prefix and session number.
        :rtype: array of ChronoSync2013.PrefixAndSessionNo
        """
        prefixes = []

        for i in range(self._digestTree.size()):
            node = self._digestTree.get(i)
            prefixes.append(
                ChronoSync2013.PrefixAndSessionNo(node.getDataPrefix(),
                                                  node.getSessionNo()))

        return prefixes

    def getProducerSequenceNo(self, dataPrefix, sessionNo):
        """
        Get the current sequence number in the digest tree for the given
        producer dataPrefix and sessionNo.

        :param str dataPrefix: The producer data prefix as a Name URI string.
        :param int sessionNo: The producer session number.
        :return: The current producer sequence number, or -1 if the producer
          dataPrefix and sessionNo are not in the digest tree.
        :rtype: int
        """
        index = self._digestTree.find(dataPrefix, sessionNo)
        if index < 0:
            return -1
        else:
            return self._digestTree.get(index).getSequenceNo()

    def publishNextSequenceNo(self, applicationInfo=None):
        """
        Increment the sequence number, create a sync message with the new
        sequence number and publish a data packet where the name is
        the applicationBroadcastPrefix + the root digest of the current digest
        tree. Then add the sync message to the digest tree and digest log which
        creates a new root digest. Finally, express an interest for the next sync
        update with the name applicationBroadcastPrefix + the new root digest.
        After this, your application should publish the content for the new
        sequence number. You can get the new sequence number with getSequenceNo().
        Note: Your application must call processEvents. Since processEvents
        modifies the internal ChronoSync data structures, your application should
        make sure that it calls processEvents in the same thread as
        publishNextSequenceNo() (which also modifies the data structures).

        :param Blob applicationInfo: (optional) This appends applicationInfo to
          the content of the sync messages. This same info is provided to the
          receiving application in the SyncState state object provided to the
          onReceivedSyncState callback.
        """
        if not isinstance(applicationInfo, Blob):
            applicationInfo = Blob(applicationInfo)

        self._sequenceNo += 1

        syncMessage = SyncStateMsg()
        content = getattr(syncMessage, "ss").add()
        content.name = self._applicationDataPrefixUri
        content.type = SyncState_UPDATE
        content.seqno.seq = self._sequenceNo
        content.seqno.session = self._sessionNo
        if not applicationInfo.isNull() and applicationInfo.size() > 0:
            content.application_info = applicationInfo.toBytes()

        self._broadcastSyncState(self._digestTree.getRoot(), syncMessage)

        if not self._update(getattr(syncMessage, "ss")):
            # Since we incremented the sequence number, we expect there to be a
            #   new digest log entry.
            raise RuntimeError(
                "ChronoSync: update did not create a new digest log entry")

        # TODO: Should we have an option to not express an interest if this is the
        #   final publish of the session?
        interest = Interest(self._applicationBroadcastPrefix)
        interest.getName().append(self._digestTree.getRoot())
        interest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(interest, self._onData, self._syncTimeout)

    def getSequenceNo(self):
        """
        Get the sequence number of the latest data published by this application
        instance.

        :return: The sequence number.
        :rtype: int
        """
        return self._sequenceNo

    def shutdown(self):
        """
        Unregister callbacks so that this does not respond to interests anymore.
        If you will discard this ChronoSync2013 object while your application is
        still running, you should call shutdown() first.  After calling this, you
        should not call publishNextSequenceNo() again since the behavior will be
        undefined.
        Note: Because this modifies internal ChronoSync data structures, your
        application should make sure that it calls processEvents in the same
        thread as shutdown() (which also modifies the data structures).
        """
        self._enabled = False
        self._contentCache.unregisterAll()

    class _DigestLogEntry(object):
        def __init__(self, digest, data):
            self._digest = digest
            # Copy.
            self._data = data[:]

        def getDigest(self):
            return self._digest

        def getData(self):
            """
            Get the data.

            :return: The data as a list.
            :rtype: array of sync_state_pb2.SyncState.
            """
            return self._data

    def _broadcastSyncState(self, digest, syncMessage):
        """
        Make a data packet with the syncMessage and with name
        applicationBroadcastPrefix_ + digest. Sign and send.

        :param str digest: The root digest as a hex string for the data packet
          name.
        :param sync_state_pb2.SyncStateMsg syncMessage: The sync message to
          serialize as the content of the data packet.
        """
        data = Data(self._applicationBroadcastPrefix)
        data.getName().append(digest)
        # TODO: Check if this works in Python 3.
        data.setContent(Blob(syncMessage.SerializeToString()))
        self._keyChain.sign(data, self._certificateName)
        self._contentCache.add(data)

    def _update(self, content):
        """
        Update the digest tree with the messages in content. If the digest tree
        root is not in the digest log, also add a log entry with the content.

        :param content: The list of SyncState.
        :type content: array of sync_state_pb2.SyncState
        :return: True if added a digest log entry (because the updated digest
          tree root was not in the log), False if didn't add a log entry.
        :rtype: bool
        """
        for i in range(len(content)):
            syncState = content[i]

            if syncState.type == SyncState_UPDATE:
                if self._digestTree.update(syncState.name,
                                           syncState.seqno.session,
                                           syncState.seqno.seq):
                    # The digest tree was updated.
                    if self._applicationDataPrefixUri == syncState.name:
                        self._sequenceNo = syncState.seqno.seq

        if self._logFind(self._digestTree.getRoot()) == -1:
            self._digestLog.append(
                self._DigestLogEntry(self._digestTree.getRoot(), content))
            return True
        else:
            return False

    def _logFind(self, digest):
        """
        Search the digest log by digest.
        """
        for i in range(len(self._digestLog)):
            if digest == self._digestLog[i].getDigest():
                return i

        return -1

    def _onInterest(self, prefix, interest, face, interestFilterId, filter):
        """
        Process the sync interest from the applicationBroadcastPrefix. If we
        can't satisfy the interest, add it to the pending interest table in
        the _contentCache so that a future call to _contentCache.add may
        satisfy it.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        # Search if the digest already exists in the digest log.
        logging.getLogger(__name__).info("Sync Interest received in callback.")
        logging.getLogger(__name__).info("%s", interest.getName().toUri())

        syncDigest = interest.getName().get(
            self._applicationBroadcastPrefix.size()).toEscapedString()
        if (interest.getName().size() ==
                self._applicationBroadcastPrefix.size() + 2):
            # Assume this is a recovery interest.
            syncDigest = interest.getName().get(
                self._applicationBroadcastPrefix.size() + 1).toEscapedString()
        logging.getLogger(__name__).info("syncDigest: %s", syncDigest)
        if (interest.getName().size()
                == self._applicationBroadcastPrefix.size() + 2
                or syncDigest == "00"):
            # Recovery interest or newcomer interest.
            self._processRecoveryInterest(interest, syncDigest, face)
        else:
            self._contentCache.storePendingInterest(interest, face)

            if syncDigest != self._digestTree.getRoot():
                index = self._logFind(syncDigest)
                if index == -1:
                    # To see whether there is any data packet coming back, wait
                    #   2 seconds using the Interest timeout mechanism.
                    # TODO: Are we sure using a "/local/timeout" interest is the
                    #   best future call approach?
                    timeout = Interest(Name("/local/timeout"))
                    timeout.setInterestLifetimeMilliseconds(2000)
                    self._face.expressInterest(
                        timeout, self._dummyOnData,
                        self._makeJudgeRecovery(syncDigest, face))
                    logging.getLogger(__name__).info("set timer recover")
                else:
                    # common interest processing
                    self._processSyncInterest(index, syncDigest, face)

    def _onData(self, interest, data):
        """
        Process Sync Data.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info(
            "Sync ContentObject received in callback")
        logging.getLogger(__name__).info("name: %s", data.getName().toUri())
        # TODO: Check if this works in Python 3.
        tempContent = SyncStateMsg()
        #pylint: disable=E1103
        tempContent.ParseFromString(data.getContent().toBytes())
        #pylint: enable=E1103
        content = getattr(tempContent, "ss")
        if self._digestTree.getRoot() == "00":
            isRecovery = True
            # Process the initial sync data.
            self._initialOndata(content)
        else:
            self._update(content)
            if (interest.getName().size() ==
                    self._applicationBroadcastPrefix.size() + 2):
                # Assume this is a recovery interest.
                isRecovery = True
            else:
                isRecovery = False

        # Collect the UPDATE sync states so the application can fetch the
        #   corresponding application data.
        syncStates = []
        for i in range(len(content)):
            syncState = content[i]

            # Only report UPDATE sync states.
            if syncState.type == SyncState_UPDATE:
                if len(syncState.application_info) > 0:
                    applicationInfo = Blob(syncState.application_info, True)
                else:
                    applicationInfo = Blob()

                syncStates.append(
                    self.SyncState(syncState.name, syncState.seqno.session,
                                   syncState.seqno.seq, applicationInfo))

        try:
            self._onReceivedSyncState(syncStates, isRecovery)
        except:
            logging.exception("Error in onReceivedSyncState")

        name = Name(self._applicationBroadcastPrefix)
        name.append(self._digestTree.getRoot())
        syncInterest = Interest(name)
        syncInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(syncInterest, self._onData,
                                   self._syncTimeout)
        logging.getLogger(__name__).info("Sync interest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _initialTimeOut(self, interest):
        """
        Initial sync interest timeout, which means there are no other publishers
        yet.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info("initial sync timeout")
        logging.getLogger(__name__).info("no other people")
        self._sequenceNo += 1
        if self._sequenceNo != 0:
            # Since there were no other users, we expect sequence no 0.
            raise RuntimeError(
                "ChronoSync: sequenceNo_ is not the expected value of 0 for first use."
            )

        tempContent = SyncStateMsg()
        content = getattr(tempContent, "ss").add()
        content.name = self._applicationDataPrefixUri
        content.type = SyncState_UPDATE
        content.seqno.seq = self._sequenceNo
        content.seqno.session = self._sessionNo
        self._update(getattr(tempContent, "ss"))

        try:
            self._onInitialized()
        except:
            logging.exception("Error in onInitialized")

        name = Name(self._applicationBroadcastPrefix)
        name.append(self._digestTree.getRoot())
        retryInterest = Interest(name)
        retryInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(retryInterest, self._onData,
                                   self._syncTimeout)
        logging.getLogger(__name__).info("Sync interest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _processRecoveryInterest(self, interest, syncDigest, face):
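        """
        Handle a recovery or newcomer interest. If the requested digest is in
        the digest log, reply with the full current state from the digest
        tree.
        """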
        logging.getLogger(__name__).info("processRecoveryInterest")
        if self._logFind(syncDigest) != -1:
            tempContent = SyncStateMsg()
            for i in range(self._digestTree.size()):
                content = getattr(tempContent, "ss").add()
                content.name = self._digestTree.get(i).getDataPrefix()
                content.type = SyncState_UPDATE
                content.seqno.seq = self._digestTree.get(i).getSequenceNo()
                content.seqno.session = self._digestTree.get(i).getSessionNo()

            if len(getattr(tempContent, "ss")) != 0:
                # TODO: Check if this works in Python 3.
                #pylint: disable=E1103
                array = tempContent.SerializeToString()
                #pylint: enable=E1103
                data = Data(interest.getName())
                data.setContent(Blob(array))
                if interest.getName().get(-1).toEscapedString() == "00":
                    # Limit the freshness of replies to the "00" interest,
                    # since the reply contents can change over time.
                    data.getMetaInfo().setFreshnessPeriod(1000)

                self._keyChain.sign(data, self._certificateName)
                try:
                    face.putData(data)
                except Exception as ex:
                    logging.getLogger(__name__).error(
                        "Error in face.putData: %s", str(ex))
                    return

                logging.getLogger(__name__).info("send recovery data back")
                logging.getLogger(__name__).info("%s",
                                                 interest.getName().toUri())

    def _processSyncInterest(self, index, syncDigest, face):
        """
        Common interest processing, using digest log to find the difference
        after syncDigest.

        :return: True if sent a data packet to satisfy the interest, otherwise
          False.
        :rtype: bool
        """
        nameList = []  # of str
        sequenceNoList = []  # of int
        sessionNoList = []  # of int
        for j in range(index + 1, len(self._digestLog)):
            # getData() returns an array of sync_state_pb2.SyncState.
            temp = self._digestLog[j].getData()
            for i in range(len(temp)):
                syncState = temp[i]
                if syncState.type != SyncState_UPDATE:
                    continue

                if self._digestTree.find(syncState.name,
                                         syncState.seqno.session) != -1:
                    n = -1
                    for k in range(len(nameList)):
                        if nameList[k] == syncState.name:
                            n = k
                            break

                    if n == -1:
                        nameList.append(syncState.name)
                        sequenceNoList.append(syncState.seqno.seq)
                        sessionNoList.append(syncState.seqno.session)
                    else:
                        sequenceNoList[n] = syncState.seqno.seq
                        sessionNoList[n] = syncState.seqno.session

        tempContent = SyncStateMsg()
        for i in range(len(nameList)):
            content = getattr(tempContent, "ss").add()
            content.name = nameList[i]
            content.type = SyncState_UPDATE
            content.seqno.seq = sequenceNoList[i]
            content.seqno.session = sessionNoList[i]

        sent = False
        if len(getattr(tempContent, "ss")) != 0:
            name = Name(self._applicationBroadcastPrefix)
            name.append(syncDigest)
            # TODO: Check if this works in Python 3.
            #pylint: disable=E1103
            array = tempContent.SerializeToString()
            #pylint: enable=E1103
            data = Data(name)
            data.setContent(Blob(array))
            self._keyChain.sign(data, self._certificateName)

            try:
                face.putData(data)
            except Exception as ex:
                logging.getLogger(__name__).error("Error in face.putData: %s",
                                                  str(ex))
                return

            sent = True
            logging.getLogger(__name__).info("Sync Data sent")
            logging.getLogger(__name__).info("%s", name.toUri())

        return sent

    def _sendRecovery(self, syncDigest):
        """
        Send Recovery Interest.
        """
        logging.getLogger(__name__).info("unknown digest: ")
        name = Name(self._applicationBroadcastPrefix)
        name.append("recovery").append(syncDigest)
        interest = Interest(name)
        interest.setInterestLifetimeMilliseconds(self._syncLifetime)
        self._face.expressInterest(interest, self._onData, self._syncTimeout)
        logging.getLogger(__name__).info("Recovery sync interest expressed:")
        logging.getLogger(__name__).info("%s", name.toUri())

    def _makeJudgeRecovery(self, syncDigest, face):
        """
        Return a function for onTimeout which calls _judgeRecovery.
        """
        def f(interest):
            self._judgeRecovery(interest, syncDigest, face)

        return f

    def _judgeRecovery(self, interest, syncDigest, face):
        """
        This is called by _onInterest after a timeout to check if a recovery is
        needed.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        index2 = self._logFind(syncDigest)
        if index2 != -1:
            if syncDigest != self._digestTree.getRoot():
                self._processSyncInterest(index2, syncDigest, face)
        else:
            self._sendRecovery(syncDigest)

    def _syncTimeout(self, interest):
        """
        Sync interest timed out. If the interest name still matches the
        current digest tree root, express it again.
        """
        if not self._enabled:
            # Ignore callbacks after the application calls shutdown().
            return

        logging.getLogger(__name__).info("Sync Interest timed out.")
        logging.getLogger(__name__).info("Sync Interest name: %s",
                                         interest.getName().toUri())
        # The digest is the name component right after the broadcast prefix.
        component = interest.getName().get(
            self._applicationBroadcastPrefix.size()).toEscapedString()
        if component == self._digestTree.getRoot():
            name = Name(interest.getName())
            retryInterest = Interest(interest.getName())
            retryInterest.setInterestLifetimeMilliseconds(self._syncLifetime)
            self._face.expressInterest(retryInterest, self._onData,
                                       self._syncTimeout)

            logging.getLogger(__name__).info("Sync interest expressed:")
            logging.getLogger(__name__).info("%s", name.toUri())

    def _initialOndata(self, content):
        """
        Process the initial data, which usually includes all the other
        publishers' info, and send back the newcomer's own info.
        """
        # The user is a newcomer and receives the data of everyone else in
        #   the group.
        self._update(content)
        digest = self._digestTree.getRoot()
        for i in range(len(content)):
            syncState = content[i]
            if (syncState.name == self._applicationDataPrefixUri
                    and syncState.seqno.session == self._sessionNo):
                # If the user is a returning member, then after adding the
                #   stored log it needs to increase its sequence number by 1.
                tempContent = SyncStateMsg()
                # Use getattr to avoid pylint errors.
                content2 = getattr(tempContent, "ss").add()
                content2.name = self._applicationDataPrefixUri
                content2.type = SyncState_UPDATE
                content2.seqno.seq = syncState.seqno.seq + 1
                content2.seqno.session = self._sessionNo

                if self._update(getattr(tempContent, "ss")):
                    try:
                        self._onInitialized()
                    except:
                        logging.exception("Error in onInitialized")

        tempContent2 = SyncStateMsg()
        if self._sequenceNo >= 0:
            # Send the data packet with the new sequence number back.
            content2 = getattr(tempContent2, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = self._sequenceNo
            content2.seqno.session = self._sessionNo
        else:
            content2 = getattr(tempContent2, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = 0
            content2.seqno.session = self._sessionNo

        self._broadcastSyncState(digest, tempContent2)

        if (self._digestTree.find(self._applicationDataPrefixUri,
                                  self._sessionNo) == -1):
            # The user hasn't been added to the digest tree yet.
            logging.getLogger(__name__).info("initial state")
            self._sequenceNo += 1
            tempContent = SyncStateMsg()
            content2 = getattr(tempContent, "ss").add()
            content2.name = self._applicationDataPrefixUri
            content2.type = SyncState_UPDATE
            content2.seqno.seq = self._sequenceNo
            content2.seqno.session = self._sessionNo

            if self._update(getattr(tempContent, "ss")):
                try:
                    self._onInitialized()
                except:
                    logging.exception("Error in onInitialized")

    @staticmethod
    def _dummyOnData(interest, data):
        """
        This is a do-nothing onData for using expressInterest for timeouts.
        This should never be called.
        """
        pass
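
# A minimal usage sketch for the ChronoSync2013 class above. The prefixes,
# session number, and callback bodies are illustrative assumptions, and the
# KeyChain is assumed to already have a default identity and certificate.
import time

from pyndn import Face, Name
from pyndn.security import KeyChain

def onReceivedSyncState(syncStates, isRecovery):
    # syncStates is a list of ChronoSync2013.SyncState for other producers.
    for state in syncStates:
        print("sync update: %s session %d seq %d" %
              (state.getDataPrefix(), state.getSessionNo(),
               state.getSequenceNo()))

def onInitialized():
    print("sync initialized")

def onRegisterFailed(prefix):
    print("register failed for " + prefix.toUri())

face = Face()  # connect to the local forwarder
keyChain = KeyChain()
face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

sync = ChronoSync2013(
    onReceivedSyncState, onInitialized,
    Name("/ndn/example/app/alice"),      # application data prefix (assumed)
    Name("/ndn/broadcast/example/app"),  # broadcast prefix (assumed)
    int(time.time()),                    # session number
    face, keyChain, keyChain.getDefaultCertificateName(),
    5000.0,                              # sync interest lifetime (ms)
    onRegisterFailed)

# After publishing application data for the next sequence number, announce it:
# sync.publishNextSequenceNo()
while True:
    face.processEvents()
    time.sleep(0.01)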
Example no. 16
    def publisher_loop(face):
        global done
        try:
            while not done:
                face.processEvents()
                if registerFail.call_count > 0:
                    logger.error("Registration failed!")
                    done = True
                time.sleep(0.01)
        except:
            face.shutdown()
            return 0

    publisher_face = Face("localhost")
    publisher_face.setCommandSigningInfo(keychain, certName)

    dataCache = MemoryContentCache(publisher_face, 1)
    dataCache.registerPrefix(data_prefix, registerFail, onDataMissing)

    publisher = Thread(target=publisher_loop, name="Data publisher",
                       args=(publisher_face,))
    publisher.start()

    try:
        # sleep a second, like the repo-ng test
        time.sleep(1)
        while not done:
            # pick a random data name
            data_part = suffix  # str(randint(0, N))

            fullName = Name(data_prefix).append(Name(data_part))

            # currently we need to provide the version ourselves when we
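            # Hypothetical continuation, assuming the usual pyndn imports and
            #   following the versioning pattern in the neighboring examples:
            #   append a version component, sign the packet, and add it to the
            #   cache. The payload and freshness values are assumptions.
            data = Data(Name(fullName).appendVersion(int(time.time())))
            data.setContent("test data")
            data.getMetaInfo().setFreshnessPeriod(1000)
            keychain.sign(data, certName)
            dataCache.add(data)
            time.sleep(0.5)  # assumed pacing between publishes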
Example no. 17
class CachedContentPublisher(IotNode):
    def __init__(self):
        super(CachedContentPublisher, self).__init__()
        self._missedRequests = 0
        self._dataPrefix = None
        self.addCommand(Name('listPrefixes'), self.listDataPrefixes, ['repo'],
                        False)

    def setupComplete(self):
        # The cache will clear old values every 100s
        self._dataCache = MemoryContentCache(self.face, 100000)
        self._dataPrefix = Name(self.prefix).append('data')
        self.registerCachePrefix()
        print "Serving data at {}".format(self._dataPrefix.toUri())
        self.loop.call_soon(self.publishData)

    def listDataPrefixes(self, interest):
        d = Data(interest.getName())
        if self._dataPrefix is not None:
            d.setContent(json.dumps([self._dataPrefix.toUri()]))
        d.getMetaInfo().setFreshnessPeriod(10000)
        return d

    def registerCachePrefix(self):
        self._dataCache.registerPrefix(self._dataPrefix,
                                       self.cacheRegisterFail,
                                       self.onDataMissing)

    def unknownCommandResponse(self, interest):
        # we override this so the MemoryContentCache can handle data requests
        afterPrefix = interest.getName().get(
            self.prefix.size()).toEscapedString()
        if afterPrefix == 'data':
            return None
        else:
            return super(CachedContentPublisher,
                         self).unknownCommandResponse(interest)

    def cacheRegisterFail(self, prefix):
        # Just try again.
        self.log.warn('Could not register data cache')
        self.registerCachePrefix()

    def onDataMissing(self, prefix, interest, transport, prefixId):
        self._missedRequests += 1
        # let it timeout

    def publishData(self):
        timestamp = time.time()
        cpu_use = ps.cpu_percent()
        users = [u.name for u in ps.users()]
        nProcesses = len(ps.pids())
        memUse = ps.virtual_memory().percent
        swapUse = ps.swap_memory().percent

        info = {
            'cpu_usage': cpu_use,
            'users': users,
            'processes': nProcesses,
            'memory_usage': memUse,
            'swap_usage': swapUse
        }

        dataOut = Data(Name(self._dataPrefix).appendVersion(int(timestamp)))
        dataOut.setContent(json.dumps(info))
        dataOut.getMetaInfo().setFreshnessPeriod(10000)
        self.signData(dataOut)

        self._dataCache.add(dataOut)

        # repeat every 5 seconds
        self.loop.call_later(5, self.publishData)
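
# A minimal consumer sketch for the publisher above. The data prefix is
# whatever the node prints at startup, so the name below is an assumption.
import json
import time

from pyndn import Face, Interest, Name

def onData(interest, data):
    info = json.loads(data.getContent().toRawStr())
    print("cpu_usage: {}".format(info['cpu_usage']))

def onTimeout(interest):
    print("timed out: " + interest.getName().toUri())

face = Face()  # connect to the local forwarder
interest = Interest(Name("/node/prefix/data"))  # assumed publisher prefix
interest.setMustBeFresh(True)   # skip samples older than the freshness period
interest.setChildSelector(1)    # prefer the latest (rightmost) version
interest.setInterestLifetimeMilliseconds(4000)
face.expressInterest(interest, onData, onTimeout)

while True:
    face.processEvents()
    time.sleep(0.01)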
Example no. 18
def start(self):
    self._dataCache = MemoryContentCache(self._face, 100000)
    self.registerCachePrefix()
    print "Serving data at {}".format(self._dataPrefix.toUri())
    self._face.callLater(5000, self.publishData)
    return
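
# A sketch of the publishData that start() above schedules. It mirrors the
# previous example's publishData, but reschedules itself with Face.callLater,
# which takes milliseconds. The JSON payload here is an assumption.
import json
import time

def publishData(self):
    data = Data(Name(self._dataPrefix).appendVersion(int(time.time())))
    data.setContent(json.dumps({'status': 'ok'}))  # assumed payload
    data.getMetaInfo().setFreshnessPeriod(10000)
    self.signData(data)
    self._dataCache.add(data)
    # Reschedule in 5000 ms; Face.callLater takes milliseconds, unlike
    #   asyncio's loop.call_later, which takes seconds.
    self._face.callLater(5000, self.publishData)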